I am trying to implement deferred shading following tutorials 35-37 on http://ogldev.atspace.co.uk/www/tutorial35/tutorial35.html with OpenSceneGraph 3.4.0.
Right now I'm struggling to get the stencil pass working correctly, so I have simplified my code as far as possible.
As a result, right now I am using three cameras:
one RTT camera that renders the position, normal and color textures and writes the depth buffer.
one stencil camera (set up as an RTT camera with no color output in the fragment shader) that writes into the stencil buffer based on the depth test results
one point light camera (set up as a HUD camera), using deferred shading and set to write only where the stencil buffer is not 0.
When I try to display the depth buffer with the point light camera it works.
When I set the clear mask of pointLightCamera to STENCIL_BUFFER_BIT and set the stencil clear to 1, it displays everything. It displays nothing when setting it to 0.
When I disable the clear settings in the pointLightCamera (as they should be) and enable any clear settings for the stencil buffer in the stencil camera, it has no effect whatsoever.
The stencilFunc and stencilOperation are set as they are in the tutorials.
When I start the program it won't display anything (so all the stencil values are 0).
This leads me to the conclusion that my stencilCamera doesn't actually write into the stencil buffer, yet I have no idea why. I couldn't find any more help in the OSG examples or internet forums, and I have tried everything I could think of.
Here are my setups for the cameras and the viewer (the rttCamera has my scene group node as a child, which contains a model; the stencilCamera has a sphere model as a child for the light volume; and my pointLightCamera has a screenQuad as a child).
texDepth->setTextureSize(1024, 1024);
texDepth->setInternalFormat(GL_DEPTH24_STENCIL8_EXT);
texDepth->setSourceFormat(GL_DEPTH_STENCIL_EXT);
texDepth->setSourceType(GL_UNSIGNED_INT_24_8_EXT);
osg::ref_ptr<osg::Texture2D> texColor = createTexture();
osg::ref_ptr<osg::Texture2D> texPosition = createTexture();
osg::ref_ptr<osg::Texture2D> texNormal = createTexture();
//1. pass camera and set up
osg::ref_ptr<osg::Camera> rttCamera = createRTTCamera(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth, false);
rttCamera->setRenderOrder(osg::Camera::PRE_RENDER, 0);
rttCamera->attach(osg::Camera::COLOR_BUFFER0, texColor);
rttCamera->attach(osg::Camera::COLOR_BUFFER1, texPosition);
rttCamera->attach(osg::Camera::COLOR_BUFFER2, texNormal);
rttCamera->setClearColor(osg::Vec4(0.0, 0.0, 0.0, 0.0));
osg::ref_ptr<osg::Stencil> rttStencil = new osg::Stencil;
rttStencil->setWriteMask(0);
rttCamera->getOrCreateStateSet()->setAttribute(rttStencil, osg::StateAttribute::ON);
rttCamera->setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
osg::ref_ptr<osg::StateSet> ss = rttCamera->getOrCreateStateSet();
osg::ref_ptr<osg::Program> rttProg = new osg::Program;
osg::Shader* vertShader = osgDB::readShaderFile("pass1.vert");
osg::Shader* fragShader = osgDB::readShaderFile("pass1.frag");
rttProg->addShader(vertShader);
rttProg->addShader(fragShader);
ss->setAttributeAndModes(rttProg.get(), osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
osg::ref_ptr<osg::BlendFunc> bf = new osg::BlendFunc;
bf->setFunction(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
ss->setAttributeAndModes(bf, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
rttCamera->addChild(scene.get());
//2. pass: stencil pass camera and set up
osg::ref_ptr<osg::Camera> stencilCamera = createRTTCamera(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth, false);//createHUDCamera(0.0, 1.0, 0.0, 1.0);//
stencilCamera->setRenderOrder(osg::Camera::PRE_RENDER, 2);
stencilCamera->attach(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth);//depth buffer was filled by rttCamera
stencilCamera->getOrCreateStateSet()->setMode(GL_STENCIL_TEST, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
stencilCamera->getOrCreateStateSet()->setMode(GL_DEPTH_TEST, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE); //depth test result will fill stencil buffer
osg::ref_ptr<osg::Depth> depth = new osg::Depth;
depth->setWriteMask(false); //depth test is needed to compare scene to light volume, but light volume must not write into depth buffer
stencilCamera->getOrCreateStateSet()->setAttribute(depth, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
stencilCamera->getOrCreateStateSet()->setMode(GL_CULL_FACE, osg::StateAttribute::OFF | osg::StateAttribute::OVERRIDE);
stencilCamera->setClearMask(GL_STENCIL_BUFFER_BIT);
osg::ref_ptr<osg::StencilTwoSided> stencilWrite = new osg::StencilTwoSided;
stencilWrite->setFunction(osg::StencilTwoSided::FRONT, osg::StencilTwoSided::ALWAYS, 0, 0);
stencilWrite->setFunction(osg::StencilTwoSided::BACK, osg::StencilTwoSided::ALWAYS, 0, 0);
stencilWrite->setOperation(osg::StencilTwoSided::FRONT, osg::StencilTwoSided::KEEP, osg::StencilTwoSided::DECR_WRAP, osg::StencilTwoSided::KEEP);
stencilWrite->setOperation(osg::StencilTwoSided::BACK, osg::StencilTwoSided::KEEP, osg::StencilTwoSided::INCR_WRAP, osg::StencilTwoSided::KEEP);
stencilWrite->setWriteMask(osg::StencilTwoSided::FRONT, 0xFF); //may not be needed
stencilWrite->setWriteMask(osg::StencilTwoSided::BACK, 0xFF);
stencilCamera->getOrCreateStateSet()->setAttribute(stencilWrite, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
stencilCamera->addChild(mtSphere);//lights.get());
osg::ref_ptr<osg::Program> stencilProg = new osg::Program;
stencilProg->addShader(osgDB::readShaderFile("nullTechnique.vert"));
stencilProg->addShader(osgDB::readShaderFile("nullTechnique.frag"));
stencilCamera->getOrCreateStateSet()->setAttributeAndModes(stencilProg.get(), osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
//3. pass: point light pass camera and set up
osg::ref_ptr<osg::Camera> pointLightCamera = createHUDCamera(0.0, 1.0, 0.0, 1.0);
pointLightCamera->setClearMask(0);
pointLightCamera->setRenderOrder(osg::Camera::POST_RENDER, 1);//PRE_RENDER, 2);
pointLightCamera->addChild(screenQuad);
ss = pointLightCamera->getOrCreateStateSet();
osg::ref_ptr<osg::Stencil> stencilRead = new osg::Stencil;
stencilRead->setFunction(osg::Stencil::NOTEQUAL, 1, 0xFF); //render only where stencil buffer is != 0 (this will be 1 as set in stencil pass)
stencilRead->setWriteMask(0);//it should not write into the stencil buffer it reads from
ss->setAttribute(stencilRead, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
osg::ref_ptr<osg::Depth> depthRead = new osg::Depth;
depthRead->setWriteMask(false); //the light pass must not write into the shared depth buffer
ss->setAttribute(depthRead, osg::StateAttribute::ON);
pointLightCamera->attach(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth);
ss->setMode(GL_STENCIL_TEST, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
ss->setMode(GL_DEPTH_TEST, osg::StateAttribute::OFF);
ss->setMode(GL_BLEND, osg::StateAttribute::ON); //all light passes shall add their renderings to the previous light passes
osg::ref_ptr<osg::BlendFunc> renderAddBlendFunc = new osg::BlendFunc;
renderAddBlendFunc->setFunction(GL_ONE, GL_ONE);
ss->setAttribute(renderAddBlendFunc, osg::StateAttribute::ON);
osg::ref_ptr<osg::BlendEquation> renderAddBlendEq = new osg::BlendEquation;
renderAddBlendEq->setEquation(osg::BlendEquation::FUNC_ADD);
ss->setAttribute(renderAddBlendEq, osg::StateAttribute::ON);
osg::ref_ptr<osg::CullFace> cullFacePointLightPass = new osg::CullFace(osg::CullFace::FRONT);
osg::ref_ptr<osg::Program> pointLightProg = new osg::Program;
vertShader = osgDB::readShaderFile("pass2.vert");
fragShader = osgDB::readShaderFile("pass2.frag");
pointLightProg->addShader(vertShader);
pointLightProg->addShader(fragShader);
ss->setAttributeAndModes(pointLightProg.get(), osg::StateAttribute::ON);
ss->setTextureAttributeAndModes(0, texColor);
ss->setTextureAttributeAndModes(1, texPosition);
ss->setTextureAttributeAndModes(2, texNormal);
ss->setTextureAttributeAndModes(3, texDepth);
ss->addUniform(new osg::Uniform("tDiffuse", 0));
ss->addUniform(new osg::Uniform("tPosition", 1));
ss->addUniform(new osg::Uniform("tNormals", 2));
ss->addUniform(new osg::Uniform("tDepth", 3));
ss->addUniform(new osg::Uniform("lightPosition", osg::Vec3(0.0, 0.0, 0.0)));
osg::Vec3 eye, center, up;
rttCamera->getViewMatrixAsLookAt(eye, center, up);
ss->addUniform(new osg::Uniform("cameraPosition", eye));
pointLightCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER);
osg::ref_ptr<osg::Group> root = new osg::Group;
root->addChild(rttCamera);
root->addChild(stencilCamera);
root->addChild(pointLightCamera);
osgViewer::Viewer viewer;
viewer.setCameraManipulator(new osgGA::TrackballManipulator);
viewer.getCamera()->setComputeNearFarMode(osg::CullSettings::DO_NOT_COMPUTE_NEAR_FAR);
viewer.setSceneData(root.get());
osg::DisplaySettings::instance()->setMinimumNumStencilBits(8);
(createRTTCamera and createHUDCamera are taken from the OSG Cookbook.)
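For reference, here is a rough sketch of what such Cookbook-style helpers typically do (this is a paraphrase rather than the book's verbatim code, and the exact parameters are assumed):
osg::Camera* createRTTCamera(osg::Camera::BufferComponent buffer, osg::Texture2D* tex, bool isAbsolute)
{
    //pre-render FBO camera that clears color and depth and renders into the given texture
    osg::ref_ptr<osg::Camera> camera = new osg::Camera;
    camera->setClearColor(osg::Vec4());
    camera->setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    camera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
    camera->setRenderOrder(osg::Camera::PRE_RENDER);
    if (tex)
    {
        camera->setViewport(0, 0, tex->getTextureWidth(), tex->getTextureHeight());
        camera->attach(buffer, tex);
    }
    if (isAbsolute)
    {
        //orthographic full-screen setup, used for screen-quad passes
        camera->setReferenceFrame(osg::Transform::ABSOLUTE_RF);
        camera->setProjectionMatrix(osg::Matrix::ortho2D(0.0, 1.0, 0.0, 1.0));
        camera->setViewMatrix(osg::Matrix::identity());
    }
    return camera.release();
}
osg::Camera* createHUDCamera(double left, double right, double bottom, double top)
{
    //post-render overlay camera with an absolute orthographic projection
    osg::ref_ptr<osg::Camera> camera = new osg::Camera;
    camera->setReferenceFrame(osg::Transform::ABSOLUTE_RF);
    camera->setClearMask(GL_DEPTH_BUFFER_BIT);
    camera->setRenderOrder(osg::Camera::POST_RENDER);
    camera->setAllowEventFocus(false);
    camera->setProjectionMatrix(osg::Matrix::ortho2D(left, right, bottom, top));
    return camera.release();
}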
Okay, I finally found the problem.
I was using FRAME_BUFFER_OBJECT as the render target implementation for the RTT cameras, but FRAME_BUFFER for my HUD camera, and I later noticed that this produced an OpenGL "invalid value" error. Now that I use FRAME_BUFFER_OBJECT for all three cameras, they can share the stencil values and it works. I just had to write the results of my former HUD camera into a new texture, which I then read with a last camera: a new HUD camera that has FRAME_BUFFER as its render target.
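Roughly, the working arrangement looks like this (a sketch only; texFinal and createScreenQuad are illustrative names, not the exact code):
//all three deferred-shading cameras render into FBOs, so they can share the packed depth/stencil texture
rttCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
stencilCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
pointLightCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
//the former HUD camera now writes its result into a new texture...
osg::ref_ptr<osg::Texture2D> texFinal = createTexture();
pointLightCamera->attach(osg::Camera::COLOR_BUFFER0, texFinal.get());
//...and a last HUD camera with FRAME_BUFFER as its render target shows that texture on screen
osg::ref_ptr<osg::Camera> displayCamera = createHUDCamera(0.0, 1.0, 0.0, 1.0);
displayCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER);
displayCamera->addChild(createScreenQuad()); //hypothetical full-screen quad helper
displayCamera->getOrCreateStateSet()->setTextureAttributeAndModes(0, texFinal.get());
root->addChild(displayCamera);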
I've just implemented deferred rendering and am having trouble getting my skybox working. I try rendering my skybox at the very end of my rendering loop and all I get is a black screen. Here's the rendering loop:
//binds the fbo
gBuffer.Bind();
//the shader that writes info to gbuffer
geometryPass.Bind();
glDepthMask(GL_TRUE);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glDisable(GL_BLEND);
//draw geometry
geometryPass.SetUniform("model", transform.GetModel());
geometryPass.SetUniform("mvp", camera.GetViewProjection() * transform.GetModel());
mesh3.Draw();
geometryPass.SetUniform("model", transform2.GetModel());
geometryPass.SetUniform("mvp", camera.GetViewProjection() * transform2.GetModel());
sphere.Draw();
glDepthMask(GL_FALSE);
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glClear(GL_COLOR_BUFFER_BIT);
//shader that calculates lighting
pointLightPass.Bind();
pointLightPass.SetUniform("cameraPos", camera.GetTransform().GetPosition());
for (int i = 0; i < 2; i++)
{
pointLightPass.SetUniformPointLight("light", pointLights[i]);
pointLightPass.SetUniform("mvp", glm::mat4(1.0f));
//skybox.GetCubeMap()->Bind(9);
quad.Draw();
}
//draw skybox
glEnable(GL_DEPTH_TEST);
skybox.Render(camera);
window.Update();
window.SwapBuffers();
The following is the skybox's render function:
glCullFace(GL_FRONT);
glDepthFunc(GL_LEQUAL);
m_transform.SetPosition(camera.GetTransform().GetPosition());
m_shader->Bind();
m_shader->SetUniform("mvp", camera.GetViewProjection() * m_transform.GetModel());
m_shader->SetUniform("cubeMap", 0);
m_cubeMap->Bind(0);
m_cubeMesh->Draw();
glDepthFunc(GL_LESS);
glCullFace(GL_BACK);
And here is the skybox's vertex shader:
layout (location = 0) in vec3 position;
out vec3 TexCoord;
uniform mat4 mvp;
void main()
{
vec4 pos = mvp * vec4(position, 1.0);
gl_Position = pos.xyww;
TexCoord = position;
}
The skybox's fragment shader just sets the output color to texture(cubeMap, TexCoord).
As you can see from the vertex shader, I'm setting the position's z component to w so that the skybox always has a depth of 1.0. I am also setting the depth function to GL_LEQUAL so that it doesn't fail the depth test against the cleared depth value. Shouldn't this draw the skybox only in places where other objects weren't already drawn? Why does it result in a black screen?
I know I have set up the skybox correctly because if I just draw the skybox by itself it shows up just fine.
For a split second I can briefly see the geometry that should be drawn, before the skybox is drawn on top of everything.
Since you're using double buffering, seeing different things must be due to a different frame being drawn. The depth buffer in the default framebuffer isn't being cleared, which I believe is the cause of the temporal instability at least.
In your case, you want the default depth buffer to be the same as the GBuffer when you draw the skybox. A quick way to achieve this is with glBlitFramebuffer, also avoiding the need to clear it:
glBindFramebuffer(GL_READ_FRAMEBUFFER, gbuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(..., GL_DEPTH_BUFFER_BIT, ...);
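For instance, assuming both buffers share the same dimensions (width and height here are placeholders), the full call could look like:
//copy the GBuffer depth into the default framebuffer's depth buffer
//(depth blits must use GL_NEAREST filtering)
glBlitFramebuffer(0, 0, width, height,   //source rectangle in the GBuffer
                  0, 0, width, height,   //destination rectangle on screen
                  GL_DEPTH_BUFFER_BIT, GL_NEAREST);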
Now to explain the black screen when the skybox fills the screen. Without the depth test, of course the skybox just draws. With the depth test, the skybox still draws on the first frame, but shortly after, the second frame clears only the colour buffer. The depth buffer still contains stale skybox values, so the skybox does not get re-drawn for this frame and you're left with black...
However, your geometry pass draws without depth testing enabled, so it should still be visible even if the skybox isn't. Also, this would only happen with GL_LESS, and you have GL_LEQUAL. And you have glDepthMask set to false, which means nothing in your code should write to the default depth buffer. This points to the depth buffer containing other values, perhaps uninitialized, but in my experience it's initially zero. Also, this still happens when the skybox doesn't fill the screen and is drawn as a cube away from the camera, which blows that argument away. Now, perhaps if the geometry failed to draw in the second frame, that would explain it. For that matter, blatant driver bugs would too, but I'm not seeing any problems in the given code.
TLDR: Many unexplained things, so I tried it myself and can't reproduce your problem...
Here's a quick example based on your code and it works fine for me...
(green sphere is the geometry, red cube is the skybox)
gl_Position = pos:
Note the yellow from additive blending even if the skybox is drawn over the top. I would have thought you'd be seeing this too.
gl_Position = pos.xyww:
Now for the code...
//I haven't enabled back face culling, but that shouldn't affect anything
//binds the fbo
fbo.bind();
//the shader that writes info to gbuffer
//geometryPass.Bind(); //fixed pipeline for now
glDepthMask(GL_TRUE);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glDisable(GL_BLEND);
glColor3f(0,1,0);
fly.uploadCamera(); //glLoadMatrixf
sphere.draw();
glDepthMask(GL_FALSE);
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
fbo.unbind(); //glBindFramebuffer(GL_FRAMEBUFFER, 0);
glClear(GL_COLOR_BUFFER_BIT);
//shader that calculates lighting
drawtex.use();
//pointLightPass.SetUniform("cameraPos", camera.GetTransform().GetPosition());
drawtex.set("tex", *(Texture2D*)fbo.colour[0]);
for (int i = 0; i < 2; i++)
{
//pointLightPass.SetUniformPointLight("light", pointLights[i]);
//pointLightPass.SetUniform("mvp", glm::mat4(1.0f));
//skybox.GetCubeMap()->Bind(9);
drawtex.set("modelviewMat", mat44::identity());
quad.draw();
}
drawtex.unuse();
//draw skybox
glEnable(GL_DEPTH_TEST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, fbo.size.x, fbo.size.y, 0, 0, fbo.size.x, fbo.size.y, GL_DEPTH_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
//glCullFace(GL_FRONT);
glDepthFunc(GL_LEQUAL);
//m_transform.SetPosition(camera.GetTransform().GetPosition());
skybox.use();
skybox.set("mvp", fly.camera.getProjection() * fly.camera.getInverse() * mat44::translate(1,0,0));
//m_shader->SetUniform("mvp", camera.GetViewProjection() * m_transform.GetModel());
//m_shader->SetUniform("cubeMap", 0);
//m_cubeMap->Bind(0);
cube.draw();
skybox.unuse();
glDepthFunc(GL_LESS);
//glCullFace(GL_BACK);
//window.Update();
//window.SwapBuffers();
I rendered color and position textures to ColorAttachment0 and ColorAttachment1 in an FBO. The color texture is being drawn onto a cube for deferred rendering. I need to read from the position texture though, which I sent to ColorAttachment1. Here is what I've tried:
public float[] getUV()
{
float[] uv = new float[2];
var pixel = new float[4];
GL.ReadBuffer(ReadBufferMode.ColorAttachment1);
GL.ReadPixels((int)curPoint.X, (int)curPoint.Y, 1, 1, OpenTK.Graphics.OpenGL.PixelFormat.Rgba, PixelType.UnsignedByte, pixel);
uv[0] = pixel[0];
uv[1] = pixel[1];
return uv;
}
I call getUV on a mousedown event. Here is how I created my fbo:
GL.Ext.GenFramebuffers(1, out fboHandle);
GL.Ext.BindFramebuffer(FramebufferTarget.FramebufferExt, fboHandle);
GL.Ext.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.ColorAttachment0Ext, TextureTarget.Texture2D, colorTexture, 0);
GL.Ext.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.ColorAttachment1Ext, TextureTarget.Texture2D, uvTexture, 0);
GL.Ext.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.DepthAttachmentExt, TextureTarget.Texture2D, depthBufferTexture, 0);
The values returned from getUV() are totally wrong; I think I'm missing a step. I've verified that ColorAttachment1 renders correctly and that the mouse coordinates I am sending to ReadPixels are correct.
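For comparison, in raw OpenGL the read-back of a single texel from the second color attachment looks roughly like this (a sketch only: it assumes the position texture uses a float format, and mouseX, mouseY and windowHeight are placeholders):
//bind the FBO for reading and select the second color attachment
glBindFramebuffer(GL_READ_FRAMEBUFFER, fboHandle);
glReadBuffer(GL_COLOR_ATTACHMENT1);
//the pixel type passed to glReadPixels must match what the attachment stores (float here)
float pixel[4] = {0.0f, 0.0f, 0.0f, 0.0f};
glReadPixels(mouseX, windowHeight - mouseY - 1,   //window origin is top-left, GL's is bottom-left
             1, 1, GL_RGBA, GL_FLOAT, pixel);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);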
I have a DirectX 11 C++ application that displays two rectangles with textures and some text.
Both textures are taken from TGA resources (with alpha channel added).
When I run the program, I get the result:
What's wrong? Take a closer look:
The corners of the rectangles are transparent (as they should be). The rest of each texture is set to 30% opacity (and that works well too).
But, when one texture (let's call it texture1) is over another (texture2):
The corners of texture1 are transparent.
But behind them I see the background of window, instead of texture2.
In other words, the transparency of a texture interacts with the background of the window, not with the textures behind it.
What have I done wrong? What part of my program can be responsible for it? Blending options, render states, shader code...?
In my shader, I set:
technique10 RENDER{
pass P0{
SetVertexShader(CompileShader( vs_4_0, VS()));
SetPixelShader(CompileShader( ps_4_0, PS()));
SetBlendState(SrcAlphaBlendingAdd, float4(0.0f, 0.0f, 0.0f, 0.0f),
0xFFFFFFFF);
}
}
P.s.
Of course, when I change the background of the window from blue to another colour, the elements keep their transparency (the corners aren't blue).
edit:
Following @ComicSansMS's suggestion (+1 for the nick, anyway ;p ), I've tried to change the order in which the elements are rendered (I've also moved the smaller texture a bit, to check if the error remains):
The smaller texture is now behind the bigger one. But the problem with the corners remains (now it appears on the second texture). I am almost sure that I draw the rectangle behind BEFORE I render the one in front (I can see the order of the calls in the code).
My depth stencil:
//initialize the description of the stencil state
ZeroMemory(depthStencilsDescs, sizeof(*depthStencilsDescs));
//set up the description of the stencil state
depthStencilsDescs->DepthEnable = true;
depthStencilsDescs->DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilsDescs->DepthFunc = D3D11_COMPARISON_LESS;
depthStencilsDescs->StencilEnable = true;
depthStencilsDescs->StencilReadMask = 0xFF;
depthStencilsDescs->StencilWriteMask = 0xFF;
//stencil operations if pixel is front-facing
depthStencilsDescs->FrontFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilsDescs->FrontFace.StencilDepthFailOp = D3D11_STENCIL_OP_INCR;
depthStencilsDescs->FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilsDescs->FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
//stencil operations if pixel is back-facing
depthStencilsDescs->BackFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilsDescs->BackFace.StencilDepthFailOp = D3D11_STENCIL_OP_DECR;
depthStencilsDescs->BackFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilsDescs->BackFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
//create the depth stencil state
result = device->CreateDepthStencilState(depthStencilsDescs, depthStencilState2D);
The render function:
...
//clear the back buffer
context->ClearRenderTargetView(myRenderTargetView, backgroundColor); //backgroundColor
//clear the depth buffer to 1.0 (max depth)
context->ClearDepthStencilView(depthStencilView, D3D11_CLEAR_DEPTH, 1.0f, 0);
context->OMSetDepthStencilState(depthStencilState2D, 1);
context->VSSetShader(getVertexShader(), NULL, 0);
context->PSSetShader(getPixelShader(), NULL, 0);
for(...){
rectangles[i]->render();
}
The blend state:
D3D11_BLEND_DESC blendDesc;
ZeroMemory(&blendDesc, sizeof(D3D11_BLEND_DESC) );
blendDesc.AlphaToCoverageEnable = false;
blendDesc.IndependentBlendEnable = false;
blendDesc.RenderTarget[0].BlendEnable = true;
blendDesc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
blendDesc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL ;
ID3D11BlendState * blendState;
if (FAILED(device->CreateBlendState(&blendDesc, &blendState))){
}
context->OMSetBlendState(blendState,NULL,0xffffffff);
Your draw order is probably wrong.
Blending does not interact with the pixels of the object behind it; it interacts with the pixels that are currently in the frame buffer.
So if you draw the rectangle in front before the rectangle in the back, its blending operation will interact with what is in the frame buffer at that point (that is, the background).
The solution is obviously to sort your objects by their depth in view space and draw from back-to-front, although that is sometimes easier said than done (especially when allowing arbitrary overlaps).
The other problem seems to be that you draw both rectangles at the same depth value. Your depth test is set to D3D11_COMPARISON_LESS, so as soon as a triangle is drawn on a pixel, the other triangle will fail the depth test for that pixel and won't get drawn at all. This explains the results you get when swapping the drawing order.
Note that if you draw objects back-to-front there is no need to perform a depth test at all, so you might just want to switch it off in this case. Alternatively, you can try to arrange the objects along the depth axis by giving them different Z values, or just switch to D3D11_COMPARISON_LESS_EQUAL for the depth test.
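To illustrate the back-to-front approach, a rough sketch might look like this (Rectangle, position() and render() are stand-ins for your own types, and the view matrix handling is assumed):
#include <algorithm>
#include <vector>
#include <DirectXMath.h>
//sort transparent objects by view-space depth and draw the farthest first,
//so every blend operation sees the objects behind it already in the back buffer
void renderTransparent(std::vector<Rectangle*>& rectangles, DirectX::FXMMATRIX viewMatrix)
{
    std::sort(rectangles.begin(), rectangles.end(),
        [&](const Rectangle* a, const Rectangle* b)
        {
            //transform each object's position into view space; larger z is farther away
            float za = DirectX::XMVectorGetZ(DirectX::XMVector3TransformCoord(a->position(), viewMatrix));
            float zb = DirectX::XMVectorGetZ(DirectX::XMVector3TransformCoord(b->position(), viewMatrix));
            return za > zb; //back-to-front
        });
    for (Rectangle* r : rectangles)
        r->render();
}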
I'm working on a game using DirectX 9. Here's what I'm trying to do:
After the scene is rendered, I want to render a few sprites on top of it: a black cover over the entire scene, plus a few sprites which are masks showing where the cover should have holes. So far I have tried messing with the blend mode, but with no luck. My code setting it up looks like this:
D3DD->SetRenderState(D3DRS_ALPHABLENDENABLE, TRUE);
D3DD->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
D3DD->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
I guess the best way would be to multiply each sprite's alpha, but according to http://msdn.microsoft.com/en-us/library/windows/desktop/bb172508%28v=vs.85%29.aspx no such blend mode is supported. Is there another way to do this?
edit
Following Nico Schertler's answer, here's the code I came up with:
LPDIRECT3DTEXTURE9 pRenderTexture;
LPDIRECT3DSURFACE9 pRenderSurface,
pBackBuffer;
// create texture
D3DD->CreateTexture(1024,
1024,
1,
D3DUSAGE_RENDERTARGET,
D3DFMT_R5G6B5,
D3DPOOL_DEFAULT,
&pRenderTexture,
NULL);
pRenderTexture->GetSurfaceLevel(0,&pRenderSurface);
// store old render target - back buffer
D3DD->GetRenderTarget(0,&pBackBuffer);
// set new render target - texture
D3DD->SetRenderTarget(0,pRenderSurface);
//clear texture to opaque black
D3DD->Clear(0,
NULL,
D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
D3DCOLOR_XRGB(0,0,0),
32.0f,
0);
// set blending
D3DD->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_ZERO);
D3DD->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_ONE);
D3DD->SetRenderState(D3DRS_SRCBLENDALPHA, D3DBLEND_ZERO);
D3DD->SetRenderState(D3DRS_DESTBLENDALPHA, D3DBLEND_SRCALPHA);
D3DD->SetRenderState(D3DRS_SEPARATEALPHABLENDENABLE, TRUE);
//// now I render hole sprites the usual way
// restore back buffe as render target
D3DD->SetRenderTarget(0,pBackBuffer);
// restore blending
D3DD->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
D3DD->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
D3DD->SetRenderState(D3DRS_SRCBLENDALPHA, D3DBLEND_SRCALPHA);
D3DD->SetRenderState(D3DRS_DESTBLENDALPHA, D3DBLEND_INVSRCALPHA);
D3DD->SetRenderState(D3DRS_SEPARATEALPHABLENDENABLE, FALSE);
ulong color = ulong(-1);
Vertex2D v[4];
v[0] = Vertex2D(0, 0, 0);
v[1] = Vertex2D(1023, 0, 0);
v[3] = Vertex2D(1023, 1023, 0);
v[2] = Vertex2D(0, 1023, 0);
D3DD->SetTexture(0, pRenderTexture);
D3DD->SetFVF(D3DFVF_XYZRHW | D3DFVF_DIFFUSE | D3DFVF_TEX1);
D3DD->DrawPrimitiveUP(D3DPT_TRIANGLESTRIP, 2, v, sizeof(Vertex2D));
D3DD->SetTexture(0, NULL);
// release used resources
pRenderTexture->Release();
pRenderSurface->Release();
pBackBuffer->Release();
Unfortunately, the app crashes when restoring the old render target. Any advice?
First, you should create the mask in a separate texture. Then you can add the holes as needed. Finally, draw the mask on the screen:
Initialize the texture
Clear it to opaque black
Using the following blend states:
D3DRS_SRCBLEND -> D3DBLEND_ZERO (hole's color does not matter)
D3DRS_DESTBLEND -> D3DBLEND_ONE (preserve the black color)
D3DRS_SRCBLENDALPHA -> D3DBLEND_ZERO
D3DRS_DESTBLENDALPHA -> D3DBLEND_SRCALPHA
D3DRS_SEPARATEALPHABLENDENABLE -> TRUE
Draw each hole sprite
Restore default blending (src_alpha / inv_src_alpha)
Render the texture as a sprite to the back buffer
The above blend state assumes that the holes are opaque where there should be a hole. Then, the color is calculated by:
blended color = 0 * hole sprite color + 1 * background color
which should always be black.
And the alpha channel is calculated by:
blended alpha = 0 * hole sprite alpha + (1 - hole sprite alpha) * background alpha
So where the hole sprite is opaque, the blended alpha becomes 0. Where it is transparent, the blended alpha is the previous value. Values in between are blended.
I am working on a project in which I am required to capture frames from an external video device and render them on an OpenSceneGraph node. I am also using GLSL shaders. But I don't know how to update the textures at runtime. For other uniforms we need to make callbacks, but do we also need to make callbacks for samplers in GLSL and OpenSceneGraph?
My code looks like this. All I am getting right now is a black window.
osg::ref_ptr<osg::Geometry> pictureQuad = osg::createTexturedQuadGeometry(osg::Vec3(0.0f,0.0f,0.0f),
osg::Vec3(_deviceNameToImageFrameMap[deviceName].frame->s(),0.0f,0.0f), osg::Vec3(0.0f,0.0f,_deviceNameToImageFrameMap[deviceName].frame->t()),
0.0f, 1.0f,_deviceNameToImageFrameMap[deviceName].frame->s(), _deviceNameToImageFrameMap[deviceName].frame->t());
//creating texture and setting up parameters for video frame
osg::ref_ptr<osg::TextureRectangle> myTex= new osg::TextureRectangle(_deviceNameToImageFrameMap[deviceName].frame.get());
myTex->setFilter(osg::Texture::MIN_FILTER,osg::Texture::LINEAR);
myTex->setFilter(osg::Texture::MAG_FILTER,osg::Texture::LINEAR);
myTex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
myTex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
_videoSourceNameToNodeMap[sourceName].geode = new osg::Geode();
_videoSourceNameToNodeMap[sourceName].geode->setDataVariance(osg::Object::DYNAMIC);
_videoSourceNameToNodeMap[sourceName].geode->addDrawable(pictureQuad.get());
//apply texture to node
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setTextureAttributeAndModes(0, myTex.get(), osg::StateAttribute::ON);
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setMode(GL_DEPTH_TEST, osg::StateAttribute::OFF);
_videoSourceNameToNodeMap[sourceName].geode->setDataVariance(osg::Object::DYNAMIC);
//Set uniform sampler
osg::Uniform* srcFrame = new osg::Uniform( osg::Uniform::SAMPLER_2D, "srcFrame" );
srcFrame->set(0);
//Set Uniform Alpha
osg::Uniform* alpha = new osg::Uniform( osg::Uniform::FLOAT, "alpha" );
alpha->set(.5f);
alpha->setUpdateCallback(new ExampleCallback());
//Enable blending
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON);
//Adding blend function to node
osg::BlendFunc *bf = new osg::BlendFunc();
bf->setFunction(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setAttributeAndModes(bf);
//apply shader to quad
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setAttributeAndModes(program, osg::StateAttribute::ON);
//add Uniform to shader
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->addUniform( srcFrame );
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->addUniform( alpha );
Just call image->dirty() and all textures using that image get updated automatically.
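A minimal sketch of the per-frame update (grabNextFrame() and the viewer loop are placeholders for your own capture call and run loop):
while (!viewer.done())
{
    //write the newest video frame into the osg::Image's data buffer
    grabNextFrame(_deviceNameToImageFrameMap[deviceName].frame->data());
    //mark the image dirty so every texture referencing it is re-uploaded
    _deviceNameToImageFrameMap[deviceName].frame->dirty();
    viewer.frame();
}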
You don't need to do this bf->setFunction(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); as this is the default function.