I am working on a project in which I need to capture video frames from an external device and render them on an OpenSceneGraph node. I am also using GLSL shaders, but I don't know how to update the textures at runtime. For other uniforms we need to attach update callbacks, but do we also need callbacks for samplers in GLSL and OpenSceneGraph?
My code looks like this. All I am getting right now is a black window.
osg::ref_ptr<osg::Geometry> pictureQuad = osg::createTexturedQuadGeometry(
    osg::Vec3(0.0f, 0.0f, 0.0f),
    osg::Vec3(_deviceNameToImageFrameMap[deviceName].frame->s(), 0.0f, 0.0f),
    osg::Vec3(0.0f, 0.0f, _deviceNameToImageFrameMap[deviceName].frame->t()),
    0.0f, 1.0f,
    _deviceNameToImageFrameMap[deviceName].frame->s(),
    _deviceNameToImageFrameMap[deviceName].frame->t());
//creating texture and setting up parameters for video frame
osg::ref_ptr<osg::TextureRectangle> myTex = new osg::TextureRectangle(_deviceNameToImageFrameMap[deviceName].frame.get());
myTex->setFilter(osg::Texture::MIN_FILTER,osg::Texture::LINEAR);
myTex->setFilter(osg::Texture::MAG_FILTER,osg::Texture::LINEAR);
myTex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
myTex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
_videoSourceNameToNodeMap[sourceName].geode = new osg::Geode();
_videoSourceNameToNodeMap[sourceName].geode->setDataVariance(osg::Object::DYNAMIC);
_videoSourceNameToNodeMap[sourceName].geode->addDrawable(pictureQuad.get());
//apply texture to node
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setTextureAttributeAndModes(0, myTex.get(), osg::StateAttribute::ON);
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setMode(GL_DEPTH_TEST, osg::StateAttribute::OFF);
_videoSourceNameToNodeMap[sourceName].geode->setDataVariance(osg::Object::DYNAMIC);
//Set uniform sampler
osg::Uniform* srcFrame = new osg::Uniform( osg::Uniform::SAMPLER_2D, "srcFrame" );
srcFrame->set(0);
//Set Uniform Alpha
osg::Uniform* alpha = new osg::Uniform( osg::Uniform::FLOAT, "alpha" );
alpha->set(.5f);
alpha->setUpdateCallback(new ExampleCallback());
//Enable blending
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON);
//Adding blend function to node
osg::BlendFunc *bf = new osg::BlendFunc();
bf->setFunction(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setAttributeAndModes(bf);
//apply shader to quad
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->setAttributeAndModes(program, osg::StateAttribute::ON);
//add Uniform to shader
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->addUniform( srcFrame );
_videoSourceNameToNodeMap[sourceName].geode->getOrCreateStateSet()->addUniform( alpha );
Just call image->dirty() and all textures using that image get updated automatically.
You also don't need the bf->setFunction(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) call, since that is already the default blend function.
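As an illustration, here is a minimal sketch of a frame-update hook, assuming the capture API hands you a raw RGBA buffer (all names here are hypothetical):
void onNewFrame(unsigned char* data, int width, int height, osg::Image* frame)
{
    // Hand the new buffer to the osg::Image backing the texture.
    // USE_NEW_DELETE assumes 'data' was allocated with new[] and
    // transfers ownership of it to the image.
    frame->setImage(width, height, 1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE,
                    data, osg::Image::USE_NEW_DELETE);
    frame->dirty(); // every texture using this image re-uploads next frame
}
The sampler uniform itself never needs a callback: it only holds the texture unit index (0 here), which doesn't change.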
I'm attempting to port a pathtracer to GLSL, and to do this I need to modify a shader sample program to use a texture as the framebuffer instead of the backbuffer.
This is the vertex shader
#version 130
out vec2 texCoord;
// https://rauwendaal.net/2014/06/14/rendering-a-screen-covering-triangle-in-opengl/
void main()
{
float x = -1.0 + float((gl_VertexID & 1) << 2);
float y = -1.0 + float((gl_VertexID & 2) << 1);
texCoord.x = x;
texCoord.y = y;
gl_Position = vec4(x, y, 0, 1);
}
This is the setup code
gl.GenFramebuffersEXT(2, _FrameBuffer);
gl.BindFramebufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, _FrameBuffer[0]);
gl.GenRenderbuffersEXT(2, _RaytracerBuffer);
gl.BindRenderbufferEXT(OpenGL.GL_RENDERBUFFER_EXT, _RaytracerBuffer[0]);
gl.RenderbufferStorageEXT(OpenGL.GL_RENDERBUFFER_EXT, OpenGL.GL_RGBA32F, (int)viewport[2], (int)viewport[3]);
And this is the runtime code
// Get a reference to the raytracer shader.
var shader = shaderRayMarch;
// setup first framebuffer (RGB32F)
gl.BindFramebufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, _FrameBuffer[0]);
gl.Viewport((int)viewport[0], (int)viewport[1], (int)viewport[2], (int)viewport[3]); //0,0,width,height)
gl.FramebufferRenderbufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, OpenGL.GL_COLOR_ATTACHMENT0_EXT, OpenGL.GL_RENDERBUFFER_EXT, _RaytracerBuffer[0]);
gl.FramebufferRenderbufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, OpenGL.GL_DEPTH_ATTACHMENT_EXT, OpenGL.GL_RENDERBUFFER_EXT, 0);
uint [] DrawBuffers = new uint[1];
DrawBuffers[0] = OpenGL.GL_COLOR_ATTACHMENT0_EXT;
gl.DrawBuffers(1, DrawBuffers);
shader.Bind(gl);
shader.SetUniform1(gl, "screenWidth", viewport[2]);
shader.SetUniform1(gl, "screenHeight", viewport[3]);
shader.SetUniform1(gl, "fov", 40.0f);
gl.DrawArrays(OpenGL.GL_TRIANGLES, 0, 3);
shader.Unbind(gl);
int[] pixels = new int[(int)viewport[2]*(int)viewport[3]*4];
gl.GetTexImage(_RaytracerBuffer[0], 0, OpenGL.GL_RGBA32F, OpenGL.GL_INT, pixels);
But when I inspect the pixels coming back from GetTexImage they're black. When I bind this texture in a further transfer shader they remain black. I suspect I'm missing something in the setup code for the renderbuffer and would appreciate any suggestions you have!
Renderbuffers are not textures. So when you call glGetTexImage on your renderbuffer, you probably got an OpenGL error, and when you tried to bind it as a texture with glBindTexture, you probably got another one.
If you want to render to a texture, you should render to a texture. As in glGenTextures/glTexImage2D/glFramebufferTexture2D.
Also, please stop using EXT_framebuffer_object. You should be using the core FBO feature, which requires no "EXT" suffixes. Not unless you're using a really ancient OpenGL version.
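For reference, a minimal core-profile sketch of that texture-based setup (width, height and pixels are assumed to be declared elsewhere):
GLuint fbo, tex;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
// always verify completeness before rendering
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{ /* handle the error */ }
// ... render the fullscreen triangle ... then read back from the texture:
glBindTexture(GL_TEXTURE_2D, tex);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, pixels);
Note the readback type here is GL_FLOAT to match the GL_RGBA32F storage; reading into an int buffer with GL_INT, as in the original code, is another thing to fix.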
I am trying to implement deferred shading according to tutorials 35-37 on http://ogldev.atspace.co.uk/www/tutorial35/tutorial35.html with OpenSceneGraph 3.4.0.
Right now, I'm struggling to get the stencil pass correctly, so I simplified my code as far as possible.
As a result, right now I am using three cameras:
one RTT camera that renders the position, normal and color textures and writes the depth buffer,
one stencil camera (set up as an RTT camera with no color output in the fragment shader) that writes into the stencil buffer using the depth test results, and
one point light camera (set up as a HUD camera) that does the deferred shading and is set to write only where the stencil buffer is not 0.
When I try to display the depth buffer with the point light camera it works.
When I set the clear mask of pointLightCamera to STENCIL_BUFFER_BIT and set the stencil clear to 1, it displays everything. It displays nothing when setting it to 0.
When I disable the clear settings in the pointLightCamera (as they should be) and enable any clear settings for the stencil buffer in the stencil camera, it has no effect whatsoever.
The stencilFunc and stencilOperation are set as they are in the tutorials.
When I start the program it won't display anything (so all the stencil values are 0).
This leads me to the conclusion that my stencilCamera doesn't actually write into the stencil buffer, yet I have no idea why. I couldn't find any more help in the OSG examples or internet forums, and I have tried everything I could think of.
Here is my setup for the cameras and viewer (the rttCamera has my scene group node as a child, which contains a model; the stencilCamera has a sphere model as a child for the light volume; and my pointLightCamera has a screenQuad as a child).
texDepth->setTextureSize(1024, 1024);
texDepth->setInternalFormat(GL_DEPTH24_STENCIL8_EXT);
texDepth->setSourceFormat(GL_DEPTH_STENCIL_EXT);
texDepth->setSourceType(GL_UNSIGNED_INT_24_8_EXT);
osg::ref_ptr<osg::Texture2D> texColor = createTexture();
osg::ref_ptr<osg::Texture2D> texPosition = createTexture();
osg::ref_ptr<osg::Texture2D> texNormal = createTexture();
//1. pass camera and set up
osg::ref_ptr<osg::Camera> rttCamera = createRTTCamera(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth, false);
rttCamera->setRenderOrder(osg::Camera::PRE_RENDER, 0);
rttCamera->attach(osg::Camera::COLOR_BUFFER0, texColor);
rttCamera->attach(osg::Camera::COLOR_BUFFER1, texPosition);
rttCamera->attach(osg::Camera::COLOR_BUFFER2, texNormal);
rttCamera->setClearColor(osg::Vec4(0.0, 0.0, 0.0, 0.0));
osg::ref_ptr<osg::Stencil> rttStencil = new osg::Stencil;
rttStencil->setWriteMask(0);
rttCamera->getOrCreateStateSet()->setAttribute(rttStencil, osg::StateAttribute::ON);
rttCamera->setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
osg::ref_ptr<osg::StateSet> ss = rttCamera->getOrCreateStateSet();
osg::ref_ptr<osg::Program> rttProg = new osg::Program;
osg::Shader* vertShader = osgDB::readShaderFile("pass1.vert");
osg::Shader* fragShader = osgDB::readShaderFile("pass1.frag");
rttProg->addShader(vertShader);
rttProg->addShader(fragShader);
ss->setAttributeAndModes(rttProg.get(), osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
osg::ref_ptr<osg::BlendFunc> bf = new osg::BlendFunc;
bf->setFunction(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
ss->setAttributeAndModes(bf, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
rttCamera->addChild(scene.get());
//2. pass: stencil pass camera and set up
osg::ref_ptr<osg::Camera> stencilCamera = createRTTCamera(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth, false);//createHUDCamera(0.0, 1.0, 0.0, 1.0);//
stencilCamera->setRenderOrder(osg::Camera::PRE_RENDER, 2);
stencilCamera->attach(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth);//depth buffer was filled by rttCamera
stencilCamera->getOrCreateStateSet()->setMode(GL_STENCIL_TEST, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
stencilCamera->getOrCreateStateSet()->setMode(GL_DEPTH_TEST, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE); //depth test result will fill stencil buffer
osg::ref_ptr<osg::Depth> depth = new osg::Depth;
depth->setWriteMask(false); //depth test is needed to compare scene to light volume, but light volume must not write into depth buffer
stencilCamera->getOrCreateStateSet()->setAttribute(depth, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
stencilCamera->getOrCreateStateSet()->setMode(GL_CULL_FACE, osg::StateAttribute::OFF | osg::StateAttribute::OVERRIDE);
stencilCamera->setClearMask(GL_STENCIL_BUFFER_BIT);
osg::ref_ptr<osg::StencilTwoSided> stencilWrite = new osg::StencilTwoSided;
stencilWrite->setFunction(osg::StencilTwoSided::FRONT, osg::StencilTwoSided::ALWAYS, 0, 0);
stencilWrite->setFunction(osg::StencilTwoSided::BACK, osg::StencilTwoSided::ALWAYS, 0, 0);
stencilWrite->setOperation(osg::StencilTwoSided::FRONT, osg::StencilTwoSided::KEEP, osg::StencilTwoSided::DECR_WRAP, osg::StencilTwoSided::KEEP);
stencilWrite->setOperation(osg::StencilTwoSided::BACK, osg::StencilTwoSided::KEEP, osg::StencilTwoSided::INCR_WRAP, osg::StencilTwoSided::KEEP);
stencilWrite->setWriteMask(osg::StencilTwoSided::FRONT, 0xFF); //may not be needed
stencilWrite->setWriteMask(osg::StencilTwoSided::BACK, 0xFF);
stencilCamera->getOrCreateStateSet()->setAttribute(stencilWrite, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
stencilCamera->addChild(mtSphere);//lights.get());
osg::ref_ptr<osg::Program> stencilProg = new osg::Program;
stencilProg->addShader(osgDB::readShaderFile("nullTechnique.vert"));
stencilProg->addShader(osgDB::readShaderFile("nullTechnique.frag"));
stencilCamera->getOrCreateStateSet()->setAttributeAndModes(stencilProg.get(), osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
//3. pass: point light pass camera and set up
osg::ref_ptr<osg::Camera> pointLightCamera = createHUDCamera(0.0, 1.0, 0.0, 1.0);
pointLightCamera->setClearMask(0);
pointLightCamera->setRenderOrder(osg::Camera::POST_RENDER, 1);//PRE_RENDER, 2);
pointLightCamera->addChild(screenQuad);
ss = pointLightCamera->getOrCreateStateSet();
osg::ref_ptr<osg::Stencil> stencilRead = new osg::Stencil;
stencilRead->setFunction(osg::Stencil::NOTEQUAL, 1, 0xFF); //render only where stencil buffer is != 0 (this will be 1 as set in stencil pass)
stencilRead->setWriteMask(0);//it should not write into the stencil buffer it reads from
ss->setAttribute(stencilRead, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
osg::ref_ptr<osg::Depth> depthRead = new osg::Depth;
depth->setWriteMask(false);
ss->setAttribute(depth, osg::StateAttribute::ON);
pointLightCamera->attach(osg::Camera::PACKED_DEPTH_STENCIL_BUFFER, texDepth);
ss->setMode(GL_STENCIL_TEST, osg::StateAttribute::ON | osg::StateAttribute::OVERRIDE);
ss->setMode(GL_DEPTH_TEST, osg::StateAttribute::OFF);
ss->setMode(GL_BLEND, osg::StateAttribute::ON); //all light passes shall add their renderings to the previous light passes
osg::ref_ptr<osg::BlendFunc> renderAddBlendFunc = new osg::BlendFunc;
renderAddBlendFunc->setFunction(GL_ONE, GL_ONE);
ss->setAttribute(renderAddBlendFunc, osg::StateAttribute::ON);
osg::ref_ptr<osg::BlendEquation> renderAddBlendEq = new osg::BlendEquation;
renderAddBlendEq->setEquation(osg::BlendEquation::FUNC_ADD);
ss->setAttribute(renderAddBlendEq, osg::StateAttribute::ON);
osg::ref_ptr<osg::CullFace> cullFacePointLightPass = new osg::CullFace(osg::CullFace::FRONT);
osg::ref_ptr<osg::Program> pointLightProg = new osg::Program;
vertShader = osgDB::readShaderFile("pass2.vert");
fragShader = osgDB::readShaderFile("pass2.frag");
pointLightProg->addShader(vertShader);
pointLightProg->addShader(fragShader);
ss->setAttributeAndModes(pointLightProg.get(), osg::StateAttribute::ON);
ss->setTextureAttributeAndModes(0, texColor);
ss->setTextureAttributeAndModes(1, texPosition);
ss->setTextureAttributeAndModes(2, texNormal);
ss->setTextureAttributeAndModes(3, texDepth);
ss->addUniform(new osg::Uniform("tDiffuse", 0));
ss->addUniform(new osg::Uniform("tPosition", 1));
ss->addUniform(new osg::Uniform("tNormals", 2));
ss->addUniform(new osg::Uniform("tDepth", 3));
ss->addUniform(new osg::Uniform("lightPosition", osg::Vec3(0.0, 0.0, 0.0)));
osg::Vec3 eye, center, up;
rttCamera->getViewMatrixAsLookAt(eye, center, up);
ss->addUniform(new osg::Uniform("cameraPosition", eye));
pointLightCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER);
osg::ref_ptr<osg::Group> root = new osg::Group;
root->addChild(rttCamera);
root->addChild(stencilCamera);
root->addChild(pointLightCamera);
osgViewer::Viewer viewer;
viewer.setCameraManipulator(new osgGA::TrackballManipulator);
viewer.getCamera()->setComputeNearFarMode(osg::CullSettings::DO_NOT_COMPUTE_NEAR_FAR);
viewer.setSceneData(root.get());
osg::DisplaySettings::instance()->setMinimumNumStencilBits(8);
(createRTTCamera and createHUDCamera are taken from the OSG Cookbook.)
Okay, I finally found the problem.
I was using FRAME_BUFFER_OBJECT as the render target implementation for my RTT cameras, but FRAME_BUFFER for my HUD camera, and I later noticed that this produced an OpenGL "invalid value" error. Now that I use FRAME_BUFFER_OBJECT for all three cameras, they can share the stencil values and it works. I just needed to write the result of my former HUD camera into a new texture, which I then display with a final HUD camera that uses FRAME_BUFFER as its render target.
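In code, the fix boils down to something like this sketch (texFinal is a hypothetical new texture created like the others, and the final quad reuses the screenQuad):
rttCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
stencilCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
pointLightCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
pointLightCamera->attach(osg::Camera::COLOR_BUFFER0, texFinal); // light pass now renders to a texture
// a last camera displays that texture in the window framebuffer
osg::ref_ptr<osg::Camera> displayCamera = createHUDCamera(0.0, 1.0, 0.0, 1.0);
displayCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER);
displayCamera->getOrCreateStateSet()->setTextureAttributeAndModes(0, texFinal);
displayCamera->addChild(screenQuad);
root->addChild(displayCamera);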
Okay, first of all, I'm really new to DirectX 11 and this is actually my first project using it. I'm also relatively new to computer graphics in general, so I might have some concepts wrong, although for this particular case I do not think so. My code is based on the RasterTek tutorials.
In trying to implement a screen-space subsurface scattering shader, I need to render the scene to a 2D texture and then perform a Gaussian blur on the resulting image.
That part seems to be working fine as when using the Visual Studio graphics debugger the output seems to be what I expect.
However, after having done all the post processing, I render a quad to the backbuffer using a simple shader that uses the final output of the blur as a resource. This always gives me a black screen. When I debug my pixel shader with the VS graphics debugger, it seems like the Sample(texture, uv) method always returns (0,0,0,1) when trying to sample that texture.
The pixel shader works fine if I use a different texture, like some normal map or whatever, as a resource, just not when using any of the rendertargets from the previous passes.
The behaviour is particularly weird because the actual blur shader works fine when using any of the rendertargets as a resource.
I know I cannot use a rendertarget as both input and output but I think I have that covered since I call OMSetRenderTargets so I can render to the backbuffer.
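As an aside, since a texture can't be bound as a shader resource and a render target at the same time, it's common practice to explicitly null the pixel-shader resource slots between passes rather than relying on the runtime's automatic hazard resolution. A minimal sketch (the slot count and context variable are assumptions):
// Null out the PS shader-resource slots used by the previous pass
// before binding those same textures as render targets again.
ID3D11ShaderResourceView* nullSRVs[4] = { nullptr, nullptr, nullptr, nullptr };
deviceContext->PSSetShaderResources(0, 4, nullSRVs);
// ...now OMSetRenderTargets can bind them as outputs without a hazard warning.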
Here's the step by step of my implementation:
Set Render Targets
Clear them
Clear Depth buffer
Render scene to texture
Turn off Z buffer
Render to quad
Perform horizontal blur
Perform vertical blur
Set back buffer as render target
Clear back buffer
Render final output to quad
Turn z buffer on
Present back buffer
Here is the shader for the quad:
Texture2D shaderTexture : register(t0);
SamplerState SampleType : register(s0);
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
};
float4 main(PixelInputType input) : SV_TARGET
{
return shaderTexture.Sample(SampleType, input.tex);
}
Here's the relevant C++ code.
This is how I set the render targets
void DeferredBuffers::SetRenderTargets(ID3D11DeviceContext* deviceContext, bool activeRTs[BUFFER_COUNT]){
vector<ID3D11RenderTargetView*> rts = vector<ID3D11RenderTargetView*>();
for (int i = 0; i < BUFFER_COUNT; ++i){
if (activeRTs[i]){
rts.push_back(m_renderTargetViewArray[i]);
}
}
deviceContext->OMSetRenderTargets(rts.size(), &rts[0], m_depthStencilView);
// Set the viewport.
deviceContext->RSSetViewports(1, &m_viewport);
}
I use a ping-pong approach with the render targets for the blur.
I render the scene to the mainTarget and depth information to the depthMap. The first pass performs a horizontal blur onto a third target (horizontalBlurred), and then I use that one as input for the vertical blur, which renders back to the mainTarget and to the finalTarget. It's a loop because on the vertical pass I'm supposed to blend the PS output with what's on the finalTarget. I left that code (and some other stuff) out as it's not relevant.
The m_Fullscreen is the quad.
bool activeRenderTargets[4] = { true, true, false, false };
// Set the render buffers to be the render target.
m_ShaderManager->getDeferredBuffers()->SetRenderTargets(m_D3D->GetDeviceContext(), activeRenderTargets);
// Clear the render buffers.
m_ShaderManager->getDeferredBuffers()->ClearRenderTargets(m_D3D->GetDeviceContext(), 0.25f, 0.0f, 0.0f, 1.0f);
m_ShaderManager->getDeferredBuffers()->ClearDepthStencil(m_D3D->GetDeviceContext());
// Render the scene to the render buffers.
RenderSceneToTexture();
// Get the matrices.
m_D3D->GetWorldMatrix(worldMatrix);
m_Camera->GetBaseViewMatrix(baseViewMatrix);
m_D3D->GetOrthoMatrix(projectionMatrix);
// Turn off the Z buffer to begin all 2D rendering.
m_D3D->TurnZBufferOff();
// Put the full screen ortho window vertex and index buffers on the graphics pipeline to prepare them for drawing.
m_FullScreenWindow->Render(m_D3D->GetDeviceContext());
ID3D11ShaderResourceView* mainTarget = m_ShaderManager->getDeferredBuffers()->GetShaderResourceView(0);
ID3D11ShaderResourceView* horizontalBlurred = m_ShaderManager->getDeferredBuffers()->GetShaderResourceView(2);
ID3D11ShaderResourceView* depthMap = m_ShaderManager->getDeferredBuffers()->GetShaderResourceView(1);
ID3D11ShaderResourceView* finalTarget = m_ShaderManager->getDeferredBuffers()->GetShaderResourceView(3);
activeRenderTargets[1] = false; //depth map is never a render target again
for (int i = 0; i < numBlurs; ++i){
activeRenderTargets[0] = false; //main target is resource in this pass
activeRenderTargets[2] = true; //horizontal blurred target
activeRenderTargets[3] = false; //unbind final target
m_ShaderManager->getDeferredBuffers()->SetRenderTargets(m_D3D->GetDeviceContext(), activeRenderTargets);
m_ShaderManager->RenderScreenSpaceSSS_HorizontalBlur(m_D3D->GetDeviceContext(), m_FullScreenWindow->GetIndexCount(), worldMatrix, baseViewMatrix, projectionMatrix, mainTarget, depthMap);
activeRenderTargets[0] = true; //rendering to main target
activeRenderTargets[2] = false; //horizontal blurred is resource
activeRenderTargets[3] = true; //rendering to final target
m_ShaderManager->getDeferredBuffers()->SetRenderTargets(m_D3D->GetDeviceContext(), activeRenderTargets);
m_ShaderManager->RenderScreenSpaceSSS_VerticalBlur(m_D3D->GetDeviceContext(), m_FullScreenWindow->GetIndexCount(), worldMatrix, baseViewMatrix, projectionMatrix, horizontalBlurred, depthMap);
}
m_D3D->SetBackBufferRenderTarget();
m_D3D->BeginScene(0.0f, 0.0f, 0.5f, 1.0f);
// Reset the viewport back to the original.
m_D3D->ResetViewport();
m_ShaderManager->RenderTextureShader(m_D3D->GetDeviceContext(), m_FullScreenWindow->GetIndexCount(), worldMatrix, baseViewMatrix, projectionMatrix, depthMap);
m_D3D->TurnZBufferOn();
m_D3D->EndScene();
And, finally, here are 3 screenshots from my graphics log.
They show rendering the scene onto the mainTarget, a vertical pass which takes the horizontalBlurred resource as input, and finally rendering onto the backBuffer, which is what's failing. You can see the resource bound to the shader and how the output is just a black screen. I purposely set the background to red to find out if it was sampling with wrong coordinates, but nope.
So, has anyone ever experienced something like this? What could be the cause of this bug?
Thanks in advance for any help!
EDIT: The Render_SOMETHING_SOMETHING_shader methods handle binding all the resources, setting the shaders, draw calls etc etc. If necessary I can post them here, but I don't think it's that relevant.
I suspect I'm not correctly rendering particle positions to my FBO, or correctly sampling those positions when rendering, though that may not be the actual problem with my code, admittedly.
I have a complete jsfiddle here: http://jsfiddle.net/p5mdv/53/
A brief overview of the code:
Initialization:
Create an array of random particle positions in x,y,z
Create an array of texture sampling locations (e.g. for 2 particles, first particle at 0,0, next at 0.5,0)
Create a Frame Buffer Object and two particle position textures (one for input, one for output)
Create a full-screen quad (-1,-1 to 1,1)
Particle simulation:
Render a full-screen quad using the particle program (bind frame buffer, set viewport to the dimensions of my particle positions texture, bind input texture, and draw a quad from -1,-1 to 1,1). Input and output textures are swapped each frame.
Particle fragment shader samples the particle texture at the current fragment position (gl_FragCoord.xy), makes some modifications, and writes out the modified position
Particle rendering:
Draw using the vertex buffer of texture sampling locations
Vertex shader uses the sampling location to sample the particle position texture, then transforms them using view projection matrix
Draw the particle using a sprite texture (gl.POINTS)
Questions:
Am I correctly setting the viewport for the FBO in the particle simulation step? I.e. am I correctly rendering a full-screen quad?
// 6 2D vertices = 12 floats
var vertexBuffer = new Float32Array(12);
// -1,-1 to 1,1 screen quad
vertexBuffer[0] = -1;
vertexBuffer[1] = -1;
vertexBuffer[2] = -1;
vertexBuffer[3] = 1;
vertexBuffer[4] = 1;
vertexBuffer[5] = 1;
vertexBuffer[6] = -1;
vertexBuffer[7] = -1;
vertexBuffer[8] = 1;
vertexBuffer[9] = 1;
vertexBuffer[10] = 1;
vertexBuffer[11] = -1;
// Create GL buffers with this data
g.particleSystem.vertexObject = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, g.particleSystem.vertexObject);
gl.bufferData(gl.ARRAY_BUFFER, vertexBuffer, gl.STATIC_DRAW);
...
gl.viewport(0, 0,
g.particleSystem.particleFBO.width,
g.particleSystem.particleFBO.height);
...
// Set the quad as vertex buffer
gl.bindBuffer(gl.ARRAY_BUFFER, g.screenQuad.vertexObject);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
// Draw!
gl.drawArrays(gl.TRIANGLES, 0, 6);
Am I correctly setting the texture coordinates to sample the particle positions?
for(var i=0; i<numParticles; i++)
{
var pclIdx = i * 3; // assuming 3 entries per particle, as the indices below imply
// Coordinates of particle within texture (normalized)
var texCoordX = Math.floor(i % texSize.width) / texSize.width;
var texCoordY = Math.floor(i / texSize.width) / texSize.height;
particleIndices[ pclIdx ] = texCoordX;
particleIndices[ pclIdx + 1 ] = texCoordY;
particleIndices[ pclIdx + 2 ] = 1; // not used in shader
}
The relevant shaders:
Particle simulation fragment shader:
precision mediump float;
uniform sampler2D mParticleTex;
void main()
{
// Current pixel is the particle's position on the texture
vec2 particleSampleCoords = gl_FragCoord.xy;
vec4 particlePos = texture2D(mParticleTex, particleSampleCoords);
// Move the particle up
particlePos.y += 0.1;
if(particlePos.y > 2.0)
{
// Reset
particlePos.y = -2.0;
}
// Write particle out to texture
gl_FragColor = particlePos;
}
Particle rendering vertex shader:
attribute vec4 vPosition;
uniform mat4 u_modelViewProjMatrix;
uniform sampler2D mParticleTex;
void main()
{
vec2 particleSampleCoords = vPosition.xy;
vec4 particlePos = texture2D(mParticleTex, particleSampleCoords);
gl_Position = u_modelViewProjMatrix * particlePos;
gl_PointSize = 10.0;
}
Let me know if there's a better way to go about debugging this, if nothing else. I'm using webgl-debug to find gl errors and logging what I can to the console.
Your quad is facing away from the view, so I tried adding gl.disable(gl.CULL_FACE); still no result.
Then I noticed that while resizing the window panel with the canvas, it actually shows one black, square-shaped particle. So it seems the rendering loop is not right.
If you look at the console log, it fails to load the particle image, and it also says that the FBO size is 512x1, which is not good.
Some function declarations, such as getTexSize, do not exist. (?!)
The code needs tidying and grouping, and always check the console if you're already logging to it.
Hope this helps a bit.
Found the problem.
gl_FragCoord ranges from (0,0) to (screenWidth, screenHeight); I was wrongly thinking it went from (0,0) to (1,1).
I had to pass in shader variables for width and height, then normalize the sample coordinates before sampling from the texture.
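The corrected simulation shader then looks roughly like this (uResolution is the new uniform, set from JavaScript to the particle texture's width and height):
precision mediump float;
uniform sampler2D mParticleTex;
uniform vec2 uResolution; // particle texture size in pixels
void main()
{
    // Normalize gl_FragCoord from pixels to [0,1] texture coordinates
    vec2 particleSampleCoords = gl_FragCoord.xy / uResolution;
    vec4 particlePos = texture2D(mParticleTex, particleSampleCoords);
    // Move the particle up, wrapping around when it leaves the volume
    particlePos.y += 0.1;
    if(particlePos.y > 2.0)
    {
        particlePos.y = -2.0;
    }
    gl_FragColor = particlePos;
}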
Hi guys and girls. The problem I have is that I have successfully loaded 3 BMP textures (or at least I hope I have) using
char* textureFilenames[textureCount] = {"cement.bmp","hedge.bmp","sky.bmp"};
and I'm currently applying a texture using
glBegin(GL_QUADS); // begin/end pair assumed: the four vertices below form a quad
glTexCoord2f(0.0,0.0);
glVertex3f(-150.0, 0.0, -150.0);
glTexCoord2f(1.0,0.0);
glVertex3f(-150.0, 0.0, 150.0);
glTexCoord2f(1.0,1.0);
glVertex3f(150.0, 0.0, 150.0);
glTexCoord2f(0.0,1.0);
glVertex3f(150.0, 0.0, -150.0);
glEnd();
However, it currently only picks up sky.bmp. Is there any way I can select one of the others?
OpenGL is a state machine. The current texture is part of the OpenGL state. The last texture you bind with glBindTexture() will be used until you bind another.
glBindTexture(GL_TEXTURE_2D, cement_texture_id);
// ... following geometry will use the cement texture
glBindTexture(GL_TEXTURE_2D, hedge_texture_id);
// ... hedge texture
glBindTexture(GL_TEXTURE_2D, sky_texture_id);
// ... sky texture
The "OpenGL RedBook" has a chapter on texture mapping that covers the basics.
Your mistake lies in a misunderstanding of OpenGL: OpenGL is not a scene graph! It's best to think of OpenGL as a set of drawing tools for painting on a canvas called the framebuffer.
So in using OpenGL you must put your mind in a state similar to drawing a picture with pencils, eraser, brush and paint. First you prepare your tools: textures are like "sheets of colour", and meshes of vertices are like a delicate "brush".
Like an artist, the very first step is to prepare those tools: your geometry (i.e. the meshes), which you load into fast memory with glBufferData if you use Vertex Buffer Objects, and your paint and dye, the textures. This is what you do in the "init" phase (I prefer to do this on demand, so that users don't see a "loading" screen).
First you load all your objects (geometry in VBOs, textures etc.); you do this exactly once for each required object, i.e. once an object is prepared (i.e. complete) you don't have to re-upload it.
Then, in every drawing iteration, for each object you want to draw you bind the needed OpenGL objects to their targets and then perform the drawing calls, which are executed using the currently bound objects.
I.e. something like this (please use common sense to fill in the missing functions in your mind):
struct texture; // some structure holding texture information, details don't matter here
struct geometry; // structure holding object geometry and cross references
texture *textures;
geometry *geometries;
texture * load_texture(char const *texture_name)
{
texture *tex;
if( texture_already_loaded(textures, texture_name) )
tex = get_texture(texture_name);
else
tex = load_texture_data(textures, texture_name);
return tex;
}
geometry * load_geometry(char const *geometry_name)
{
geometry * geom;
if( geometry_already_loaded(geometries, geometry_name) )
geom = get_geometry(geometry_name);
else
geom = load_geometry_data(geometries, geometry_name);
if( geom->texture_name )
geom->texture = load_texture(geom->texture_name);
return geom;
}
void onetime_initialization()
{
for(geometry_list_entry * geom = geometry_name_list; geom ; geom = geom->next)
geom->geometry = load_geometry(geom->name);
}
void drawGL()
{
glViewport(...);
glClearColor(...);
glClear(...);
glMatrixMode(GL_PROJECTION);
// ...
glMatrixMode(GL_MODELVIEW);
// ...
for(geometry_list_entry * geom = geometry_name_list; geom ; geom = geom->next)
{
glMatrixMode(GL_MODELVIEW); // this is not redundant!
glPushMatrix();
apply_geometry_transformation(geom->transformation); // calls the proper glTranslate, glRotate, glLoadMatrix, glMultMatrix, etc.
glBindTexture(GL_TEXTURE_2D, geom->texture->ID);
draw_geometry(geom);
glMatrixMode(GL_MODELVIEW); // this is not redundant!
glPopMatrix();
}
// ...
swapBuffers();
}