Let's say I have a Texture2D object. What I want is to mask it using another Texture2D object (I already have a basic masking shader written), and then assign this new masked texture to the original Texture2D. On the next iteration I could then do this again with another mask, and again, and again...
Drawing all the masks into a single texture using RenderTexture is not an option since 1) there would be huge performance issues and 2) the masks are generated at runtime.
Here's what I want to do:
Has anyone else ever had this problem, or does anyone know a solution? Thanks :)
EDIT:
Here's what I'm doing for now:
// Ok, so GroundLayer is a layer that sets up GroundZones. GroundZones are 'slices' of the screen that can be 'dug'
// into, the act of digging being masking the main ground texture with another texture containing only a line.
// This is where cumulative masking comes in handy, since we don't want to have to track each point where the user
// dug. We want to create a sort of mutable texture, but simpler.
void GroundLayer::digLine(Point start, Point end) {
    // To dig a line, we first need to get the texture of the zone we will be digging into. Then we get the
    // relative positions of the start and end points in the zone's node space. Finally we use the custom shader
    // to draw a mask over the existing texture.
    for (auto it = _children.begin(); it != _children.end(); it++) {
        GroundZone *zone = static_cast<GroundZone *>(*it); // We are sure that all the children are GroundZones
        Point nodeStart = zone->convertToNodeSpace(start);
        Point nodeEnd = zone->convertToNodeSpace(end);
        // Now that we have our two points converted to node space, it's easy to draw a mask that contains a line
        // going from the start point to the end point and that is then applied over the current texture.
        Size groundZoneSize = zone->getContentSize();
        RenderTexture *rt = RenderTexture::create(groundZoneSize.width, groundZoneSize.height);
        rt->retain();
        rt->beginWithClear(0, 0, 0, 0); {
            // Draw a line going from start to end in the texture; the line will act as a mask over the
            // existing texture.
            DrawNode *line = DrawNode::create();
            line->drawSegment(nodeStart, nodeEnd, 20, Color4F::BLACK);
            line->visit();
        } rt->end();
        // We mask the current texture with the mask texture using a custom shader. The resulting texture will
        // then be assigned to the zone's texture, making a new texture that can be used again on the next call.
        Texture2D *currentTexture = zone->getTexture();
        Texture2D *maskTexture = rt->getSprite()->getTexture();
        maskTexture->setAliasTexParameters(); // Disable linear interpolation on the mask
        // Set up the shader for currentTexture.
        // The custom shader is already loaded in the init method of GroundLayer.
        currentTexture->setShaderProgram(ShaderCache::getInstance()->getProgram(CustomShader_AlphaMask_frag_key));
        GLProgram *shader = currentTexture->getShaderProgram();
        int maskTexUniformLoc = glGetUniformLocation(shader->getProgram(), "u_alphaMaskTexture");
        glUniform1i(maskTexUniformLoc, 1);
        // Actually apply the shader over currentTexture
        shader->use();
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, maskTexture->getName());
        glActiveTexture(GL_TEXTURE0);
        // [I'm at a loss here :( (or sooner...)]
    }
}
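For what it's worth, here is a sketch of one way the end of the loop body above could be completed: render the masked sprite into a second RenderTexture and assign the result back to the zone, so each call masks the previously masked texture. This is untested and only an assumption built on the cocos2d-x 3.x API already used above (with the deferred renderer, the raw GL binds may need to move into a custom render command):

// Sketch only: draw currentTexture through the masking shader into a new
// RenderTexture, then swap the result in as the zone's texture.
Sprite *maskedSprite = Sprite::createWithTexture(currentTexture);
maskedSprite->setShaderProgram(shader); // the alpha-mask program set up above
maskedSprite->setPosition(groundZoneSize.width / 2, groundZoneSize.height / 2);
RenderTexture *result = RenderTexture::create(groundZoneSize.width, groundZoneSize.height);
result->beginWithClear(0, 0, 0, 0); {
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, maskTexture->getName()); // mask on unit 1, matching u_alphaMaskTexture = 1
    glActiveTexture(GL_TEXTURE0);
    maskedSprite->visit(); // draws with the masking shader applied
} result->end();
zone->setTexture(result->getSprite()->getTexture()); // the result feeds the next digLine() call
rt->release();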
Here's the shader I wrote:
#ifdef GL_ES
precision mediump float;
#endif

varying vec2 v_texCoord;
uniform sampler2D u_texture;
uniform sampler2D u_alphaMaskTexture;

void main() {
    // Mask the texture with the alpha of the mask texture
    float maskAlpha = texture2D(u_alphaMaskTexture, v_texCoord).a;
    float texAlpha = texture2D(u_texture, v_texCoord).a;
    float blendAlpha = (1.0 - maskAlpha) * texAlpha; // Show only where the mask is invisible
    vec3 texColor = texture2D(u_texture, v_texCoord).rgb;
    gl_FragColor = vec4(texColor, blendAlpha);
}
I'm attempting to port a pathtracer to GLSL, and to do this I need to modify a shader sample program to use a texture as the framebuffer instead of the backbuffer.
This is the vertex shader:
#version 130
out vec2 texCoord;
// https://rauwendaal.net/2014/06/14/rendering-a-screen-covering-triangle-in-opengl/
void main()
{
    float x = -1.0 + float((gl_VertexID & 1) << 2);
    float y = -1.0 + float((gl_VertexID & 2) << 1);
    texCoord.x = x;
    texCoord.y = y;
    gl_Position = vec4(x, y, 0, 1);
}
This is the setup code
gl.GenFramebuffersEXT(2, _FrameBuffer);
gl.BindFramebufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, _FrameBuffer[0]);
gl.GenRenderbuffersEXT(2, _RaytracerBuffer);
gl.BindRenderbufferEXT(OpenGL.GL_RENDERBUFFER_EXT, _RaytracerBuffer[0]);
gl.RenderbufferStorageEXT(OpenGL.GL_RENDERBUFFER_EXT, OpenGL.GL_RGBA32F, (int)viewport[2], (int)viewport[3]);
And this is the runtime code
// Get a reference to the raytracer shader.
var shader = shaderRayMarch;
// setup first framebuffer (RGB32F)
gl.BindFramebufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, _FrameBuffer[0]);
gl.Viewport((int)viewport[0], (int)viewport[1], (int)viewport[2], (int)viewport[3]); //0,0,width,height)
gl.FramebufferRenderbufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, OpenGL.GL_COLOR_ATTACHMENT0_EXT, OpenGL.GL_RENDERBUFFER_EXT, _RaytracerBuffer[0]);
gl.FramebufferRenderbufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, OpenGL.GL_DEPTH_ATTACHMENT_EXT, OpenGL.GL_RENDERBUFFER_EXT, 0);
uint [] DrawBuffers = new uint[1];
DrawBuffers[0] = OpenGL.GL_COLOR_ATTACHMENT0_EXT;
gl.DrawBuffers(1, DrawBuffers);
shader.Bind(gl);
shader.SetUniform1(gl, "screenWidth", viewport[2]);
shader.SetUniform1(gl, "screenHeight", viewport[3]);
shader.SetUniform1(gl, "fov", 40.0f);
gl.DrawArrays(OpenGL.GL_TRIANGLES, 0, 3);
shader.Unbind(gl);
int[] pixels = new int[(int)viewport[2]*(int)viewport[3]*4];
gl.GetTexImage(_RaytracerBuffer[0], 0, OpenGL.GL_RGBA32F, OpenGL.GL_INT, pixels);
But when I inspect the pixels coming back from GetTexImage they're black. When I bind this texture in a further transfer shader they remain black. I suspect I'm missing something in the setup code for the renderbuffer and would appreciate any suggestions you have!
Renderbuffers are not textures. So when you do glGetTexImage on your renderbuffer, you probably got an OpenGL error. When you tried to bind it as a texture with glBindTexture, you probably got an OpenGL error.
If you want to render to a texture, you should render to a texture. As in glGenTextures/glTexImage2D/glFramebufferTexture2D.
Also, please stop using EXT_framebuffer_object. You should be using the core FBO feature, which requires no "EXT" suffixes. Not unless you're using a really ancient OpenGL version.
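For illustration, a minimal core-profile render-to-texture setup looks roughly like this (plain C-style GL calls; colorTex, fbo, width, height and pixels are placeholder names, and error handling is mostly omitted):

// Sketch: attach a texture (not a renderbuffer) as the FBO color target.
GLuint colorTex, fbo;
glGenTextures(1, &colorTex);
glBindTexture(GL_TEXTURE_2D, colorTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    /* handle the incomplete framebuffer */
}
// ... render the fullscreen triangle here ...
// Read the result back while the FBO is still bound, or sample colorTex in a later pass.
glReadPixels(0, 0, width, height, GL_RGBA, GL_FLOAT, pixels);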
I have a texture problem with the cubemap I'm rendering and can't seem to figure it out. I've generated a cube map with direct x's texture tools and then read it using
D3DX11CreateShaderResourceViewFromFile(device, L"cubemap.dds", 0, 0, &fullcubemap, 0);
The cubemap texture is not high quality at all, and it looks really stretched/distorted. I can definitely tell that the images used for the cubemap match up correctly, but the result is not great at all at the moment.
I'm not sure why this is happening. Is it because my textures are too large/small or is it something else? If it's due to the size of the textures, what is a recommended texture size? I am using a sphere for the cubemap not a cube.
Edit:
Shader:
cbuffer SkyboxConstantBuffer {
    float4x4 world;
    float4x4 view;
    float4x4 projection;
};

TextureCube gCubeMap;

SamplerState samTriLinearSam {
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Wrap;
    AddressV = Wrap;
};

struct VertexIn {
    float4 position : POSITION;
};

struct VertexOut {
    float4 position : SV_POSITION;
    float4 spherePosition : POSITION;
};

VertexOut VS(VertexIn vin) {
    VertexOut vout = (VertexOut)0;
    vin.position.w = 1.0f;
    vout.position = mul(vin.position, world);
    vout.position = mul(vout.position, view);
    vout.position = mul(vout.position, projection);
    vout.spherePosition = vin.position;
    return vout;
}

float4 PS(VertexOut pin) : SV_Target {
    return gCubeMap.Sample(samTriLinearSam, pin.spherePosition); //float4(1.0, 0.5, 0.5, 1.0);
}

RasterizerState NoCull {
    CullMode = None;
};

DepthStencilState LessEqualDSS {
    DepthFunc = LESS_EQUAL;
};

technique11 SkyTech {
    pass p0 {
        SetVertexShader(CompileShader(vs_4_0, VS()));
        SetGeometryShader(NULL);
        SetPixelShader(CompileShader(ps_4_0, PS()));
        SetRasterizerState(NoCull);
        SetDepthStencilState(LessEqualDSS, 0);
    }
}
Draw:
immediateContext->OMSetRenderTargets(1, &renderTarget, nullptr);
XMMATRIX sworld, sview, sprojection;
SkyboxConstantBuffer scb;
sview = XMLoadFloat4x4(&_view);
sprojection = XMLoadFloat4x4(&_projection);
sworld = XMLoadFloat4x4(&_world);
scb.world = sworld;
scb.view = sview;
scb.projection = sprojection;
immediateContext->IASetIndexBuffer(cubeMapSphere->getIndexBuffer(), DXGI_FORMAT_R32_UINT, 0);
ID3D11Buffer* vertexBuffer = cubeMapSphere->getVertexBuffer();
//ID3DX11EffectShaderResourceVariable * cMap;
////cMap = skyboxShader->GetVariableByName("gCubeMap")->AsShaderResource();
immediateContext->PSSetShaderResources(0, 1, &fullcubemap);//textures
//cMap->SetResource(fullcubemap);
immediateContext->IASetVertexBuffers(0, 1, &vertexBuffer, &stride, &offset);
immediateContext->VSSetShader(skyboxVertexShader, nullptr, 0);
immediateContext->VSSetConstantBuffers(0, 1, &skyboxConstantBuffer);
immediateContext->PSSetConstantBuffers(0, 1, &skyboxConstantBuffer);
immediateContext->PSSetShader(skyboxPixelShader, nullptr, 0);
immediateContext->UpdateSubresource(skyboxConstantBuffer, 0, nullptr, &scb, 0, 0);
immediateContext->DrawIndexed(cubeMapSphere->getIndexBufferSize(), 0, 0);
Initially I was planning to use this snippet to update the TextureCube variable in the shader
ID3DX11EffectShaderResourceVariable * cMap;
cMap = skyboxShader->GetVariableByName("gCubeMap")->AsShaderResource();
cMap->SetResource(fullcubemap);
But it seems that has no effect, and in fact, without the following line, the sphere I'm using for the cubemap gets textured with a texture used by another object in the scene, so perhaps there's something going on here? I'm not sure what, though.
immediateContext->PSSetShaderResources(0, 1, &fullcubemap);//textures
Edit: Probably not the above; I realised that if this wasn't updated, the old texture would still be applied, as it's never cleared after each draw.
Edit: Tried the cubemap with both a sphere and a cube, still the same texture issue.
Edit: Tried loading the shader resource view differently
D3DX11_IMAGE_LOAD_INFO loadSMInfo;
loadSMInfo.MiscFlags = D3D11_RESOURCE_MISC_TEXTURECUBE;
ID3D11Texture2D* SMTexture = 0;
hr = D3DX11CreateTextureFromFile(device, L"cubemap.dds",
&loadSMInfo, 0, (ID3D11Resource**)&SMTexture, 0);
D3D11_TEXTURE2D_DESC SMTextureDesc;
SMTexture->GetDesc(&SMTextureDesc);
D3D11_SHADER_RESOURCE_VIEW_DESC SMViewDesc;
SMViewDesc.Format = SMTextureDesc.Format;
SMViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
SMViewDesc.TextureCube.MipLevels = SMTextureDesc.MipLevels;
SMViewDesc.TextureCube.MostDetailedMip = 0;
hr = device->CreateShaderResourceView(SMTexture, &SMViewDesc, &fullcubemap);
Still produces the same output, any ideas?
Edit: Tried increasing the zfar distance and the texture remains the exact same no matter what value I put.
Example with second texture with increased view distance.
This texture is used on another object in my scene and comes out fine.
Edit: I have been trying to mess with the scaling of the texture/object
To achieve this I used
vin.position = vin.position * 50.0f;
This is beginning to look sort of like how it should. However, when I turn my camera angle the image disappears, so I obviously know this isn't correct; but if I could just scale the image properly per pixel or per vertex, I'm sure I could get the end result.
Edit:
I can confirm the cubemap is rendering correctly: I was ignoring the view/projection space and just using world, and managed to get this, which is the high-quality image I'm after, just not in the correct space. Yes, the faces are incorrect, but I'm not fussed about that now; it's easy enough to swap them around. I just need to get it rendering with this quality, in the correct space.
When in camera space, does it take into account whether it's the outside or the inside of the sphere? If my textures were on the outside of the sphere and I view it from the inside, it's not going to look the same, is it?
The issue is with your texture size: it's small, and you are applying it to a larger surface. Make larger textures with more pixels.
This confirms that zfar and scaling have nothing to do with it.
Finally found the issue, silly mistake: the matrices needed to be transposed before being uploaded to the constant buffer (DirectXMath builds row-major matrices, while HLSL constant buffers default to column-major packing).
scb.world = XMMatrixTranspose(sworld);
scb.view = XMMatrixTranspose(sview);
scb.projection = XMMatrixTranspose(sprojection);
I'm running into a problem and I don't know what the best practice for it is. I have a background that moves upward, which is in fact made of "slices" that move together, as if the screen were split into 4-5 parts horizontally. I need to be able to draw a hole (circle) in the background (see-through), at a specified position which will change dynamically every frame or so.
Here is how I generate a zone, I don't think there's much of a problem there:
// A 'zone' is simply the 'slice' of ground that moves upward. There are about 4 of
// them visible on screen at the same time, and they are automatically generated by
// a method irrelevant to the situation. Zones are Sprites.
// ---------
void LevelLayer::Zone::generate(LevelLayer *sender) {
    // [...]
    // Make a background for the zone
    Sprite *background = this->generateBackgroundSprite();
    background->setPosition(_contentSize.width / 2, _contentSize.height / 2);
    this->addChild(background, 0);
}
This is the Zone::generateBackgroundSprite() method:
// Dynamically generates a new background texture
Sprite *LevelLayer::Zone::generateBackgroundSprite() {
    RenderTexture *rt = RenderTexture::create(_contentSize.width, _contentSize.height);
    rt->retain();
    Color4B dirtColorByte = Color4B(/*initialize the color with bytes*/);
    Color4F dirtColor(dirtColorByte);
    rt->beginWithClear(dirtColor.r, dirtColor.g, dirtColor.b, dirtColor.a);
    // [Nothing here yet, gotta learn OpenGL m8]
    rt->end();
    // ++++++++++++++++++++
    // I'm just testing ClippingNode; it works, but the FPS get significantly lower.
    // If I lock them to 60 they get down to 30, and if I lock them there they get
    // down to 20 :(
    // Also, for the test I'm drawing a square, since ClippingNode doesn't seem to
    // like circles...
    DrawNode *square = DrawNode::create();
    Point squarePoints[4] = { Point(-20, -20), Point(20, -20), Point(20, 20), Point(-20, 20) };
    square->drawPolygon(squarePoints, 4, Color4F::BLACK, 0.0f, Color4F(0, 0, 0, 0));
    square->setPosition(0, 0);
    // Make a stencil
    Node *stencil = Node::create();
    stencil->addChild(square);
    // Create a clipping node with the prepared stencil
    ClippingNode *clippingNode = ClippingNode::create(stencil);
    clippingNode->setInverted(true);
    clippingNode->addChild(rt);
    Sprite *ret = Sprite::create();
    ret->addChild(clippingNode);
    rt->release();
    return ret;
}
So I'm asking you guys, what would you do in such a situation? Is what I am doing a good idea? Would you do it in another more imaginative way?
PS: This is a rewrite of a little app I made for iOS (I want to port it to Android). I was using MutableTextures in the Objective-C version (it was working). I'm just trying to see if there's a better way using RenderTexture, so I can dynamically create background images using OpenGL calls.
EDIT (SOLUTION)
I wrote my own simple fragment shader that "masks" the visible parts of a texture (the background) based on the visible parts of another texture (the mask). I have an array of points that determine where my circles are on the screen, and in the update method I draw them to a RenderTexture. I then take the generated texture and use it as the mask I pass to the shader.
This is my shader:
#ifdef GL_ES
precision mediump float;
#endif

varying vec2 v_texCoord;
uniform sampler2D u_texture;
uniform sampler2D u_alphaMaskTexture;

void main() {
    float maskAlpha = texture2D(u_alphaMaskTexture, v_texCoord).a;
    float texAlpha = texture2D(u_texture, v_texCoord).a;
    float blendAlpha = (1.0 - maskAlpha) * texAlpha; // Show only where mask is not visible
    vec3 texColor = texture2D(u_texture, v_texCoord).rgb;
    gl_FragColor = vec4(texColor, blendAlpha);
}
init method:
bool HelloWorld::init() {
    // [...]
    Size visibleSize = Director::getInstance()->getVisibleSize();
    // Load and cache the custom shader
    this->loadCustomShader();
    // 'generateBackgroundSlice()' creates a new RenderTexture and fills it with a
    // color, nothing too complicated here so I won't copy-paste it in my edit
    m_background = Sprite::createWithTexture(this->generateBackgroundSprite()->getSprite()->getTexture());
    m_background->setPosition(visibleSize.width / 2, visibleSize.height / 2);
    this->addChild(m_background);
    m_background->setShaderProgram(ShaderCache::getInstance()->getProgram(Shader_AlphaMask_frag_key));
    GLProgram *shader = m_background->getShaderProgram();
    m_alphaMaskTextureUniformLocation = glGetUniformLocation(shader->getProgram(), "u_alphaMaskTexture");
    glUniform1i(m_alphaMaskTextureUniformLocation, 1);
    m_alphaMaskRender = RenderTexture::create(m_background->getContentSize().width,
                                              m_background->getContentSize().height);
    m_alphaMaskRender->retain();
    // [...]
}
loadCustomShader method:
void HelloWorld::loadCustomShader() {
    // Load the content of the vertex and fragment shaders
    FileUtils *fileUtils = FileUtils::getInstance();
    string vertexSource = ccPositionTextureA8Color_vert;
    string fragmentSource = fileUtils->getStringFromFile(
        fileUtils->fullPathForFilename("Shader_AlphaMask_frag.fsh"));
    // Init a shader and add its attributes
    GLProgram *shader = new GLProgram;
    shader->initWithByteArrays(vertexSource.c_str(), fragmentSource.c_str());
    shader->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_POSITION, GLProgram::VERTEX_ATTRIB_POSITION);
    shader->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_TEX_COORD, GLProgram::VERTEX_ATTRIB_TEX_COORDS);
    shader->link();
    shader->updateUniforms();
    ShaderCache::getInstance()->addProgram(shader, Shader_AlphaMask_frag_key);
    // Trace OpenGL errors if any
    CHECK_GL_ERROR_DEBUG();
}
update method:
void HelloWorld::update(float dt) {
    // ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    // Create the mask texture from the points in the m_circlePos array
    GLProgram *shader = m_background->getShaderProgram();
    m_alphaMaskRender->beginWithClear(0, 0, 0, 0); // Begin with a transparent mask
    for (vector<Point>::iterator it = m_circlePos.begin(); it != m_circlePos.end(); it++) {
        // Draw a circle on the mask
        const float radius = 40;
        const int resolution = 20;
        Point circlePoints[resolution];
        Point center = *it;
        center = Director::getInstance()->convertToUI(center); // OpenGL has a weird coordinate system
        float angle = 0;
        for (int i = 0; i < resolution; i++) {
            float x = (radius * cosf(angle)) + center.x;
            float y = (radius * sinf(angle)) + center.y;
            angle += (2 * M_PI) / resolution;
            circlePoints[i] = Point(x, y);
        }
        DrawNode *circle = DrawNode::create();
        circle->retain();
        circle->drawPolygon(circlePoints, resolution, Color4F::BLACK, 0.0f, Color4F(0, 0, 0, 0));
        circle->setPosition(Point::ZERO);
        circle->visit();
        circle->release();
    }
    m_alphaMaskRender->end();
    Texture2D *alphaMaskTexture = m_alphaMaskRender->getSprite()->getTexture();
    alphaMaskTexture->setAliasTexParameters(); // Disable linear interpolation
    // ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    shader->use();
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, alphaMaskTexture->getName());
    glActiveTexture(GL_TEXTURE0);
}
What you might want to look at is framebuffers. I'm not too familiar with the mobile API for OpenGL, but I'm sure you should have access to framebuffers.
The idea is to do a first pass where you render the circles you want cut out of your background into a new framebuffer texture, and then use that texture as an alpha map in the pass that renders your background. When you render a circle, set the alpha channel in the texture to 0.0, and to 1.0 everywhere else; when rendering the background, set the alpha of each fragment to the alpha sampled from the first pass's texture.
You can think of it as the same idea as a mask, just using another texture.
Hope this helps :)
I'm trying to bind multiple textures to the samplers in my fragment shader. The loading code seems to work well. ATI's CodeXL shows the texture being loaded correctly.
However, when I go to bind the textures for my model to active texture units 0 and 1, I cannot get the values sent to my shader. When I declare the shader uniform as a usampler2D and use a uvec4 to store the color, as I should since my texture is provided as unsigned bytes, I get an all-white model. When I change the uniform to a sampler2D and use a vec4 to store the color, my glUniform1i call can no longer get the location of the shader variable, so nothing gets set for the active texture. On the bright side, the diffuse texture does get drawn on the model this way, but I cannot get the normal texture.
I'm not sure what the problem is. I've checked several places online trying to figure it out, and I have looked through the Red Book. I know I'm missing something or have some state set wrong, but I can't seem to find it. Thank you in advance for any help you can give me with this problem.
Texture Creation
int[] testWidth;
testWidth = new int[1];
testWidth[0] = 1000;
// First bind the texture.
bind();
// Make sure that textures are enabled.
// I read that ATI cards need this before MipMapping.
glEnable(GL_TEXTURE_2D);
// Test to make sure we can create a texture like this.
glTexImage2D(GL_PROXY_TEXTURE_2D, 0, format, width, height,
             0, format, GL_UNSIGNED_BYTE, null);
glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH,
                         testWidth);
if (testWidth[0] == 0)
{
    message("Could not load texture onto the graphics card.");
}
else
{
    // Not so sure about this part....but it seems to work.
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    // Load the texture data.
    glTexImage2D(texture_type, 0, format, width, height,
                 0, format, GL_UNSIGNED_BYTE, (GLvoid[]?)value);
    // Smaller mipmaps need linear mipmap coords.
    // Larger just uses linear of the main texture.
    glTexParameterf(texture_type, GL_TEXTURE_MIN_FILTER,
                    GL_LINEAR_MIPMAP_LINEAR);
    glTexParameterf(texture_type, GL_TEXTURE_MAG_FILTER,
                    GL_LINEAR);
    // Clamp the texture to the edges.
    glTexParameterf(texture_type, GL_TEXTURE_WRAP_S,
                    GL_CLAMP_TO_EDGE);
    glTexParameterf(texture_type, GL_TEXTURE_WRAP_T,
                    GL_CLAMP_TO_EDGE);
    glTexParameterf(texture_type, GL_TEXTURE_WRAP_R,
                    GL_CLAMP_TO_EDGE);
    // Generate the mipmaps. The tex parameter is there
    // for ATI cards. Again, it's something I read online.
    glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
    glGenerateMipmap(texture_type);
}
// Now unbind the texture.
unbind();
Texture Binding
if (currentShader != null)
{
    currentShader.set_uniform_matrix("model_matrix", ref model_matrix,
                                     true);
    if (material != null)
    {
        if (material.diffuse_texture != null)
        {
            glActiveTexture(GL_TEXTURE0);
            material.diffuse_texture.bind();
            currentShader.set_uniform_texture("diffuse_texture",
                                              Constants.DIFFUSE_TEXTURE);
            if (material.normal_texture != null)
            {
                glActiveTexture(GL_TEXTURE1);
                material.normal_texture.bind();
                currentShader.set_uniform_texture("normal_texture",
                                                  Constants.NORMAL_TEXTURE);
            }
        }
    }
    // If there is a renderable then render it.
    if (renderable != null)
    {
        renderable.render(1.0);
    }
    if (material != null)
    {
        material.unbind();
    }
}
Fragment Shader
#version 400 core

/**
 * Smooth the incoming vertex color. Smooth it so that the fragments
 * which will be in between the vertices can also get a value close
 * to where they are positioned after being rasterized.
 */
smooth in vec4 vertex_color;

/**
 * Smooth the incoming texture coordinates. Smooth them so that the
 * fragments which will be in between the vertices can also get a
 * value close to where they are positioned after being rasterized.
 */
smooth in vec2 out_texture_coordinate;

/**
 * The color to make this fragment.
 */
out vec4 frag_color;

/**
 * The model's diffuse texture. This will be mapped to index 0.
 */
uniform usampler2D diffuse_texture;

/**
 * The model's normal texture. This will be mapped to index 1.
 */
uniform usampler2D normal_texture;

/**
 * The entry point of the shader.
 */
void main(void)
{
    uvec4 diffuseColor;
    uvec4 normalModifier;
    diffuseColor = texture(diffuse_texture, out_texture_coordinate);
    normalModifier = texture(normal_texture, out_texture_coordinate);
    // Discard any fragments that have an alpha less than 1.0.
    if (diffuseColor.a < 1.0)
    {
        // This works as part of depth testing to remove the fragments
        // that are not useful.
        discard;
    }
    frag_color = diffuseColor;
}
Uniform Setting
/**
* Sets the uniform value for a texture in the shader.
*
* #param name The name of the uniform to bind this texture to.
* This must have already been registered.
*
* #param textureUnit The id for the texture unit to bind to the uniform.
* This is not the texture's id/reference, but the OpenGL texture unit
* that the reference is bound to.
* This is set by calling glActiveTexture.
*/
public void set_uniform_texture(string name, int textureUnit)
{
// Check to make sure the uniform was given a location already.
if (register_uniform(name) == true)
{
// Set the data for this uniform then.
glUniform1i(uniform_mapping.get(name), textureUnit);
}
else
{
message("Texture was not set. %s", name);
}
}
/**
 * Register a uniform for passing data to the shader program.
 *
 * @return true if the uniform was found with a valid location;
 * otherwise, false.
 *
 * @param name The name for the parameter to get a uniform location for.
 * Use this name for the variable in your shader.
 */
public bool register_uniform(string name)
{
    int location;
    // Make sure we didn't already get the location of the uniform value.
    if (uniform_mapping.has_key(name) == false)
    {
        location = Constants.OPENGL_INVALID_INDEX;
        // We have no information about this uniform, so try
        // to get its location.
        location = glGetUniformLocation(reference, name);
        // The location will be 0 or higher if we found the uniform.
        if (location != Constants.OPENGL_INVALID_INDEX)
        {
            uniform_mapping.set(name, location);
            return true;
        }
    }
    else
    {
        // The uniform was previously found and can be used.
        return true;
    }
    debug("Uniform %s not found!!!!!", name);
    return false;
}
Setting the internal format to GL_RGB/A implies you should be using a sampler2D and not a usampler2D, even though the raw image data is initially given as unsigned bytes. EDIT: The given data gets converted to the internal format by the call to glTexImage2D (in this case GL_RGBA is 8 bits per channel, so not much has to happen). However, for most graphics applications the data is needed with higher accuracy, for example when sampling the texture with non-"nearest" interpolation, which is why it's normally exposed as floats.
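To illustrate the pairing (a sketch, not from the original post; w, h and data are placeholder names):

// Normalized internal formats pair with float samplers; integer internal
// formats pair with integer samplers. Mixing them gives undefined results.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, data);         // sampler2D, texture() -> vec4 in [0,1]
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8UI, w, h, 0,
             GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, data); // usampler2D, texture() -> uvec4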
To bind multiple textures...
glActiveTexture(GL_TEXTURE0 + firstTextureIndex); //firstTextureIndex should be unique amongst the textures bound for a particular shader
glBindTexture(GL_TEXTURE_2D, myFirstTextureHandle);
glUniform1i(glGetUniformLocation(shaderProgramHandle, "firstSampler"), firstTextureIndex); //note the same index given in the glActiveTexture call. This is also always glUniform1i
and repeat for secondTextureIndex, mySecondTextureHandle, "secondSampler" etc.
If glGetUniformLocation doesn't return a location, double check you actually use it and it affects the shader output (or it gets optimized out completely). Also check for the usual typos or missing "uniform" keyword etc.
Since you don't show the definition of Constants, make sure that the following asserts hold for your code:
if (material.diffuse_texture != null)
{
    glActiveTexture(GL_TEXTURE0);
    material.diffuse_texture.bind();
    assert(Constants.DIFFUSE_TEXTURE + GL_TEXTURE0 == GL_TEXTURE0);
    currentShader.set_uniform_texture("diffuse_texture",
                                      Constants.DIFFUSE_TEXTURE);
    if (material.normal_texture != null)
    {
        glActiveTexture(GL_TEXTURE1);
        material.normal_texture.bind();
        assert(Constants.NORMAL_TEXTURE + GL_TEXTURE0 == GL_TEXTURE1);
        currentShader.set_uniform_texture("normal_texture",
                                          Constants.NORMAL_TEXTURE);
    }
}
It's a common misunderstanding that the value passed to the sampler uniform is the enum value given to glActiveTexture. In fact, glActiveTexture takes GL_TEXTURE0 as an offset base value, while the uniform takes the plain unit index.
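A minimal sketch of that distinction (variable names are mine):

glActiveTexture(GL_TEXTURE0 + 3);            // takes the enum, i.e. GL_TEXTURE3
glBindTexture(GL_TEXTURE_2D, textureHandle);
glUniform1i(samplerLocation, 3);             // takes the plain unit index 3, NOT GL_TEXTURE3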
I suspect I'm not correctly rendering particle positions to my FBO, or correctly sampling those positions when rendering, though that may not be the actual problem with my code, admittedly.
I have a complete jsfiddle here: http://jsfiddle.net/p5mdv/53/
A brief overview of the code:
Initialization:
Create an array of random particle positions in x,y,z
Create an array of texture sampling locations (e.g. for 2 particles, first particle at 0,0, next at 0.5,0)
Create a Frame Buffer Object and two particle position textures (one for input, one for output)
Create a full-screen quad (-1,-1 to 1,1)
Particle simulation:
Render a full-screen quad using the particle program (bind frame buffer, set viewport to the dimensions of my particle positions texture, bind input texture, and draw a quad from -1,-1 to 1,1). Input and output textures are swapped each frame.
Particle fragment shader samples the particle texture at the current fragment position (gl_FragCoord.xy), makes some modifications, and writes out the modified position
Particle rendering:
Draw using the vertex buffer of texture sampling locations
Vertex shader uses the sampling location to sample the particle position texture, then transforms the position using the view projection matrix
Draw the particle using a sprite texture (gl.POINTS)
Questions:
Am I correctly setting the viewport for the FBO in the particle simulation step? I.e. am I correctly rendering a full-screen quad?
// 6 vertices with 2 components each = 12 floats
var vertexBuffer = new Float32Array(12);
// -1,-1 to 1,1 screen quad
vertexBuffer[0] = -1;
vertexBuffer[1] = -1;
vertexBuffer[2] = -1;
vertexBuffer[3] = 1;
vertexBuffer[4] = 1;
vertexBuffer[5] = 1;
vertexBuffer[6] = -1;
vertexBuffer[7] = -1;
vertexBuffer[8] = 1;
vertexBuffer[9] = 1;
vertexBuffer[10] = 1;
vertexBuffer[11] = -1;
// Create GL buffers with this data
g.particleSystem.vertexObject = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, g.particleSystem.vertexObject);
gl.bufferData(gl.ARRAY_BUFFER, vertexBuffer, gl.STATIC_DRAW);
...
gl.viewport(0, 0,
g.particleSystem.particleFBO.width,
g.particleSystem.particleFBO.height);
...
// Set the quad as vertex buffer
gl.bindBuffer(gl.ARRAY_BUFFER, g.screenQuad.vertexObject);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
// Draw!
gl.drawArrays(gl.TRIANGLES, 0, 6);
Am I correctly setting the texture coordinates to sample the particle positions?
for(var i=0; i<numParticles; i++)
{
// Coordinates of particle within texture (normalized)
var texCoordX = Math.floor(i % texSize.width) / texSize.width;
var texCoordY = Math.floor(i / texSize.width) / texSize.height;
particleIndices[ pclIdx ] = texCoordX;
particleIndices[ pclIdx + 1 ] = texCoordY;
particleIndices[ pclIdx + 2 ] = 1; // not used in shader
}
The relevant shaders:
Particle simulation fragment shader:
precision mediump float;

uniform sampler2D mParticleTex;

void main()
{
    // Current pixel is the particle's position on the texture
    vec2 particleSampleCoords = gl_FragCoord.xy;
    vec4 particlePos = texture2D(mParticleTex, particleSampleCoords);
    // Move the particle up
    particlePos.y += 0.1;
    if (particlePos.y > 2.0)
    {
        // Reset
        particlePos.y = -2.0;
    }
    // Write the particle out to the texture
    gl_FragColor = particlePos;
}
Particle rendering vertex shader:
attribute vec4 vPosition;
uniform mat4 u_modelViewProjMatrix;
uniform sampler2D mParticleTex;

void main()
{
    vec2 particleSampleCoords = vPosition.xy;
    vec4 particlePos = texture2D(mParticleTex, particleSampleCoords);
    gl_Position = u_modelViewProjMatrix * particlePos;
    gl_PointSize = 10.0;
}
Let me know if there's a better way to go about debugging this, if nothing else. I'm using webgl-debug to find gl errors and logging what I can to the console.
Your quad is facing away from the view, so I tried adding gl.disable(gl.CULL_FACE); still no result.
Then I noticed that while resizing the window panel with the canvas, it actually shows one black, square-shaped particle. So it seems that the rendering loop is not right.
If you look at the console log, it fails to load the particle image, and it also says that the FBO size is 512x1, which is not good.
Some function declarations do not exist, such as getTexSize. (?!)
The code needs tidying and grouping, and always check the console if you're already using it.
Hope this helps a bit.
Found the problem.
gl_FragCoord ranges from [0,0] to [screenwidth, screenheight]; I was wrongly thinking it ranged from [0,0] to [1,1].
I had to pass shader uniforms for the width and height, then normalize the sample coordinates before sampling from the texture.
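A sketch of what that fix can look like (desktop-GL C calls shown for brevity; WebGL's gl.getUniformLocation/gl.uniform2f are the direct analogues, and u_texSize is a name I made up):

// Host side: tell the simulation shader how big the particle texture is.
GLint texSizeLoc = glGetUniformLocation(particleProgram, "u_texSize");
glUniform2f(texSizeLoc, (float)particleTexWidth, (float)particleTexHeight);
// Fragment shader side: normalize gl_FragCoord before sampling.
//     uniform vec2 u_texSize;
//     vec2 uv = gl_FragCoord.xy / u_texSize;   // now in [0,1]
//     vec4 particlePos = texture2D(mParticleTex, uv);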