I am trying to output more than one buffer from a shader - the general goal is to use it for GPGPU purposes. I've looked at this answer and got closer to the goal with this:
document.addEventListener("DOMContentLoaded", function() {
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need WebGL2");
}
gl.canvas.width = 2;
gl.canvas.height = 2;
const vs = `
#version 300 es
in vec2 position;
void main(void) {
gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
}
`;
const fs = `
#version 300 es
precision mediump float;
layout(location = 0) out vec4 outColor0;
layout(location = 1) out vec4 outColor1;
layout(location = 2) out vec4 outColor2;
layout(location = 3) out vec4 outColor3;
layout(location = 4) out vec4 outColor4;
layout(location = 5) out vec4 outColor5;
void main() {
// simplified for question purposes
outColor0 = vec4(1, 0, 0, 1);
outColor1 = vec4(0, 1, 0, 1);
outColor2 = vec4(0, 0, 1, 1);
outColor3 = vec4(1, 1, 0, 1);
outColor4 = vec4(1, 0, 1, 1);
outColor5 = vec4(0, 1, 1, 1);
}
`
const program = twgl.createProgram(gl, [vs, fs]);
const textures = [];
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
for (let i = 0; i < 6; ++i) {
const tex = gl.createTexture();
textures.push(tex);
gl.bindTexture(gl.TEXTURE_2D, tex);
const width = 2;
const height = 2;
const level = 0;
gl.texImage2D(gl.TEXTURE_2D, level, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
// attach texture to framebuffer
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + i, gl.TEXTURE_2D, tex, level);
}
gl.viewport(0, 0, 2, 2);
// tell it we want to draw to all 6 attachments
gl.drawBuffers([
gl.COLOR_ATTACHMENT0,
gl.COLOR_ATTACHMENT1,
gl.COLOR_ATTACHMENT2,
gl.COLOR_ATTACHMENT3,
gl.COLOR_ATTACHMENT4,
gl.COLOR_ATTACHMENT5,
]);
// draw a single point
gl.useProgram(program);
{
const offset = 0;
const count = 1
gl.drawArrays(gl.TRIANGLE, 0, 4);
}
for (var l = 0; l < 6; l++) {
var pixels = new Uint8Array(gl.canvas.width * gl.canvas.height * 4);
gl.readBuffer(gl.COLOR_ATTACHMENT0 + l);
gl.readPixels(0, 0, gl.canvas.width, gl.canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
console.log(pixels.join(' '));
}
}
main();
})
However, the result is that only one pixel in each buffer gets set, so the output is:
0 0 0 0 255 0 0 255 0 0 0 0 0 0 0 0
0 0 0 0 0 255 0 255 0 0 0 0 0 0 0 0
0 0 0 0 0 0 255 255 0 0 0 0 0 0 0 0
0 0 0 0 255 255 0 255 0 0 0 0 0 0 0 0
0 0 0 0 255 0 255 255 0 0 0 0 0 0 0 0
0 0 0 0 0 255 255 255 0 0 0 0 0 0 0 0
rather than what I was hoping/expecting:
255 0 0 255 255 0 0 255 255 0 0 255 255 0 0 255
etc.
I was expecting that
outColor0 = vec4(1, 0, 0, 1);
is the equivalent to
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
but clearly I am wrong.
So how do I get to the desired outcome - to be able to set each pixel on each of the buffers?
The code does not provide any vertex data even though it's asking WebGL to draw 4 vertices. Further, it's passing gl.TRIANGLE, which doesn't exist; it's gl.TRIANGLES with an S at the end. gl.TRIANGLE will be undefined, which gets coerced to 0, which matches gl.POINTS.
In the JavaScript console
> const gl = document.createElement('canvas').getContext('webgl2');
< undefined
> gl.TRIANGLE
< undefined
> gl.TRIANGLES
< 4
> gl.POINTS
< 0
To put it another way, all the gl.CONSTANTS are just integer values. Instead of
gl.drawArrays(gl.TRIANGLES, offset, count)
you can just do this
gl.drawArrays(4, offset, count)
because gl.TRIANGLES = 4.
But you didn't use gl.TRIANGLES, you used gl.TRIANGLE (no S), so you effectively did this
gl.drawArrays(undefined, offset, count)
that was interpreted as
gl.drawArrays(0, offset, count)
0 = gl.POINTS so that's the same as
gl.drawArrays(gl.POINTS, offset, count)
The code then draws a single 1 pixel point 4 times at the same location because you called it with a count of 4
gl.drawArrays(gl.POINTS, 0, 4)
Nothing in your vertex shader changes each iteration so every iteration is going to do exactly the same thing. In this case it's going to draw a 1x1 pixel POINT at clip space position 0,0,0,1 which will end up being the bottom left pixel of the 2x2 pixels.
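If you did want to keep gl.drawArrays(gl.POINTS, 0, 4), the vertex shader would have to compute a different position per vertex, for example from gl_VertexID. A sketch of that idea (just to illustrate, this is not what the code above does):
const vs = `#version 300 es
void main() {
  // put one 1x1 point at the center of each pixel of the 2x2 target;
  // pixel centers are at ±0.5 in clip space for a 2x2 render target
  float x = float(gl_VertexID % 2) - 0.5;
  float y = float(gl_VertexID / 2) - 0.5;
  gl_Position = vec4(x, y, 0.0, 1.0);
  gl_PointSize = 1.0;
}`;
// ...then: gl.drawArrays(gl.POINTS, 0, 4);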
In any case you probably want to provide vertices, but as a simple test, if I add
gl_PointSize = 2.0;
to the vertex shader and change the draw call to
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
Then it produces the results you expect. It draws a single 2x2 pixel POINT at clip space position 0,0,0,1
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need WebGL2");
}
gl.canvas.width = 2;
gl.canvas.height = 2;
const vs = `
#version 300 es
in vec2 position;
void main(void) {
gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
gl_PointSize = 2.0;
}
`;
const fs = `
#version 300 es
precision mediump float;
layout(location = 0) out vec4 outColor0;
layout(location = 1) out vec4 outColor1;
layout(location = 2) out vec4 outColor2;
layout(location = 3) out vec4 outColor3;
layout(location = 4) out vec4 outColor4;
layout(location = 5) out vec4 outColor5;
void main() {
// simplified for question purposes
outColor0 = vec4(1, 0, 0, 1);
outColor1 = vec4(0, 1, 0, 1);
outColor2 = vec4(0, 0, 1, 1);
outColor3 = vec4(1, 1, 0, 1);
outColor4 = vec4(1, 0, 1, 1);
outColor5 = vec4(0, 1, 1, 1);
}
`
const program = twgl.createProgram(gl, [vs, fs]);
const textures = [];
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
for (let i = 0; i < 6; ++i) {
const tex = gl.createTexture();
textures.push(tex);
gl.bindTexture(gl.TEXTURE_2D, tex);
const width = 2;
const height = 2;
const level = 0;
gl.texImage2D(gl.TEXTURE_2D, level, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
// attach texture to framebuffer
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + i, gl.TEXTURE_2D, tex, level);
}
gl.viewport(0, 0, 2, 2);
// tell it we want to draw to all 6 attachments
gl.drawBuffers([
gl.COLOR_ATTACHMENT0,
gl.COLOR_ATTACHMENT1,
gl.COLOR_ATTACHMENT2,
gl.COLOR_ATTACHMENT3,
gl.COLOR_ATTACHMENT4,
gl.COLOR_ATTACHMENT5,
]);
// draw a single point
gl.useProgram(program); {
const offset = 0;
const count = 1
gl.drawArrays(gl.POINTS, 0, 1);
}
for (var l = 0; l < 6; l++) {
var pixels = new Uint8Array(gl.canvas.width * gl.canvas.height * 4);
gl.readBuffer(gl.COLOR_ATTACHMENT0 + l);
gl.readPixels(0, 0, gl.canvas.width, gl.canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
console.log(pixels.join(' '));
}
}
main();
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
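For the more general GPGPU case you'd eventually provide real vertex data that covers every pixel of the target instead of a point. A minimal sketch with plain WebGL calls, using one clip-space-filling triangle (the vertex shader now actually uses the position attribute; everything else stays as above):
const vs = `#version 300 es
in vec2 position;
void main() {
  gl_Position = vec4(position, 0.0, 1.0);
}`;
// ...create the program, framebuffer, and textures exactly as above...
// one big triangle that covers all of clip space
const verts = new Float32Array([
  -1, -1,
   3, -1,
  -1,  3,
]);
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, verts, gl.STATIC_DRAW);
const loc = gl.getAttribLocation(program, 'position');
gl.enableVertexAttribArray(loc);
gl.vertexAttribPointer(loc, 2, gl.FLOAT, false, 0, 0);
gl.useProgram(program);
gl.drawArrays(gl.TRIANGLES, 0, 3);  // every pixel of every attachment gets written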
You can try using webgl-lint, which, if I run it with your original code, at least complains
Uncaught Error: https://greggman.github.io/webgl-lint/webgl-lint.js:2942: error in drawArrays(/UNKNOWN WebGL ENUM/ undefined, 0, 4): argument 0 is undefined
with WebGLProgram("unnamed") as current program
with the default vertex array bound
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need WebGL2");
}
gl.canvas.width = 2;
gl.canvas.height = 2;
const vs = `
#version 300 es
in vec2 position;
void main(void) {
gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
}
`;
const fs = `
#version 300 es
precision mediump float;
layout(location = 0) out vec4 outColor0;
layout(location = 1) out vec4 outColor1;
layout(location = 2) out vec4 outColor2;
layout(location = 3) out vec4 outColor3;
layout(location = 4) out vec4 outColor4;
layout(location = 5) out vec4 outColor5;
void main() {
// simplified for question purposes
outColor0 = vec4(1, 0, 0, 1);
outColor1 = vec4(0, 1, 0, 1);
outColor2 = vec4(0, 0, 1, 1);
outColor3 = vec4(1, 1, 0, 1);
outColor4 = vec4(1, 0, 1, 1);
outColor5 = vec4(0, 1, 1, 1);
}
`
const program = twgl.createProgram(gl, [vs, fs]);
const textures = [];
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
for (let i = 0; i < 6; ++i) {
const tex = gl.createTexture();
textures.push(tex);
gl.bindTexture(gl.TEXTURE_2D, tex);
const width = 2;
const height = 2;
const level = 0;
gl.texImage2D(gl.TEXTURE_2D, level, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
// attach texture to framebuffer
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + i, gl.TEXTURE_2D, tex, level);
}
gl.viewport(0, 0, 2, 2);
// tell it we want to draw to all 6 attachments
gl.drawBuffers([
gl.COLOR_ATTACHMENT0,
gl.COLOR_ATTACHMENT1,
gl.COLOR_ATTACHMENT2,
gl.COLOR_ATTACHMENT3,
gl.COLOR_ATTACHMENT4,
gl.COLOR_ATTACHMENT5,
]);
// draw a single point
gl.useProgram(program); {
const offset = 0;
const count = 1
gl.drawArrays(gl.TRIANGLE, 0, 4);
}
for (var l = 0; l < 6; l++) {
var pixels = new Uint8Array(gl.canvas.width * gl.canvas.height * 4);
gl.readBuffer(gl.COLOR_ATTACHMENT0 + l);
gl.readPixels(0, 0, gl.canvas.width, gl.canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
console.log(pixels.join(' '));
}
}
main();
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
<script src="https://greggman.github.io/webgl-lint/webgl-lint.js" crossorigin="anonymous"></script>
My DirectX application does not render the texture correctly. Result:
Expected from VS editor:
As you can see the cat texture is not completely drawn.
I'm using WaveFrontReader to load the .OBJ and .MTL files and WicTextureLoader to load the PNG/JPG.
My HLSL:
cbuffer constants : register(b0)
{
row_major float4x4 transform;
row_major float4x4 projection;
float3 lightvector;
}
struct vs_in
{
float3 position : POS;
float3 normal : NOR;
float2 texcoord : TEX;
float4 color : COL;
};
struct vs_out
{
float4 position : SV_POSITION;
float2 texcoord : TEX;
float4 color : COL;
};
Texture2D mytexture : register(t0);
SamplerState mysampler : register(s0);
vs_out vs_main(vs_in input)
{
float light = clamp(dot(normalize(mul(float4(input.normal, 0.0f), transform).xyz), normalize(-lightvector)), 0.0f, 1.0f) * 0.8f + 0.2f;
vs_out output;
output.position = mul(float4(input.position, 1.0f), mul(transform, projection));
output.texcoord = input.texcoord;
output.color = float4(input.color.rgb * light, input.color.a);
return output;
}
float4 ps_main(vs_out input) : SV_TARGET
{
return mytexture.Sample(mysampler, input.texcoord) * input.color;
}
My preparation:
void Config3DWindow()
{
const wchar_t* tf = L"1.hlsl";
d2d.m_swapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&frameBuffer));
d2d.device->CreateRenderTargetView(frameBuffer, nullptr, &frameBufferView);
frameBuffer->GetDesc(&depthBufferDesc); // based on framebuffer properties
depthBufferDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthBufferDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
CComPtr<ID3DBlob> vsBlob;
D3DCompileFromFile(tf, nullptr, nullptr, "vs_main", "vs_5_0", 0, 0, &vsBlob, nullptr);
d2d.device->CreateVertexShader(vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), nullptr, &vertexShader);
D3D11_INPUT_ELEMENT_DESC inputElementDesc[] =
{
{ "POS", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEX", 0, DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COL", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
d2d.device->CreateInputLayout(inputElementDesc, ARRAYSIZE(inputElementDesc), vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), &inputLayout);
///////////////////////////////////////////////////////////////////////////////////////////////
CComPtr<ID3DBlob> psBlob;
D3DCompileFromFile(tf, nullptr, nullptr, "ps_main", "ps_5_0", 0, 0, &psBlob, nullptr);
d2d.device->CreatePixelShader(psBlob->GetBufferPointer(), psBlob->GetBufferSize(), nullptr, &pixelShader);
D3D11_BUFFER_DESC constantBufferDesc = {};
constantBufferDesc.ByteWidth = sizeof(Constants) + 0xf & 0xfffffff0;
constantBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
constantBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constantBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
d2d.device->CreateBuffer(&constantBufferDesc, nullptr, &constantBuffer);
}
Loading the obj:
WaveFrontReader<UINT> wfr;
wfr.Load(L"12221_Cat_v1_l3.oobj");
wfr.LoadMTL(L"12221_Cat_v1_l3.mtl");
obj.CreateDirect3D2(wfr);
CreateDirect3D2() function:
std::vector<float> Vertices;
// float VertexDataX[] = // float3 position, float3 normal, float2 texcoord, float4 color
auto numV = wf.vertices.size();
Vertices.resize(numV * 12);
for (size_t i = 0; i < numV; i++)
{
auto& v = wf.vertices[i];
float* i2 = Vertices.data() + (i * 12);
// position
i2[0] = v.position.x;
i2[1] = v.position.y;
i2[2] = v.position.z;
// normal
i2[3] = v.normal.x;
i2[4] = v.normal.y;
i2[5] = v.normal.z;
// tx
i2[6] = v.textureCoordinate.x;
i2[7] = v.textureCoordinate.y;
// Colors
i2[8] = 1.0f;
i2[9] = 1.0f;
i2[10] = 1.0f;
i2[11] = 1.0f;
}
D3D11_BUFFER_DESC vertexBufferDesc = {};
vertexBufferDesc.ByteWidth = Vertices.size() * sizeof(float);
vertexBufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
D3D11_SUBRESOURCE_DATA vertexData = { Vertices.data() }; // in data.h
vertexBuffer = 0;
d2d.device->CreateBuffer(&vertexBufferDesc, &vertexData, &vertexBuffer);
// Indices
std::vector<UINT>& Indices = wf.indices;
D3D11_BUFFER_DESC indexBufferDesc = {};
IndicesSize = Indices.size() * sizeof(UINT);
indexBufferDesc.ByteWidth = IndicesSize;
indexBufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
D3D11_SUBRESOURCE_DATA indexData = { Indices.data() }; // in data.h
indexBuffer = 0;
d2d.device->CreateBuffer(&indexBufferDesc, &indexData, &indexBuffer);
for (auto& ma : wf.materials)
{
CComPtr<ID3D11Resource> tex;
CComPtr<ID3D11ShaderResourceView> texv;
CreateWICTextureFromFile(d2d.device, d2d.context, ma.strTexture, &tex, &texv,0);
if (tex && texv)
{
OBJFT ot;
ot.texture = tex;
ot.textureView = texv;
textures.push_back(ot);
}
tex = 0;
texv = 0;
}
The drawing function:
void Present(OBJF& o, int Count, _3DP& _3, D2D1_COLOR_F bcol)
{
float w = static_cast<float>(depthBufferDesc.Width); // width
float h = static_cast<float>(depthBufferDesc.Height); // height
float n = 1000.0f; // near
float f = 1000000.0f; // far
matrix rotateX = { 1, 0, 0, 0, 0, static_cast<float>(cos(_3.rotation[0])), -static_cast<float>(sin(_3.rotation[0])), 0, 0, static_cast<float>(sin(_3.rotation[0])), static_cast<float>(cos(_3.rotation[0])), 0, 0, 0, 0, 1 };
matrix rotateY = { static_cast<float>(cos(_3.rotation[1])), 0, static_cast<float>(sin(_3.rotation[1])), 0, 0, 1, 0, 0, -static_cast<float>(sin(_3.rotation[1])), 0, static_cast<float>(cos(_3.rotation[1])), 0, 0, 0, 0, 1 };
matrix rotateZ = { static_cast<float>(cos(_3.rotation[2])), -static_cast<float>(sin(_3.rotation[2])), 0, 0, static_cast<float>(sin(_3.rotation[2])), static_cast<float>(cos(_3.rotation[2])), 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
matrix scale = { _3.scale[0], 0, 0, 0, 0, _3.scale[1], 0, 0, 0, 0, _3.scale[2], 0, 0, 0, 0, 1 };
matrix translate = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, _3.translation[0], _3.translation[1], _3.translation[2], 1 };
///////////////////////////////////////////////////////////////////////////////////////////
D3D11_MAPPED_SUBRESOURCE mappedSubresource = {};
d2d.context->Map(constantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedSubresource);
Constants* constants = reinterpret_cast<Constants*>(mappedSubresource.pData);
constants->Transform = rotateX * rotateY * rotateZ * scale * translate;
constants->Projection = { 2 * n / w, 0, 0, 0, 0, 2 * n / h, 0, 0, 0, 0, f / (f - n), 1, 0, 0, n * f / (n - f), 0 };
constants->LightVector = { 1.0f, 1.0f, 1.0f };
d2d.context->Unmap(constantBuffer, 0);
///////////////////////////////////////////////////////////////////////////////////////////
FLOAT backgroundColor[4] = { 0.00f, 0.00f, 0.00f, 1.0f };
if (bcol.a > 0)
{
backgroundColor[0] = bcol.r;
backgroundColor[1] = bcol.g;
backgroundColor[2] = bcol.b;
backgroundColor[3] = bcol.a;
}
UINT stride = 12 * 4; // vertex size (12 floats: float3 position, float3 normal, float2 texcoord, float4 color)
UINT offset = 0;
D3D11_VIEWPORT viewport = { 0.0f, 0.0f, w, h, 0.0f, 1.0f };
///////////////////////////////////////////////////////////////////////////////////////////
auto deviceContext = d2d.context;
deviceContext->ClearRenderTargetView(frameBufferView, backgroundColor);
deviceContext->ClearDepthStencilView(depthBufferView, D3D11_CLEAR_DEPTH, 1.0f, 0);
deviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
deviceContext->IASetInputLayout(inputLayout);
deviceContext->IASetVertexBuffers(0, 1, &o.vertexBuffer.p, &stride, &offset);
deviceContext->IASetIndexBuffer(o.indexBuffer, DXGI_FORMAT_R32_UINT, 0);
deviceContext->VSSetShader(vertexShader, nullptr, 0);
deviceContext->VSSetConstantBuffers(0, 1, &constantBuffer.p);
deviceContext->RSSetViewports(1, &viewport);
deviceContext->PSSetShader(pixelShader, nullptr, 0);
std::vector<ID3D11ShaderResourceView*> rsx;
for (auto& t : o.textures)
rsx.push_back(t.textureView);
ID3D11ShaderResourceView** rr = rsx.data();
deviceContext->PSSetShaderResources(0, rsx.size(), rr);
deviceContext->PSSetSamplers(0, 1, &samplerState.p);
deviceContext->OMSetRenderTargets(1, &frameBufferView.p, depthBufferView);
deviceContext->OMSetDepthStencilState(depthStencilState, 0);
///////////////////////////////////////////////////////////////////////////////////////////
DXGI_RGBA ra = { 1,1,1,1 };
deviceContext->DrawIndexed(o.IndicesSize, 0, 0);
d2d.m_swapChain1->Present(1, 0);
}
Entire project here: https://drive.google.com/open?id=1BbW3DUd20bAwei4KjnkUPwgm5Ia1aRxl
This is what I got after I was able to reproduce the OP's issue on my side:
My only change was that I excluded lighting in the shader code:
vs_out vs_main(vs_in input)
{
float light = 1.0f;
//float light = clamp(dot(normalize(mul(float4(input.normal, 0.0f), transform).xyz), normalize(-lightvector)), 0.0f, 1.0f) * 0.8f + 0.2f;
vs_out output;
output.position = mul(float4(input.position, 1.0f), mul(transform, projection));
output.texcoord = input.texcoord;
output.color = float4(input.color.rgb * light, input.color.a);
return output;
}
Then I became aware of the cat's eye on the cat's tail.
That reminded me that a lot of image formats store the image from top to bottom.
OpenGL textures (and probably Direct3D as well) usually have their origin in the lower left corner. Hence, it's not unusual for texture images to be mirrored vertically (during or after loading the image from file and before sending it to the GPU).
To prove my suspicion, I mirrored the image manually (in GIMP) and then (without re-compiling) got this:
It looks like my suspicion was right.
Something is wrong with the image or texture loading in the OP's loader.
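In code terms that usually means either flipping the rows of the bitmap before it is uploaded, or equivalently remapping every texture coordinate as v' = 1 - v somewhere in the loading path; which of the two belongs in the loader depends on where the flip is currently missing.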
With WebGL 2 we now can play with Uniform Buffer Objects.
They look like a great idea, not having to attach common uniforms to every single program (like projection and view matrices that are common to every object being rendered).
I created a helper class which I call every time I want to bind a uniform buffer object.
class UniformBuffer {
constructor(gl, data, boundLocation = 0) {
this.boundLocation = boundLocation;
this.data = new Float32Array(data);
this.buffer = gl.createBuffer();
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferData(gl.UNIFORM_BUFFER, this.data, gl.DYNAMIC_DRAW);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
gl.bindBufferBase(gl.UNIFORM_BUFFER, this.boundLocation, this.buffer);
}
update(gl, data, offset = 0) {
this.data.set(data, offset);
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferSubData(gl.UNIFORM_BUFFER, 0, this.data, 0, null);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
gl.bindBufferBase(gl.UNIFORM_BUFFER, this.boundLocation, this.buffer);
}
};
The idea is to create the uniform buffers like this
const perScene = new UniformBuffer(gl, [
...vec4.create(),
...vec4.create(),
], 0); // and bind it to bind location 0?
const perObject = new UniformBuffer(gl, [
...vec4.create(),
], 1); // and bind it to bind location 1?
In my render loop, I then update the "perScene" uniforms by calling
perScene.update(gl, [
...vec4.fromValues(1, 0, 0, 1),
], 4); // giving an offset to update only the 2nd color.
Then I'll loop through all the objects in the scene, and my idea is to update the perObject uniform buffer like this
for (let i = 0; i < objects.length; i++) {
perObject.update(gl, [
...vec4.fromValues(0, 0, 1, 1),
]);
}
I'm talking about vec4 just to make the example easier, but the idea is to have matrices (projection and view) on the perScene, and (object and normal matrices) on the perObject.
In my shader I have them declared as
uniform perScene {
vec4 color1;
vec4 color2;
};
uniform perModel {
vec4 color3;
};
I have a working snippet here
class UniformBuffer {
constructor(gl, data, boundLocation = 0) {
this.boundLocation = boundLocation;
this.data = new Float32Array(data);
this.buffer = gl.createBuffer();
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferData(gl.UNIFORM_BUFFER, this.data, gl.DYNAMIC_DRAW);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
gl.bindBufferBase(gl.UNIFORM_BUFFER, this.boundLocation, this.buffer);
}
update(gl, data, offset = 0) {
this.data.set(data, offset);
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferSubData(gl.UNIFORM_BUFFER, 0, this.data, 0, null);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
gl.bindBufferBase(gl.UNIFORM_BUFFER, this.boundLocation, this.buffer);
}
};
const vertex = `#version 300 es
uniform perScene {
vec4 color1;
vec4 color2;
};
uniform perModel {
vec4 color3;
};
in vec3 a_position;
out vec3 v_color;
void main() {
gl_Position = vec4(a_position, 1.0);
v_color = color1.rgb + color2.rgb; // WORKS
// v_color = color1.rgb + color2.rgb + color3.rgb; // DOESNT WORK
}
`;
const fragment = `#version 300 es
precision highp float;
precision highp int;
in vec3 v_color;
out vec4 outColor;
void main() {
outColor = vec4(v_color, 1.0);
}
`;
const geometry = {
positions: [-0.5, -0.5, 0, -0.5, 0.5, 0, 0.5, -0.5, 0, 0.5, 0.5, 0],
indices: [0, 2, 1, 1, 2, 3],
};
const renderList = [];
// STEP 1 (create canvas)
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl2");
if (!gl) {
console.log('no webgl2 buddy');
}
// STEP 2 (create program)
const v = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(v, vertex);
gl.compileShader(v);
const f = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(f, fragment);
gl.compileShader(f);
const program = gl.createProgram();
gl.attachShader(program, v);
gl.attachShader(program, f);
gl.linkProgram(program);
// STEP 3 (create VAO)
const positionAttributeLocation = gl.getAttribLocation(program, 'a_position');
const colorUniformLocation = gl.getUniformLocation(program, 'color');
const positionsBuffer = gl.createBuffer();
const indicesBuffer = gl.createBuffer();
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
// position & indices
gl.enableVertexAttribArray(positionAttributeLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, positionsBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(geometry.positions), gl.STATIC_DRAW);
gl.vertexAttribPointer(positionAttributeLocation, 3, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indicesBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(geometry.indices), gl.STATIC_DRAW);
// STEP 4 (create UBO)
// bound to location 0
const perScene = new UniformBuffer(gl, [
...vec4.create(), // color 1
...vec4.create(), // color 2
], 0);
// bound to location 1 ?
const perModel = new UniformBuffer(gl, [
...vec4.create(), // color 3
], 3);
// STEP 5 (add instances)
for (let i = 0; i < 1; i++) {
renderList.push({
id: i,
vao: vao,
program: program,
color: [0, 1, 1],
});
}
// STEP 6 (draw)
gl.clearColor(0, 0, 0, 0);
gl.enable(gl.DEPTH_TEST);
gl.viewport(0, 0, canvas.width, canvas.height);
perScene.update(gl, [
...vec4.fromValues(1, 0, 0, 1),
...vec4.fromValues(0, 1, 0, 1),
]);
for (let i = 0; i < renderList.length; i++) {
const current = renderList[i];
gl.useProgram(current.program);
gl.bindVertexArray(current.vao);
// update perObject
perModel.update(gl, [
...vec4.fromValues(0, 0, 1, 1),
]);
gl.drawElements(gl.TRIANGLES, geometry.indices.length, gl.UNSIGNED_SHORT, 0);
// unbind
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null);
}
console.log('compiled!');
canvas {
background-color: black;
}
<canvas id="canvas"></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.3.2/gl-matrix-min.js"></script>
Shouldn't I be seeing a white square since all colours added up result in a vec4(1.0, 1.0, 1.0, 1.0)? (jsfiddle line 41)
What am I doing wrong?
Thanks
So, the first thing you're doing wrong is you're not calling gl.getUniformBlockIndex. Just like uniforms, you have to query the location, or in this case the index, of each block.
The second thing is that uniform blocks are indirected one level: you need to call gl.uniformBlockBinding(program, uniformBlockIndex, uniformBufferIndex);
uniformBlockIndex is the index you got from gl.getUniformBlockIndex. uniformBufferIndex is similar to a texture unit: there are N uniform buffer indices, and you can choose any buffer index from 0 to MAX_UNIFORM_BUFFER_BINDINGS - 1.
This indirection helps if you have one program that uses blocks A and B and another that uses A and C. In that case block A might have a different index in the 2 programs, but you can have it pull its values from the same uniformBufferIndex.
Note that this state is per-program state, so you can probably set it at init time if you plan to always use the same uniform buffer index for the same uniform block.
To spell it out even more: you have a shader program. It has state
var someProgram = {
uniforms: {
projectionMatrix: [1, 0, 0, 0, 0, ... ], // etc
},
uniformBlockIndices: [ // one per uniform block
0,
0,
0,
],
...
}
Next you have uniform buffer indices which are global state
glState = {
textureUnits: [ ... ],
uniformBuffers: [ null, null, null ..., ],
};
You tell the program for each uniform buffer block, which uniform buffer index to use with gl.uniformBlockBinding. You then bind a buffer to that index with gl.bindBufferBase or gl.bindBufferRange.
It's very similar to telling a program which texture unit to use and then binding a texture to that unit. Whether you do this at init time or at render time is really up to you. In my mind it seems more likely I could decide at init time that my perScene stuff is always on buffer index 0 and perModel stuff at index 1, and therefore I could set up the program parts (the calls to gl.uniformBlockBinding) at init time.
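A minimal sketch of that init-time setup (perSceneBuffer and perModelBuffer stand in for whatever WebGLBuffers you created; the index values are just a choice):
// pick the uniform buffer indices once, at init time
const perSceneIndex = 0;
const perModelIndex = 1;
// tell the program which index each of its blocks should read from
gl.uniformBlockBinding(program, gl.getUniformBlockIndex(program, 'perScene'), perSceneIndex);
gl.uniformBlockBinding(program, gl.getUniformBlockIndex(program, 'perModel'), perModelIndex);
// bind the actual buffers to those same indices
gl.bindBufferBase(gl.UNIFORM_BUFFER, perSceneIndex, perSceneBuffer);
gl.bindBufferBase(gl.UNIFORM_BUFFER, perModelIndex, perModelBuffer);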
class UniformBuffer {
constructor(gl, data, boundLocation = 0) {
this.boundLocation = boundLocation;
this.data = new Float32Array(data);
this.buffer = gl.createBuffer();
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferData(gl.UNIFORM_BUFFER, this.data, gl.DYNAMIC_DRAW);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
gl.bindBufferBase(gl.UNIFORM_BUFFER, this.boundLocation, this.buffer);
}
update(gl, data, offset = 0) {
this.data.set(data, offset);
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferSubData(gl.UNIFORM_BUFFER, 0, this.data, 0, null);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
gl.bindBufferBase(gl.UNIFORM_BUFFER, this.boundLocation, this.buffer);
}
};
const vertex = `#version 300 es
uniform perScene {
vec4 color1;
vec4 color2;
};
uniform perModel {
vec4 color3;
};
in vec3 a_position;
out vec3 v_color;
void main() {
gl_Position = vec4(a_position, 1.0);
v_color = color1.rgb + color2.rgb + color3.rgb;
}
`;
const fragment = `#version 300 es
precision highp float;
precision highp int;
in vec3 v_color;
out vec4 outColor;
void main() {
outColor = vec4(v_color, 1.0);
}
`;
const geometry = {
positions: [-0.5, -0.5, 0, -0.5, 0.5, 0, 0.5, -0.5, 0, 0.5, 0.5, 0],
indices: [0, 2, 1, 1, 2, 3],
};
const renderList = [];
// STEP 1 (create canvas)
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl2");
if (!gl) {
console.log('no webgl2 buddy');
}
// STEP 2 (create program)
const v = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(v, vertex);
gl.compileShader(v);
const f = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(f, fragment);
gl.compileShader(f);
const program = gl.createProgram();
gl.attachShader(program, v);
gl.attachShader(program, f);
gl.linkProgram(program);
// STEP 3 (create VAO)
const positionAttributeLocation = gl.getAttribLocation(program, 'a_position');
const colorUniformLocation = gl.getUniformLocation(program, 'color');
const positionsBuffer = gl.createBuffer();
const indicesBuffer = gl.createBuffer();
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
// position & indices
gl.enableVertexAttribArray(positionAttributeLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, positionsBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(geometry.positions), gl.STATIC_DRAW);
gl.vertexAttribPointer(positionAttributeLocation, 3, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indicesBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(geometry.indices), gl.STATIC_DRAW);
// STEP 4 (create UBO)
// bound to location 0
const perScene = new UniformBuffer(gl, [
...vec4.create(), // color 1
...vec4.create(), // color 2
], 0);
// bound to location 1 ?
const perModel = new UniformBuffer(gl, [
...vec4.create(), // color 3
], 1);
gl.uniformBlockBinding(program, gl.getUniformBlockIndex(program, "perScene"), perScene.boundLocation);
gl.uniformBlockBinding(program, gl.getUniformBlockIndex(program, "perModel"), perModel.boundLocation);
// STEP 5 (add instances)
for (let i = 0; i < 1; i++) {
renderList.push({
id: i,
vao: vao,
program: program,
color: [0, 1, 1],
});
}
// STEP 6 (draw)
gl.clearColor(0, 0, 0, 0);
gl.enable(gl.DEPTH_TEST);
gl.viewport(0, 0, canvas.width, canvas.height);
perScene.update(gl, [
...vec4.fromValues(1, 0, 0, 1),
...vec4.fromValues(0, 1, 0, 1),
]);
for (let i = 0; i < renderList.length; i++) {
const current = renderList[i];
gl.useProgram(current.program);
gl.bindVertexArray(current.vao);
// update perObject
perModel.update(gl, [
...vec4.fromValues(0, 0, 1, 1),
]);
gl.drawElements(gl.TRIANGLES, geometry.indices.length, gl.UNSIGNED_SHORT, 0);
// unbind
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null);
}
console.log('compiled!');
canvas {
background-color: black;
}
<canvas id="canvas"></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/gl-matrix/2.3.2/gl-matrix-min.js"></script>
In this example there are 5 uniform blocks.
the shared matrices like projection and view and viewProjection
the per model matrices like world and worldInverseTransform
the per light info like lightPosition and lightColor.
There are 2 lights so the 4th block is similar to the 3rd
the material data like ambient color, specularity, etc..
I'm not saying that's the perfect setup. I really have no idea. But it's pretty common to make something called a "material" and share that material among more than one model so that's like a perMaterial block which is different from a perModel block. It's also common to share lighting info. I don't know what the ideal setup is, just pointing out that perScene and perModel might not be enough for fairly common situations.
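As a rough sketch of what those block declarations might look like in the shader (the block and member names here are only illustrative):
uniform SharedMatrices {   // per scene
  mat4 view;
  mat4 projection;
  mat4 viewProjection;
};
uniform PerModel {         // per model
  mat4 world;
  mat4 worldInverseTransform;
};
uniform LightInfo {        // one of these per light
  vec4 lightPosition;
  vec4 lightColor;
};
uniform MaterialInfo {     // per material, shared between models
  vec4 ambient;
  vec4 specular;
};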
One other thing, this line
// unbind
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null);
makes no sense. ELEMENT_ARRAY_BUFFER is part of the VAO state.
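Concretely, because the ELEMENT_ARRAY_BUFFER binding lives in the VAO, that line records null into the currently bound VAO. A sketch of the difference:
// while `current.vao` is still bound, this detaches its index buffer:
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null);   // don't do this
// if you want to "clean up" after drawing, unbind the vertex array instead:
gl.bindVertexArray(null);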
As gman said, get the index of the uniform block and then bind it with gl.bindBufferBase.
Your updated class should look something like:
class UniformBuffer {
constructor(gl, data, program, uniformName, targetIndex = 0) {
this.data = new Float32Array(data);
const boundLocation = gl.getUniformBlockIndex(program, uniformName);
this.buffer = gl.createBuffer();
gl.bindBufferBase(gl.UNIFORM_BUFFER, boundLocation, this.buffer);
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferData(gl.UNIFORM_BUFFER, this.data, gl.DYNAMIC_DRAW);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
}
update(gl, data, offset = 0) {
this.data.set(data, offset);
gl.bindBuffer(gl.UNIFORM_BUFFER, this.buffer);
gl.bufferSubData(gl.UNIFORM_BUFFER, 0, this.data, 0, null);
gl.bindBuffer(gl.UNIFORM_BUFFER, null);
}
};
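Hypothetical usage of that updated class, reusing the program and block names from the snippets above:
// the class now looks the block index up itself, so you just name the block
const perScene = new UniformBuffer(gl, [
  ...vec4.create(),   // color1
  ...vec4.create(),   // color2
], program, 'perScene');
const perModel = new UniformBuffer(gl, [
  ...vec4.create(),   // color3
], program, 'perModel');
perScene.update(gl, [
  ...vec4.fromValues(1, 0, 0, 1),
  ...vec4.fromValues(0, 1, 0, 1),
]);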
I am attempting to write a WebGL image filter using the PixiJS library. My filter should take an array of arrays, where each inner array represents a possible pixel color. The function will then decide which element in the outer array to use.
I have managed to write a simple GLSL function which takes a single color array as a "uniform" argument; however, I can't figure out how to pass the nested array. Could you point me to the proper type declaration to accept a nested array of floats in this snippet?
var fragmentSrc = [
"uniform vec4 colorList;", // WHAT TYPE DO I NEED HERE TO PASS THE ARRY IN THE COMMENT BELOW?
"void main() {",
" float GrayScale = (gl_FragCoord.r * 299.0 / 1000.0) + (gl_FragCoord.g * 587.0 / 1000.0) + (gl_FragCoord.b * 114.0 / 1000.0);",
" float sigmoidThreshold = 1.0 / (1.0 + pow(2.7182818284590452353602874713527, (-((GrayScale - 128.0) /32.0))));",
" gl_FragColor = colorList;",
"}",
];
var renderer = PIXI.autoDetectRenderer(750, 750);
document.body.appendChild(renderer.view);
var stage = new PIXI.Container();
function CustomFilter(fragmentSource) {
PIXI.Filter.call(this,
null,
fragmentSource
);
}
CustomFilter.prototype = Object.create(PIXI.Filter.prototype);
CustomFilter.prototype.constructor = CustomFilter;
var bg = new PIXI.Graphics();
bg.drawRect(0, 0, 375, 375);
bg.endFill();
stage.addChild(bg);
var filter = new CustomFilter(fragmentSrc.join('\r\n'));
filter.uniforms.colorList = [1.0, 1.0, 0.0, 1.0] // WANT TO PASS AN ARRAY OF ARRAYS LIKE:
// [[1.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]]
bg.filters = [filter];
renderer.render(stage);
<script src="https://cdnjs.cloudflare.com/ajax/libs/pixi.js/4.5.1/pixi.min.js"></script>
var fragmentSrc = [
"uniform vec4 colorList;", // WHAT TYPE DO I NEED HERE TO PASS THE ARRY IN THE COMMENT BELOW?
"void main() {",
" float GrayScale = (gl_FragCoord.r * 299.0 / 1000.0) + (gl_FragCoord.g * 587.0 / 1000.0) + (gl_FragCoord.b * 114.0 / 1000.0);",
" float sigmoidThreshold = 1.0 / (1.0 + pow(2.7182818284590452353602874713527, (-((GrayScale - 128.0) /32.0))));",
" gl_FragColor = colorList;",
"}",
];
I changed the code to this
var fragmentSrc = `
uniform vec4 colorList[10];
void main() {
float GrayScale = (gl_FragCoord.r * 299.0 / 1000.0) + (gl_FragCoord.g * 587.0 / 1000.0) + (gl_FragCoord.b * 114.0 / 1000.0);
float sigmoidThreshold = 1.0 / (1.0 + pow(2.7182818284590452353602874713527, (-((GrayScale - 128.0) /32.0))));
gl_FragColor = colorList[9];
}
`;
var filter = new CustomFilter(fragmentSrc);
console.log(filter.uniforms);
And it prints
so this works
filter.uniforms.colorList = [
1, 0, 0, 0, // 0
1, 1, 0, 0, // 1
0, 1, 0, 0, // 2
0, 1, 1, 0, // 3
0, 0, 1, 0, // 4
1, 0, 1, 0, // 5
.5, 0, 0, 0, // 6
0, .5, 0, 0, // 7
1, 1, 0, 1, // 8
.5, .5, .7, 1., // 9
];
and this
filter.uniforms.colorList = new Float32Array([
1, 0, 0, 0, // 0
1, 1, 0, 0, // 1
0, 1, 0, 0, // 2
0, 1, 1, 0, // 3
0, 0, 1, 0, // 4
1, 0, 1, 0, // 5
.5, 0, 0, 0, // 6
0, .5, 0, 0, // 7
1, 1, 0, 1, // 8
.5, .5, .7, 1., // 9
]);
etc...
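If you would rather keep the nested-array form on the JavaScript side, you can also flatten it yourself before assigning it (nestedColors here is hypothetical):
const nestedColors = [
  [1.0, 1.0, 0.0, 1.0],
  [0.0, 0.0, 1.0, 1.0],
  // ...up to 10 inner arrays to match `uniform vec4 colorList[10]`
];
// flatten to the single list of floats the uniform expects
filter.uniforms.colorList = new Float32Array(nestedColors.flat());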
If you really want an array of arrays in JavaScript you can do this to make ArrayBufferViews into the larger array
const colorValues = [];
for (let i = 0; i < filter.uniforms.colorList.length; i += 4) {
const buffer = filter.uniforms.colorList.buffer;
const byteOffset = i * Float32Array.BYTES_PER_ELEMENT;
const length = 4;
colorValues.push(new Float32Array(buffer, byteOffset, length));
}
now you can set an array element like this
colorValues[9].set([1, 1, 0, 1]);
var fragmentSrc = `
uniform vec4 colorList[10]; // WHAT TYPE DO I NEED HERE TO PASS THE ARRAY IN THE COMMENT BELOW?
void main() {
float GrayScale = (gl_FragCoord.r * 299.0 / 1000.0) + (gl_FragCoord.g * 587.0 / 1000.0) + (gl_FragCoord.b * 114.0 / 1000.0);
float sigmoidThreshold = 1.0 / (1.0 + pow(2.7182818284590452353602874713527, (-((GrayScale - 128.0) /32.0))));
gl_FragColor = colorList[9];
}
`;
var renderer = PIXI.autoDetectRenderer(750, 750);
document.body.appendChild(renderer.view);
var stage = new PIXI.Container();
function CustomFilter(fragmentSource) {
PIXI.Filter.call(this,
null,
fragmentSource
);
}
CustomFilter.prototype = Object.create(PIXI.Filter.prototype);
CustomFilter.prototype.constructor = CustomFilter;
var bg = new PIXI.Graphics();
bg.drawRect(0, 0, 375, 375);
bg.endFill();
stage.addChild(bg);
var filter = new CustomFilter(fragmentSrc);
const colorValues = [];
for (let i = 0; i < filter.uniforms.colorList.length; i += 4) {
const buffer = filter.uniforms.colorList.buffer;
const byteOffset = i * Float32Array.BYTES_PER_ELEMENT;
const length = 4;
colorValues.push(new Float32Array(buffer, byteOffset, length));
}
colorValues[9].set([1, 1, 0, 1]);
bg.filters = [filter];
renderer.render(stage);
<script src="https://cdnjs.cloudflare.com/ajax/libs/pixi.js/4.5.1/pixi.min.js"></script>
I am trying to implement picking by an int id, but it looks like my shader doesn't write anything, although I can read the clear color properly.
The vertex shader; I skipped the matrices completely for debugging:
#version 330
#include semantic.glsl
layout (location = POSITION) in vec3 position;
uniform Transform0
{
mat4 view;
mat4 proj;
mat4 viewProj;
};
uniform Transform1
{
mat4[TRANSFORM1_SIZE] models;
};
uniform Parameters
{
// x = mesh baseVertex
// y = selected
// z = active
// w = id
ivec4[INDICES_SIZE] params;
};
out Block
{
flat int id;
} outBlock;
int getIndex()
{
int iBegin = 0;
int iEnd = params.length() - 1;
int l = iBegin;
int r = iEnd;
int i = 0;
if(params.length() > 1)
{
do
{
i = int(((l + r) / 2.0f));
if (l == (r - 1))
if (l == 0 && gl_VertexID <= params[l].x || gl_VertexID <= params[l].x && gl_VertexID > params[l - 1].x)
return l;
else if(gl_VertexID > params[l].x && gl_VertexID <= params[r].x)
return r;
else
return 0;
else if (gl_VertexID == params[i].x)
return i;
else if (gl_VertexID < params[i].x)
r = i;
else if (gl_VertexID > params[i].x)
l = i;
} while (l < r);
}
return 0;
}
void main()
{
int index = getIndex();
mat4 model = models[index];
//gl_Position = proj * (view * (model * vec4(position, 1)));
gl_Position = vec4(4.0 * float(gl_VertexID % 2) - 1.0, 4.0 * float(gl_VertexID / 2) - 1.0, 0.0, 1.0);
outBlock.id = params[index].w;
}
The fragment shader; hardcoded value for the moment, output location is 0 (FRAG_COLOR); I also tried 1, but nothing:
#version 330
#include semantic.glsl
// Outgoing final color.
layout (location = FRAG_COLOR) out int outputColor;
in Block
{
flat int id;
} inBlock;
void main()
{
//outputColor = inBlock.id;
outputColor = 9;
}
Init phase: I have one fbo, called RESOLVE, with 3 attachments: one depth, one float color on 0 and one integer for picking on 1. The fbo is complete:
gl3.glBindTexture(GL_TEXTURE_2D, textureName.get(Texture.RESOLVE_ID));
gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
gl3.glTexImage2D(GL_TEXTURE_2D, 0, GL_R32I, EC.viewer.size.x, EC.viewer.size.y, 0, GL_RED_INTEGER, GL_INT, null);
gl3.glGenFramebuffers(Framebuffer.MAX - (samples == 1 ? 1 : 0), framebufferName);
gl3.glBindFramebuffer(GL_FRAMEBUFFER, framebufferName.get(Framebuffer.RESOLVE));
gl3.glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, textureName.get(Texture.RESOLVE_DEPTH), 0);
gl3.glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, textureName.get(Texture.RESOLVE_COLOR), 0);
gl3.glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, textureName.get(Texture.RESOLVE_ID), 0);
if (gl3.glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
System.err.println("incomplete");
return false;
}
And the render & read-back; I also added glFlush, glFinish and glPixelStorei, but nothing:
gl3.glBindFramebuffer(GL_FRAMEBUFFER, framebufferName.get(Framebuffer.RESOLVE));
gl3.glDrawBuffer(GL_COLOR_ATTACHMENT1);
gl3.glClearBufferiv(GL_COLOR, 0, clearId.put(0, 0)); // we only care about clearing red
gl3.glBindVertexArray(EC.meshManager.vertexArrayName.get(0));
gl3.glEnable(GL_DEPTH_TEST);
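// NB: the depth test is enabled here but the depth buffer of this fbo is never cleared for this pass; as noted at the end, that is what ends up rejecting every fragment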
gl3.glUseProgram(program.name);
gl3.glDrawElements(GL_TRIANGLES, EC_Gl3MeshManager.ELEMENT_OPAQUE_COUNT, GL_UNSIGNED_INT, 0);
gl3.glDisable(GL_DEPTH_TEST);
gl3.glReadBuffer(GL_COLOR_ATTACHMENT1);
glm.vec._2.i.Vec2i window = new glm.vec._2.i.Vec2i(
EC.inputListener.mousePressed.getX(),
EC.viewer.size.y - EC.inputListener.mousePressed.getY() - 1);
System.out.println("window (" + window.x + ", " + window.y + ")");
gl3.glFlush();
gl3.glFinish();
gl3.glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Get the red coordinate reading the pixel click from the color 1 buffer, we can use the clearId buffer
gl3.glReadPixels(window.x, window.y, 1, 1, GL_RED_INTEGER, GL_INT, clearId);
gl3.glReadBuffer(GL_COLOR_ATTACHMENT0);
As I said, if I clear with 10, I read 10, so that part is working; the only problematic thing is my shader.
Can you spot the error, guys?
Edit: trying to debug, I am checking the attachment type on color 1
gl3.glGetFramebufferAttachmentParameteriv(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1,
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE, clearId);
System.out.println("GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE: " + clearId.get(0));
gl3.glGetFramebufferAttachmentParameteriv(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1,
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME, clearId);
System.out.println("GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME: " + clearId.get(0)+", tex: "+textureName.get(Texture.RESOLVE_ID));
I get:
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE: 5890
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME: 14, tex: 14
5890 is GL_TEXTURE, and the name looks correct.
I tried to use glFramebufferTexture2D, but I still get the GL_TEXTURE type instead of GL_TEXTURE_2D.
Edit 2: trying to read the depth component, it is always 0.
Shame on me, the problem was the depth test: since I forgot to clear the depth buffer, it was never passing.
I wrote a program to implement environment mapping using OpenGL and the Cg shader language, but the result is not quite right. When calculating the color of the model, we blend the reflection with a decal texture. A uniform parameter called reflectivity allows the application to control how reflective the material is.
First, I list my fragment Cg code:
void main_f(float2 texCoord : TEXCOORD0,
float3 R : TEXCOORD1,
out float4 color : COLOR,
uniform float reflectivity,
uniform sampler2D decalMap,
uniform samplerCUBE environmentMap)
{
//fetch reflected environment color
float3 reflectedColor = texCUBE(environmentMap,R);
//fetch the decal base color
float3 decalColor = tex2D(decalMap,texCoord);
color.xyz = lerp(reflectedColor,decalColor,reflectivity);//change !!!!!!!!
color.w = 1;
}
I set the uniform parameter reflectivity to 0.6. And the result is:
As we can see, the color information from the decal texture is lost. There is only color information from the environment cube texture. And if I set reflectivity to 0, the model will be dark.
But if I change the color.xyz in the fragment Cg code to:
color.xyz = decalColor;
I can get the right result (only color from the decal texture):
And if I change the color.xyz in the fragment Cg code to:
color.xyz = reflectedColor;
I can get the right result (only color from the environment cube texture), too:
And my question is:
Why does it not work when I blend the color information from the decal texture with the color information from the environment cube texture using the Cg function lerp?
Finally, I list my Cg vertex shader and cpp file:
vertex.cg:
void main_v(float4 position : POSITION,
float2 texCoord : TEXCOORD0,//decal texture
float3 normal : NORMAL,
out float4 oPosition : POSITION,
out float2 oTexCoord : TEXCOORD0,//out decal texture
out float3 R : TEXCOORD1,//reflective vector
uniform float3 eyePositionW,//eye position in world space
uniform float4x4 modelViewProj,
uniform float4x4 modelToWorld
)
{
modelViewProj = glstate.matrix.mvp;
oPosition = mul(modelViewProj,position);
oTexCoord = texCoord;
float3 positionW = mul(modelToWorld,position).xyz;
float3 N = mul((float3x3)modelToWorld,normal);
N = normalize(N);
float3 I = positionW - eyePositionW;//incident vector
R = reflect(I,N);
}
main.cpp:
#pragma comment(lib,"glew32.lib")
#pragma comment(lib,"GLAUX.LIB")
#pragma comment(lib,"cg.lib")
#pragma comment(lib,"cgGL.lib")
#include <GL/glew.h>
#include <GL/glut.h>
#include <GL/glaux.h>
#include <CG/cg.h>
#include <CG/cgGL.h>
#include "MonkeyHead.h"
#include <iostream>
#include <cmath>
using namespace std;
int loop;
/* Use enum to assign unique symbolic OpenGL texture names. */
enum {
TO_BOGUS = 0,
TO_DECAL,
TO_ENVIRONMENT,
};
const double myPi = 3.14159;
//for Cg shader
static CGcontext myCgContext;
static CGprofile myCgVertexProfile,myCgFragmentProfile;
static CGprogram myCgVertexProgram,myCgFragmentProgram;
static const char *myProgramName = "CgTest18CubeMapReflective",
*myVertexProgramFileName = "vertex.cg",
*myVertexProgramName = "main_v",
*myFragmentProgramFileName = "fragment.cg",
*myFragmentProgramName = "main_f";
static CGparameter myCgVertexParam_modelToWorld;
//bmp files for cube map
const char *bmpFile[6] = {"Data/1.bmp","Data/2.bmp","Data/3.bmp",
"Data/4.bmp","Data/5.bmp","Data/6.bmp"};
const char *decalBmpFile = "Data/decal.bmp";
static float eyeAngle = 0.53;
static float eyeHeight = 0.0f;
static float headSpain = 0.0f;
static const GLfloat vertex[4*6][3] = {
/* Positive X face. */
{ 1, -1, -1 }, { 1, 1, -1 }, { 1, 1, 1 }, { 1, -1, 1 },
/* Negative X face. */
{ -1, -1, -1 }, { -1, 1, -1 }, { -1, 1, 1 }, { -1, -1, 1 },
/* Positive Y face. */
{ -1, 1, -1 }, { 1, 1, -1 }, { 1, 1, 1 }, { -1, 1, 1 },
/* Negative Y face. */
{ -1, -1, -1 }, { 1, -1, -1 }, { 1, -1, 1 }, { -1, -1, 1 },
/* Positive Z face. */
{ -1, -1, 1 }, { 1, -1, 1 }, { 1, 1, 1 }, { -1, 1, 1 },
/* Negative Z face. */
{ -1, -1, -1 }, { 1, -1, -1 }, { 1, 1, -1 }, { -1, 1, -1 },
};
static float reflectivity = 0.6;
GLuint decalTexture;
bool animating = false;//enable animating or not
static void drawMonkeyHead()
{
static GLfloat *texCoords = NULL;
const int numVertices = sizeof(MonkeyHead_vertices)
/ (3 * sizeof(MonkeyHead_vertices[0]));
const float scaleFactor = 1.5;
//generate texcoords
texCoords = (GLfloat*)malloc(2 * numVertices * sizeof(GLfloat));
if (!texCoords)
{
cerr << "ERROR : Monkey head texcoords memory malloc failed !" << endl;
exit(1);
}
for (loop = 0;loop < numVertices;++loop)
{
texCoords[loop * 2] = scaleFactor * MonkeyHead_vertices[3 * loop];
texCoords[loop * 2 + 1] = scaleFactor * MonkeyHead_vertices[3 * loop + 1];
}
//use vertex array
//enable array
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
//assign array data
glVertexPointer(3,GL_FLOAT,3 * sizeof(GLfloat),MonkeyHead_vertices);
glNormalPointer(GL_FLOAT,3 * sizeof(GLfloat),MonkeyHead_normals);
glTexCoordPointer(2,GL_FLOAT,2 * sizeof(GLfloat),texCoords);
glDrawElements(GL_TRIANGLES,3 * MonkeyHead_num_of_triangles,
GL_UNSIGNED_SHORT,MonkeyHead_triangles);
}
//read bmp image file
AUX_RGBImageRec *LoadBMP(const char *FileName)
{
FILE *File = NULL;
if(!FileName)
return NULL;
File = fopen(FileName,"r");
if (File)
{
fclose(File);
return auxDIBImageLoad(FileName);
}
return NULL;
}
//load decal texture from a bmp file
int loadDecalTexture()
{
int status = 1;
AUX_RGBImageRec *TextureImage = NULL;
if ((TextureImage = LoadBMP(decalBmpFile)))
{
glGenTextures(1,&decalTexture);
glBindTexture(GL_TEXTURE_2D,decalTexture);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGB,TextureImage->sizeX,
TextureImage->sizeY,0,GL_RGB,GL_UNSIGNED_BYTE,
TextureImage->data); // specify the texture
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // specify the filter mode
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
}
else
status = 0;
if (TextureImage)
{
if (TextureImage->data)
free(TextureImage->data);
free(TextureImage);
}
return status;
}
//load cube map from 6 bmp files
int loadCubeMap()
{
int status = 1;
AUX_RGBImageRec *TextureImage[6] = {NULL,NULL,NULL,NULL,NULL,NULL};
for (loop = 0;loop < 6;++loop)
{
if (!(TextureImage[loop] = LoadBMP(bmpFile[loop])))
{
cout << "ERROR :load bmp file " << loop << " failed !" << endl;
status = 0;
}
}
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGB, TextureImage[0] ->sizeX, TextureImage[0] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[0] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGB, TextureImage[1] ->sizeX, TextureImage[1] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[1] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGB, TextureImage[2] ->sizeX, TextureImage[2] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[2] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGB, TextureImage[3] ->sizeX, TextureImage[3] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[3] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGB, TextureImage[4] ->sizeX, TextureImage[4] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[4] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGB, TextureImage[5] ->sizeX, TextureImage[5] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[5] ->data);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//free memory
for (loop = 0;loop < 6;++loop)
{
if (TextureImage[loop])
{
if (TextureImage[loop] ->data)
{
free(TextureImage[loop] ->data);
}
free(TextureImage[loop]);
}
}
return status;
}
//draw the surroundings as a cube with each face of
//the cube environment map applied.
void drawSurroundings(const GLfloat *eyePosition)
{
const float surroundingsDistance = 8;
glLoadIdentity();
gluLookAt(eyePosition[0],eyePosition[1],eyePosition[2],
0,0,0,0,1,0);
glScalef(surroundingsDistance,
surroundingsDistance,
surroundingsDistance);
glEnable(GL_TEXTURE_CUBE_MAP);
glBindTexture(GL_TEXTURE_CUBE_MAP,TO_ENVIRONMENT);
glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_REPLACE);
glBegin(GL_QUADS);
for (loop = 0;loop < 4 * 6;++loop)
{
glTexCoord3fv(vertex[loop]);
glVertex3fv(vertex[loop]);
}
glEnd();
}
static void checkForCgError(const char *situation)
{
CGerror error;
const char *string = cgGetLastErrorString(&error);
if (error != CG_NO_ERROR) {
cout << "ERROR : " << myProgramName << situation << string << endl;
if (error == CG_COMPILER_ERROR) {
cout << cgGetLastListing(myCgContext) << endl;
}
exit(1);
}
}
//init Cg shaders
void initCg()
{
myCgContext = cgCreateContext();
myCgVertexProfile = cgGLGetLatestProfile(CG_GL_VERTEX);
cgGLSetOptimalOptions(myCgVertexProfile);
checkForCgError("selecting vertex profile");
myCgVertexProgram = cgCreateProgramFromFile(
myCgContext,
CG_SOURCE,
myVertexProgramFileName,
myCgVertexProfile,
myVertexProgramName,
NULL);
checkForCgError("Creating vertex Cg program from file");
cgGLLoadProgram(myCgVertexProgram);
checkForCgError("loading vertex program");
myCgFragmentProfile = cgGLGetLatestProfile(CG_GL_FRAGMENT);
cgGLSetOptimalOptions(myCgFragmentProfile);
checkForCgError("selecting fragment profile");
myCgFragmentProgram = cgCreateProgramFromFile(
myCgContext,
CG_SOURCE,
myFragmentProgramFileName,
myCgFragmentProfile,
myFragmentProgramName,
NULL);
checkForCgError("Creating fragment Cg program from file");
cgGLLoadProgram(myCgFragmentProgram);
checkForCgError("loading fragment program");
}
//compute rotate transformation matrix
void makeRotateMatrix(float angle,
float ax,float ay,float az,
float m[16])
{
float radians, sine, cosine, ab, bc, ca, tx, ty, tz;
float axis[3];
float mag;
axis[0] = ax;
axis[1] = ay;
axis[2] = az;
mag = sqrt(axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2]);
if (mag) {
axis[0] /= mag;
axis[1] /= mag;
axis[2] /= mag;
}
radians = angle * myPi / 180.0;
sine = sin(radians);
cosine = cos(radians);
ab = axis[0] * axis[1] * (1 - cosine);
bc = axis[1] * axis[2] * (1 - cosine);
ca = axis[2] * axis[0] * (1 - cosine);
tx = axis[0] * axis[0];
ty = axis[1] * axis[1];
tz = axis[2] * axis[2];
m[0] = tx + cosine * (1 - tx);
m[1] = ab + axis[2] * sine;
m[2] = ca - axis[1] * sine;
m[3] = 0.0f;
m[4] = ab - axis[2] * sine;
m[5] = ty + cosine * (1 - ty);
m[6] = bc + axis[0] * sine;
m[7] = 0.0f;
m[8] = ca + axis[1] * sine;
m[9] = bc - axis[0] * sine;
m[10] = tz + cosine * (1 - tz);
m[11] = 0;
m[12] = 0;
m[13] = 0;
m[14] = 0;
m[15] = 1;
}
//compute translation transformation matrix
static void makeTranslateMatrix(float x, float y, float z, float m[16])
{
m[0] = 1; m[1] = 0; m[2] = 0; m[3] = x;
m[4] = 0; m[5] = 1; m[6] = 0; m[7] = y;
m[8] = 0; m[9] = 0; m[10] = 1; m[11] = z;
m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
}
//multiply a float4x4 matrix by another float4x4 matrix
static void multMatrix(float dst[16],const float src1[16],const float src2[16])
{
for (int i = 0;i < 4;++i)
{
for (int j = 0;j < 4;++j)
{
dst[i * 4 + j] = src1[i * 4 + 0] * src2[0 * 4 + j] +
src1[i * 4 + 1] * src2[1 * 4 + j] +
src1[i * 4 + 2] * src2[2 * 4 + j] +
src1[i * 4 + 3] * src2[3 * 4 + j];
}
}
}
void init()
{
glewInit();
glClearColor(0.0,0.0,0.0,1.0);
glShadeModel(GL_SMOOTH);
glEnable(GL_DEPTH_TEST);
if (!loadDecalTexture())
{
cout << "ERROR : load decal texture from bmp file failed !" << endl;
exit(1);
}
glBindTexture(GL_TEXTURE_CUBE_MAP,TO_ENVIRONMENT);
if (!loadCubeMap())
{
cout << "ERROR : load cube map from bmp file failed !" << endl;
exit(1);
}
initCg();
}
void display()
{
const GLfloat eyePosition[4] = {6 * sin(eyeAngle),
eyeHeight,
6 * cos(eyeAngle),
1};
float tranlateMatrix[16],rotateMatrix[16],modelMatrix[16];
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
cgGLEnableProfile(myCgVertexProfile);
checkForCgError("enabling vertex profile");
cgGLEnableProfile(myCgFragmentProfile);
checkForCgError("enabling fragment profile");
cgGLBindProgram(myCgVertexProgram);
checkForCgError("binding vertex program");
cgGLBindProgram(myCgFragmentProgram);
checkForCgError("binding fragment program");
glLoadIdentity();
glTranslatef(0.0,0.0,-5.0);
glRotatef(headSpain,0,1,0);
//set some uniform parameters in Cg shader
cgGLSetParameter3fv(
cgGetNamedParameter(myCgVertexProgram,"eyePositionW"),
eyePosition);
checkForCgError("setting eyePositionW parameter");
makeRotateMatrix(headSpain,0,1,0,rotateMatrix);
makeTranslateMatrix(0.0,0.0,-5.0,tranlateMatrix);
multMatrix(modelMatrix,tranlateMatrix,rotateMatrix);
//set the Cg matrix parameter : modelToWorld
cgSetMatrixParameterfr(
cgGetNamedParameter(myCgVertexProgram,"modelToWorld"),
modelMatrix);
checkForCgError("setting modelToWorld parameter");
cgGLSetParameter1f(
cgGetNamedParameter(myCgFragmentProgram,"reflectivity"),
reflectivity);
checkForCgError("setting reflectivity parameter");
cgGLSetTextureParameter(
cgGetNamedParameter(myCgFragmentProgram,"decalMap"),
decalTexture);
checkForCgError("setting decalTexture parameter");
cgGLSetTextureParameter(
cgGetNamedParameter(myCgFragmentProgram,"environmentMap"),
TO_ENVIRONMENT);
checkForCgError("setting environmentMap parameter");
drawMonkeyHead();
cgGLDisableProfile(myCgVertexProfile);
checkForCgError("disabling vertex profile");
cgGLDisableProfile(myCgFragmentProfile);
checkForCgError("disabling fragment profile");
drawSurroundings(eyePosition);
glutSwapBuffers();
}
static void idle()
{
headSpain += 0.5;
if (headSpain > 360)
{
headSpain -= 360;
}
glutPostRedisplay();
}
static void keyboard(unsigned char key,int x,int y)
{
switch(key)
{
case ' ':
animating = !animating;
if (animating)
{
glutIdleFunc(idle);
}
else
glutIdleFunc(NULL);
break;
case 'r':
reflectivity += 0.1;
if (reflectivity > 1.0)
{
reflectivity = 1.0;
}
cout << "reflectivity : " << reflectivity << endl;
glutPostRedisplay();
break;
case 'R':
reflectivity -= 0.1;
if (reflectivity < 0.0)
{
reflectivity = 0.0;
}
cout << "reflectivity : " << reflectivity << endl;
glutPostRedisplay();
break;
case 27:
cgDestroyProgram(myCgVertexProgram);
cgDestroyContext(myCgContext);
exit(0);
break;
}
}
void reshape(int w,int h)
{
glViewport(0,0,(GLsizei)w,(GLsizei)h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0,1,1.0,20.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
int main(int argc,char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowPosition(0,0);
glutInitWindowSize(600,600);
glutCreateWindow("CubeMapReflection");
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutKeyboardFunc(keyboard);
glutMainLoop();
return 0;
}
The first thing I see is that the lerp statement needs to have its arguments reversed.
color.xyz = lerp(reflectedColor,decalColor,reflectivity);//change !!!!!!!!
should be
color.xyz = lerp(decalColor, reflectedColor, reflectivity);
because the lerp documentation says:
lerp(a, b, w) returns a when w = 0 and b when w = 1 and you want full decal when reflectivity = 0 and full reflected when reflectivity = 1.
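Spelled out, lerp(a, b, w) = a * (1 - w) + b * w, so with the arguments swapped you get color.xyz = decalColor * (1 - reflectivity) + reflectedColor * reflectivity, which is exactly full decal at reflectivity = 0 and fully reflected at reflectivity = 1.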
I see that the effect you're trying to achieve is akin to GL_MODULATE. You will need to multiply the values together, not lerp between them. Try this; it should work and give you the effect you want.
color.xyz = (reflectedColor.xyz * reflectivity) * decalColor;