Rendering compute-shader output to the screen - OpenGL

I'm trying to set up a pipeline for some raymarching rendering.
At first I set up a vertex and a geometry shader that take just one arbitrary float and emit a quad, so I can do all the work in the fragment shader, feeding in data either by passing it through the shader stages or via uniforms.
But then I came across compute shaders, and the tutorials I found all did nearly the same thing: make a quad and render the compute shader's output onto it. I think that's pretty backwards; if the compute shader lets you render however you want, why still resort to tricks to get your result on screen?
After some further research I found the function glFramebufferTexture2D, and as far as I understand it, it attaches a texture (in my case the image texture I wrote to with my compute shader) to the framebuffer (the buffer that's displayed, as far as I understand), so I wouldn't have to do the quad-generating trick.
But the code I wrote just shows a black screen.
Did I get something wrong, or did I miss something in my code?
This is my code:
(I'm not worrying about warnings or detaching shaders from programs yet; I just want to test the concept for now.)
main.rs
extern crate sdl2;
extern crate gl;
use sdl2::pixels::Color;
use std::ffi::{CStr, CString};
fn main() {
let width = 512;
let height = 512;
let sdl = sdl2::init().unwrap();
let video_sys = sdl.video().unwrap();
let gl_attr = video_sys.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(4, 1);
let window = video_sys.window("test", width, height).opengl().build().unwrap();
let gl_ctx = window.gl_create_context().unwrap();
let gl = gl::load_with(|s| video_sys.gl_get_proc_address(s) as *const std::os::raw::c_void);
unsafe {
gl::Viewport(0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei);
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
}
let shader = unsafe { gl::CreateShader(gl::COMPUTE_SHADER) };
unsafe {
gl::ShaderSource(shader, 1, &CString::new(include_str!("screen.comp")).unwrap().as_ptr(), std::ptr::null());
gl::CompileShader(shader);
}
let program = unsafe { gl::CreateProgram() };
unsafe {
gl::AttachShader(program, shader);
gl::LinkProgram(program);
gl::UseProgram(program);
}
let mut tex_out : gl::types::GLuint = 0;
unsafe {
gl::GenTextures(1, &mut tex_out);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, tex_out);
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RGBA32F as gl::types::GLint,
width as gl::types::GLsizei,
height as gl::types::GLsizei,
0,
gl::RGBA,
gl::FLOAT,
std::ptr::null()
);
gl::BindImageTexture(0, tex_out, 0, gl::FALSE, 0, gl::WRITE_ONLY, gl::RGBA32F);
}
let mut event_pump = sdl.event_pump().unwrap();
'main: loop {
for event in event_pump.poll_iter() {
match event {
sdl2::event::Event::Quit {..} => break 'main,
_ => {},
}
}
unsafe {
gl::DispatchCompute(width as gl::types::GLuint, height as gl::types::GLuint, 1);
gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
}
window.gl_swap_window();
}
}
screen.comp
#version 430
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img_output;
void main() {
vec4 pixel = vec4(1.0, 1.0, 1.0, 1.0);
ivec2 pixel_coords = ivec2(gl_GlobalInvocationID.xy);
imageStore(img_output, pixel_coords, pixel);
}
EDIT:
The working code (with a small color test):
main.rs:
extern crate sdl2;
extern crate gl;
use std::ffi::{CStr, CString};
struct Renderer {
width : u32 ,
height : u32 ,
shader : gl::types::GLuint ,
program : gl::types::GLuint ,
}
impl Renderer {
fn new(width : u32, height : u32, shader_name : &CStr) -> Self {
unsafe {
gl::Viewport(0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei);
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
}
let shader = unsafe { gl::CreateShader(gl::COMPUTE_SHADER) };
unsafe {
gl::ShaderSource(shader, 1, &shader_name.as_ptr(), std::ptr::null());
gl::CompileShader(shader);
}
let program = unsafe { gl::CreateProgram() };
unsafe {
gl::AttachShader(program, shader);
gl::LinkProgram(program);
gl::UseProgram(program);
}
let mut tex_out : gl::types::GLuint = 0;
unsafe {
gl::GenTextures(1, &mut tex_out);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, tex_out);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::types::GLint);
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RGBA32F as gl::types::GLint,
width as gl::types::GLsizei,
height as gl::types::GLsizei,
0,
gl::RGBA,
gl::FLOAT,
std::ptr::null()
);
gl::BindImageTexture(0, tex_out, 0, gl::FALSE, 0, gl::WRITE_ONLY, gl::RGBA32F);
}
let mut fbo : gl::types::GLuint = 0;
unsafe {
gl::GenFramebuffers(1, &mut fbo );
gl::BindFramebuffer(gl::FRAMEBUFFER, fbo );
gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
gl::BindFramebuffer(gl::READ_FRAMEBUFFER, fbo);
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, 0);
}
let resolution = unsafe { gl::GetUniformLocation(program, CString::new("iResolution").unwrap().as_ptr()) };
unsafe { gl::Uniform2i(resolution, width as gl::types::GLint, height as gl::types::GLint); }
Self {
width : width ,
height : height ,
shader : shader ,
program : program ,
}
}
fn get_input(&self, name : &str) -> gl::types::GLint {
unsafe { gl::GetUniformLocation(self.program, CString::new(name).unwrap().as_ptr()) }
}
fn input1f(&self, ptr : gl::types::GLint, f : gl::types::GLfloat) {
unsafe { gl::Uniform1f(ptr, f) };
}
fn draw(&self) {
unsafe {
gl::DispatchCompute(self.width as gl::types::GLuint, self.height as gl::types::GLuint, 1);
gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
gl::BlitFramebuffer(0, 0, self.width as gl::types::GLint, self.height as gl::types::GLint, 0, 0, self.width as gl::types::GLint, self.height as gl::types::GLint, gl::COLOR_BUFFER_BIT, gl::LINEAR);
}
}
}
impl Drop for Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteShader(self.shader);
gl::DeleteProgram(self.program);
}
}
}
fn main() {
let width = 512;
let height = 512;
let sdl = sdl2::init().unwrap();
let video_sys = sdl.video().unwrap();
let gl_attr = video_sys.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(4, 1);
let window = video_sys.window("test", width, height).opengl().build().unwrap();
let _gl_ctx = window.gl_create_context().unwrap();
let _gl = gl::load_with(|s| video_sys.gl_get_proc_address(s) as *const std::os::raw::c_void);
let render = Renderer::new(width, height, &CString::new(include_str!("screen.comp")).unwrap());
let time_attr = render.get_input("time");
let mut event_pump = sdl.event_pump().unwrap();
let mut time = 0.0;
'main: loop {
for event in event_pump.poll_iter() {
match event {
sdl2::event::Event::Quit {..} => break 'main,
_ => {},
}
}
time += 0.01;
if time > 1.0 {
time = 0.0;
}
render.input1f(time_attr, time);
render.draw();
window.gl_swap_window();
}
}
screen.comp:
#version 430
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img;
uniform ivec2 iResolution;
uniform float time;
void main() {
ivec2 iCoords = ivec2(gl_GlobalInvocationID.xy);
vec2 uv = vec2(iCoords) / vec2(iResolution);
vec4 pixel = vec4(uv, time, 1.0);
imageStore(img, iCoords, pixel);
}

There is an issue in how you generate the texture: the initial value of TEXTURE_MIN_FILTER is NEAREST_MIPMAP_LINEAR. If you don't change it and you don't create mipmaps, the texture is not mipmap "complete".
Set TEXTURE_MIN_FILTER to either NEAREST or LINEAR to solve the issue:
gl::BindTexture(gl::TEXTURE_2D, tex_out);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::types::GLint);
You've rendered an image from a compute shader to a texture object, but in order to blit that texture to the screen, you must copy that texture image to the color plane of the default framebuffer. It is not possible to change the default framebuffer's color plane simply by attaching a texture.
Instead, you need to create a named Framebuffer Object (glGenFramebuffers, glBindFramebuffer) and attach the texture object to the color plane of the framebuffer (glFramebufferTexture2D).
let mut fbo : gl::types::GLuint = 0;
gl::GenFramebuffers(1, &mut fbo);
gl::BindFramebuffer(gl::FRAMEBUFFER, fbo);
gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
Bind this framebuffer to the target GL_READ_FRAMEBUFFER (glBindFramebuffer) and bind the default framebuffer (zero) to the target GL_DRAW_FRAMEBUFFER.
gl::BindFramebuffer(gl::READ_FRAMEBUFFER, fbo);
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, 0);
Finally, in the render loop, use glBlitFramebuffer to copy the content of the named framebuffer object (the texture) to the color plane of the default framebuffer.
gl::DispatchCompute(width as gl::types::GLuint, height as gl::types::GLuint, 1);
gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
gl::BlitFramebuffer(
0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei,
0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei,
gl::COLOR_BUFFER_BIT, gl::LINEAR);
Note that with glBlitFramebuffer the size of the viewport can differ from the size of the texture image.
This approach would not work if the texture format were integral; it works here because the texture image format is floating point and the default framebuffer format is fixed-point.
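If the screen still stays black after these changes, one cheap sanity check is to verify framebuffer completeness right after the attachment call. This is a minimal sketch (my addition, not part of the original answer), using the same gl bindings:
let status = unsafe { gl::CheckFramebufferStatus(gl::FRAMEBUFFER) };
if status != gl::FRAMEBUFFER_COMPLETE {
    // e.g. 0x8cd6 is GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT
    println!("framebuffer incomplete: 0x{:x}", status);
}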

Here's a revision with the suggested fixes and a few polishes:
extern crate sdl2;
extern crate gl;
//use sdl2::pixels::Color;
//use std::ffi::{CStr, CString};
use std::ffi::{CString};
fn main() {
let width = 512;
let height = 512;
let sdl = sdl2::init().unwrap();
let video_sys = sdl.video().unwrap();
let gl_attr = video_sys.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(4, 3); // #version 430 compute shaders need a GL 4.3+ context
let window = video_sys.window("test", width, height).opengl().build().unwrap();
let _gl_ctx = window.gl_create_context().unwrap();
let _gl = gl::load_with(|s| video_sys.gl_get_proc_address(s) as *const std::os::raw::c_void);
unsafe {
gl::Viewport(0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei);
gl::ClearColor(0.1, 0.30, 0.50, 1.0);
let shader = gl::CreateShader(gl::COMPUTE_SHADER);
gl::ShaderSource(shader, 1, &CString::new(include_str!("screen.comp")).unwrap().as_ptr(), std::ptr::null());
gl::CompileShader(shader);
let program = gl::CreateProgram();
gl::AttachShader(program, shader);
gl::LinkProgram(program);
gl::UseProgram(program);
let mut tex_out : gl::types::GLuint = 0;
gl::GenTextures(1, &mut tex_out);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, tex_out);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::types::GLint); // keep the texture complete without mipmaps (see the first answer)
gl::TexImage2D( gl::TEXTURE_2D, 0, gl::RGBA32F as gl::types::GLint, width as gl::types::GLsizei, height as gl::types::GLsizei, 0, gl::RGBA, gl::FLOAT, std::ptr::null() );
gl::BindImageTexture(0, tex_out, 0, gl::FALSE, 0, gl::WRITE_ONLY, gl::RGBA32F);
let mut fbo: gl::types::GLuint = 0;
gl::GenFramebuffers(1, &mut fbo);
gl::BindFramebuffer(gl::FRAMEBUFFER, fbo);
gl::FramebufferTexture2D(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D, tex_out, 0);
gl::BindFramebuffer(gl::READ_FRAMEBUFFER, fbo);
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, 0);
let mut event_pump = sdl.event_pump().unwrap();
'main: loop {
for event in event_pump.poll_iter() {
match event {
sdl2::event::Event::Quit {..} => break 'main,
_ => {},
}
}
gl::Clear(gl::COLOR_BUFFER_BIT);
gl::DispatchCompute(width as gl::types::GLuint, height as gl::types::GLuint, 1);
gl::MemoryBarrier(gl::SHADER_IMAGE_ACCESS_BARRIER_BIT);
gl::BlitFramebuffer(
0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei,
0, 0, width as gl::types::GLsizei, height as gl::types::GLsizei,
gl::COLOR_BUFFER_BIT, gl::LINEAR);
window.gl_swap_window();
}
}
}
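One more note on the dispatch itself: the code above launches one 1x1 work group per pixel, which is correct but leaves most of the GPU's parallelism unused. A common variant is a larger local size with the group count rounded up; this is a minimal sketch (the 16x16 size is my assumption, not from the original post):
// Round up so the grid still covers images whose size is not a
// multiple of the work-group size (16x16 here is an assumed choice).
fn group_count(total: u32, local_size: u32) -> u32 {
    (total + local_size - 1) / local_size
}
// With `layout(local_size_x = 16, local_size_y = 16) in;` in screen.comp:
// gl::DispatchCompute(group_count(width, 16), group_count(height, 16), 1);
The shader then has to skip out-of-range invocations, e.g. with if (any(greaterThanEqual(iCoords, iResolution))) return; at the top of main().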

Related

Shaders: How to draw 3D point verts without generating geometry?

I have a 3D WebGL scene. I am using Regl (http://regl.party/), which is WebGL, so I am essentially writing straight GLSL.
This is a game project. I have an array of 3D positions [[x,y,z] ...] which are bullets, or projectiles. I want to draw these bullets as a simple cube, sphere, or particle. No requirement on the appearance.
How can I make shaders and a draw call for this without having to create a duplicated set of geometry for each bullet?
I'd prefer an answer with a vertex and fragment shader example that demonstrates the expected data input and can be reverse-engineered to handle the CPU binding layer.
You create a regl command, which encapsulates a bunch of data; you can then call it with an object.
Each uniform can take an optional function to supply its value. That function is passed a regl context as the first argument and then the object you passed as the second argument so you can call it multiple times with a different object to draw the same thing (same vertices, same shader) somewhere else.
var regl = createREGL()
const objects = [];
const numObjects = 100;
for (let i = 0; i < numObjects; ++i) {
objects.push({
x: rand(-1, 1),
y: rand(-1, 1),
speed: rand(.5, 1.5),
direction: rand(0, Math.PI * 2),
color: [rand(0, 1), rand(0, 1), rand(0, 1), 1],
});
}
function rand(min, max) {
return Math.random() * (max - min) + min;
}
const starPositions = [[0, 0, 0]];
const starElements = [];
const numPoints = 5;
for (let i = 0; i < numPoints; ++i) {
for (let j = 0; j < 2; ++j) {
const a = (i * 2 + j) / (numPoints * 2) * Math.PI * 2;
const r = 0.5 + j * 0.5;
starPositions.push([
Math.sin(a) * r,
Math.cos(a) * r,
0,
]);
}
starElements.push([
0, 1 + i * 2, 1 + i * 2 + 1,
]);
}
const drawStar = regl({
frag: `
precision mediump float;
uniform vec4 color;
void main () {
gl_FragColor = color;
}`,
vert: `
precision mediump float;
attribute vec3 position;
uniform mat4 mat;
void main() {
gl_Position = mat * vec4(position, 1);
}`,
attributes: {
position: starPositions,
},
elements: starElements,
uniforms: {
mat: (ctx, props) => {
const {viewportWidth, viewportHeight} = ctx;
const {x, y} = props;
const aspect = viewportWidth / viewportHeight;
return [.1 / aspect, 0, 0, 0,
0, .1, 0, 0,
0, 0, 0, 0,
x, y, 0, 1];
},
color: (ctx, props) => props.color,
}
})
regl.frame(function () {
regl.clear({
color: [0, 0, 0, 1]
});
objects.forEach((o) => {
o.direction += rand(-0.1, 0.1);
o.x += Math.cos(o.direction) * o.speed * 0.01;
o.y += Math.sin(o.direction) * o.speed * 0.01;
o.x = (o.x + 3) % 2 - 1;
o.y = (o.y + 3) % 2 - 1;
drawStar(o);
});
})
<script src="https://cdnjs.cloudflare.com/ajax/libs/regl/1.3.11/regl.min.js"></script>
You can draw all of the bullets as point sprites, in which case you just need to provide the position and size of each bullet and draw them as GL_POINTS. Each "point" is rasterized to a square based on the output of your vertex shader (which runs once per point). Your fragment shader is called for each fragment in that square and can color it however you want: with a flat color, by sampling a texture, or in any other way.
Or you can provide a single model for all bullets and a separate transform for each bullet, and draw them as instanced GL_TRIANGLES or GL_TRIANGLE_STRIP or whatever; a sketch follows. Read about instancing on the OpenGL wiki.
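For illustration, here is a sketch of that setup in the desktop-GL Rust bindings used elsewhere on this page; regl supports the same idea through its instances count and attribute divisor options, and WebGL2 exposes it as gl.vertexAttribDivisor / gl.drawArraysInstanced. The names offsets_vbo, offset_attr, model_vertex_count and num_bullets are hypothetical:
unsafe {
    // Per-bullet positions live in their own buffer, one vec3 per bullet.
    gl::BindBuffer(gl::ARRAY_BUFFER, offsets_vbo);
    gl::EnableVertexAttribArray(offset_attr);
    gl::VertexAttribPointer(offset_attr, 3, gl::FLOAT, gl::FALSE, 0, std::ptr::null());
    // Advance this attribute once per instance instead of once per vertex.
    gl::VertexAttribDivisor(offset_attr, 1);
    // One draw call renders every bullet from the same model vertices; the
    // vertex shader adds the per-instance offset to each model position.
    gl::DrawArraysInstanced(gl::TRIANGLES, 0, model_vertex_count, num_bullets);
}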
Not a WebGL coder, so read with prejudice...
1. Encode the vertexes in a texture. Beware of clamping: use a texture format that does not clamp to <0.0, +1.0>, like GL_LUMINANCE32F_ARB, or use only vertexes in that range. To check for clamping, use GLSL debug prints.
2. Render a single rectangle covering the whole screen, using the texture from #1 as input. This ensures the fragment shader is called exactly once for each pixel of the screen/view.
3. Inside the fragment shader, read the texture and check the distance of the fragment to your vertexes; based on that, render your stuff or discard() the fragment. Spheres are easy, but boxes and other shapes might be complicated to render based on the distance to a vertex, especially if they can be arbitrarily oriented (which needs additional info in the input texture). To ease this, you can prerender them into some texture and use the distance as texture coordinates.
This answer of mine uses this technique: raytrace through 3D mesh
You can sometimes get away with using GL_POINTS with a large gl_PointSize and a customized fragment shader.
An example is shown here using the distance to the point center for the fragment alpha. (You could just as well sample a texture.)
Support for large point sizes might be limited though, so check that before deciding on this route; see the query sketch after the example.
var canvas = document.getElementById('cvs');
gl = canvas.getContext('webgl');
var vertices = [
-0.5, 0.75,0.0,
0.0, 0.5, 0.0,
-0.75,0.25,0.0,
];
var vertex_buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
var vertCode =
`attribute vec3 coord;
void main(void) {
gl_Position = vec4(coord, 1.0);
gl_PointSize = 50.0;
}`;
var vertShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertShader, vertCode);
gl.compileShader(vertShader);
var fragCode =
`void main(void) {
mediump float ds = distance(gl_PointCoord.xy, vec2(0.5,0.5))*2.0;
mediump vec4 fg_color=vec4(0.0, 0.0, 0.0,1.0- ds);
gl_FragColor = fg_color;
}`;
var fragShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragShader, fragCode);
gl.compileShader(fragShader);
var shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertShader);
gl.attachShader(shaderProgram, fragShader);
gl.linkProgram(shaderProgram);
gl.useProgram(shaderProgram);
gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
var coord = gl.getAttribLocation(shaderProgram, "coord");
gl.vertexAttribPointer(coord, 3, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(coord);
gl.viewport(0,0,canvas.width,canvas.height);
gl.drawArrays(gl.POINTS, 0, 3);
<!doctype html>
<html>
<body>
<canvas width = "400" height = "400" id = "cvs"></canvas>
</body>
</html>
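As for the size limit mentioned above, the supported range can be queried. A sketch in the desktop-GL Rust bindings used elsewhere on this page (in WebGL the equivalent is gl.getParameter(gl.ALIASED_POINT_SIZE_RANGE)):
let mut range = [0.0f32; 2];
unsafe { gl::GetFloatv(gl::ALIASED_POINT_SIZE_RANGE, range.as_mut_ptr()); }
// Implementations are only required to support size 1.0; anything larger varies.
println!("supported point sizes: {} to {}", range[0], range[1]);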

Modify an IDirect3DSurface9* with a shader

I got an IDirect3DSurface9* from a DXVA2 video decoder. I'd like to modify that surface with a shader. I'm able to render the video frames without a shader by "drawing" the surface into the back buffer.
I use the following code to render the frames without a shader:
void Draw(Example* exps){
IDirect3DSurface9* backbuffer;
hwctx->d3d9device->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
hwctx->d3d9device->BeginScene();
hwctx->swap_chain->GetBackBuffer(0, D3DBACKBUFFER_TYPE_MONO, &backbuffer);
hwctx->d3d9device->StretchRect(videoSurface, NULL, backbuffer, NULL, D3DTEXF_LINEAR);
hwctx->d3d9device->EndScene();
hwctx->swap_chain->Present(0, 0, 0, 0, 0);
backbuffer->Release();
}
Up to here, everything works.
I would like to modify the Draw function to render video frames with the following shader:
uniform extern float4x4 gWVP;
uniform extern texture gTexRGB;
uniform extern texture gTexAlpha;
sampler TexRGB = sampler_state
{
Texture = <gTexRGB>;
AddressU = WRAP;
AddressV = WRAP;
};
sampler TexAlpha = sampler_state
{
Texture = <gTexAlpha>;
AddressU = WRAP;
AddressV = WRAP;
};
struct OutputVS
{
float4 posH : POSITION0;
float2 tex0 : TEXCOORD0;
};
OutputVS DirLightTexVS(float3 posL : POSITION0, float3 normalL : NORMAL0, float2 tex0: TEXCOORD0)
{
// Zero out our output.
OutputVS outVS = (OutputVS)0;
// Transform to homogeneous clip space.
outVS.posH = mul(float4(posL, 1.0f), gWVP);
// Pass on texture coordinates to be interpolated in rasterization.
outVS.tex0 = tex0;
// Done--return the output.
return outVS;
}
float4 DirLightTexPS(float4 c : COLOR0, float4 spec : COLOR1, float2 tex0 : TEXCOORD0) : COLOR
{
float3 rgb = tex2D(TexRGB, tex0).rgb;
float alpha = tex2D(TexAlpha, tex0).g;
return float4(rgb, alpha);
}
technique DirLightTexTech
{
pass P0
{
// Specify the vertex and pixel shader associated with this pass.
vertexShader = compile vs_2_0 DirLightTexVS();
pixelShader = compile ps_2_0 DirLightTexPS();
}
}
where TexRGB is the texture associated with the video frame, while TexAlpha is another texture that contains alpha values.
How can I pass the decoded surface to the shader? I have never used DirectX 9, so an example would be appreciated and could help me solve the problem.
UPDATE 1:
I created the InitEffect function to load the effect from a file:
void InitEffect(Example* ctx) {
auto hwctx = ctx->decoder->stream->HWAccelCtx;
// Create the FX from a .fx file.
ID3DXBuffer* errors = 0;
D3DXCreateEffectFromFile(hwctx->d3d9device, "basicEffect.fx", 0, 0, D3DXSHADER_DEBUG, 0, &ctx->mFX, &errors);
if (errors)
MessageBox(0, (LPCSTR)errors->GetBufferPointer(), 0, 0);
// Obtain handles.
ctx->mhTech = ctx->mFX->GetTechniqueByName("DirLightTexTech");
ctx->mhWVP = ctx->mFX->GetParameterByName(0, "gWVP");
ctx->mhTexAlpha = ctx->mFX->GetParameterByName(0, "gTexAlpha");
ctx->mhTexRGB = ctx->mFX->GetParameterByName(0, "gTexRGB");
}
and changed the rendering function to:
void Draw(Example* ctx) {
InitMatrices(ctx);
IDirect3DSurface9* backbuffer;
hwctx->d3d9device->Clear(0, 0, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0xffeeeeee, 1.0f, 0);
hwctx->d3d9device->BeginScene();
ctx->mFX->SetTechnique(ctx->mhTech);
ctx->mFX->SetMatrix(ctx->mhWVP, &(ctx->mCrateWorld*ctx->mView*ctx->mProj));
ctx->texRGB->GetSurfaceLevel(0, &ctx->surfRGB);
hwctx->d3d9device->SetRenderTarget(0, ctx->surfRGB);
hwctx->d3d9device->StretchRect((IDirect3DSurface9*)s->frame->data[3], NULL, ctx->surfRGB, NULL, D3DTEXF_LINEAR);
ctx->mFX->SetTexture(ctx->mhTexAlpha, ctx->texAlpha);
ctx->mFX->SetTexture(ctx->mhTexRGB, ctx->texRGB);
// Enable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, true);
hwctx->d3d9device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
hwctx->d3d9device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
hwctx->d3d9device->SetVertexDeclaration(VertexPNT::Decl);
hwctx->d3d9device->SetStreamSource(0, ctx->mBoxVB, 0, sizeof(VertexPNT));
hwctx->d3d9device->SetIndices(ctx->mBoxIB);
UINT numPasses = 0;
ctx->mFX->Begin(&numPasses, 0);
for (UINT i = 0; i < numPasses; ++i){
ctx->mFX->BeginPass(i);
hwctx->d3d9device->DrawIndexedPrimitive(D3DPT_TRIANGLELIST, 0, 0, 24, 0, 12);
ctx->mFX->EndPass();
}
ctx->mFX->End();
hwctx->d3d9device->EndScene();
hwctx->swap_chain->Present(0, 0, 0, 0, 0);
backbuffer->Release();
// Disable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
}
but it still doesn't work.
UPDATE 2
I modified the code by following the code Asesh shared. In the InitEffect function I added the following lines to create a render target surface:
ctx->texRGB->GetSurfaceLevel(0, &ctx->surfRGB);
// store orginal rendertarget
hwctx->d3d9device->GetRenderTarget(0, &ctx->origTarget_);
D3DSURFACE_DESC desc;
ctx->origTarget_->GetDesc(&desc);
// create our surface as render target
hwctx->d3d9device->CreateRenderTarget(1920, 1080, D3DFMT_X8R8G8B8,
desc.MultiSampleType, desc.MultiSampleQuality,
false, &ctx->surfRGB, NULL);
the draw function is:
void drawScene(Example* ctx) {
InitMatrices(ctx);
auto hwctx = ctx->decoder->stream->HWAccelCtx;
auto s = (VdrStreamContext*)ctx->decoder->stream->vdrCodecCtx->opaque;
IDirect3DSurface9* backbuffer;
hwctx->d3d9device->SetRenderTarget(0, ctx->surfRGB);
hwctx->d3d9device->Clear(0, 0, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0xffeeeeee, 1.0f, 0);
hwctx->d3d9device->BeginScene();
hwctx->d3d9device->StretchRect((IDirect3DSurface9*)s->vdrFrame->data[3], NULL, ctx->surfRGB, NULL, D3DTEXF_NONE);
hwctx->d3d9device->SetRenderTarget(0, ctx->origTarget_);
if (!hwctx->d3d9device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &backbuffer)) {
hwctx->d3d9device->StretchRect(ctx->surfRGB, NULL, backbuffer, NULL, D3DTEXF_NONE);
}
ctx->mFX->SetTechnique(ctx->mhTech);
ctx->mFX->SetMatrix(ctx->mhWVP, &(ctx->mCrateWorld*ctx->mView*ctx->mProj));
ctx->mFX->SetTexture(ctx->mhTexAlpha, ctx->texAlpha);
ctx->mFX->SetTexture(ctx->mhTexRGB, ctx->texRGB);
// Enable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, true);
hwctx->d3d9device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
hwctx->d3d9device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
hwctx->d3d9device->SetVertexDeclaration(VertexPNT::Decl);
hwctx->d3d9device->SetStreamSource(0, ctx->mBoxVB, 0, sizeof(VertexPNT));
hwctx->d3d9device->SetIndices(ctx->mBoxIB);
UINT numPasses = 0;
ctx->mFX->Begin(&numPasses, 0);
for (UINT i = 0; i < numPasses; ++i){
ctx->mFX->BeginPass(i);
hwctx->d3d9device->DrawIndexedPrimitive(D3DPT_TRIANGLELIST, 0, 0, 24, 0, 12);
ctx->mFX->EndPass();
}
ctx->mFX->End();
hwctx->d3d9device->EndScene();
hwctx->d3d9device->Present(0, 0, 0, 0);
backbuffer->Release();
// Disable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
}
When drawing into the backbuffer with hwctx->d3d9device->StretchRect(ctx->surfRGB, NULL, backbuffer, NULL, D3DTEXF_NONE);, the video frame is shown on screen, but alpha blending is not applied, even though ctx->surfRGB is associated with the texture passed to the shader. If I remove that StretchRect call, the video frame is not shown at all, even though ctx->surfRGB is not empty.

LWJGL texture with shaders produced skewed image

I'm trying to do 2D graphics in orthogonal mode. The code loads a picture of a cat and 2 simple shaders, which just pass through their input, unmodified. I expect the program to display the picture of the cat (or at least a part of it) in the middle of the screen, without any rotation or skew.
The program executes successfully, but I can't figure out why the result looks like this:
An OpenGL guru might spot the problem quickly, but I can't find it. I have the feeling that the problem might be at the "Create buffer for vertex and texture coordinates" part, but everything looked okay.
The cat image:
Vertex shader:
#version 150 core
in vec4 in_Position;
in vec2 in_TextureCoord;
out vec2 pass_TextureCoord;
void main(void) {
gl_Position = in_Position;
pass_TextureCoord = in_TextureCoord;
}
Pixel shader:
#version 150 core
uniform sampler2D texture_diffuse;
in vec2 pass_TextureCoord;
out vec4 out_Color;
void main(void) {
out_Color = texture(texture_diffuse, pass_TextureCoord);
}
Java (LWJGL) code:
package lwjgl_test1;
import java.awt.image.BufferedImage;
import java.io.*;
import java.nio.*;
import javax.imageio.ImageIO;
import org.lwjgl.*;
import org.lwjgl.opengl.*;
import static org.lwjgl.glfw.GLFW.*;
import java.util.concurrent.TimeUnit;
import static org.lwjgl.opengl.GL11.*;
public class Main {
public static void main(String[] args) {
try {
if (!glfwInit()) {
throw(new Exception("Can't init glfw."));
}
/*
* Create Window
*/
glfwWindowHint(GLFW_RESIZABLE, 0);
long windowGlID = glfwCreateWindow(1024, 768, "Example OpenGL App", 0, 0);
glfwSetWindowPos(windowGlID, 50, 50);
glfwMakeContextCurrent(windowGlID);
glfwShowWindow(windowGlID);
/*
* Initialize OpenGL
*/
GL.createCapabilities();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, 1024, 768, 0, 0, 1);
glMatrixMode(GL_MODELVIEW);
/*
* Load texture
*/
int cat = loadTexture("cat.png");
/*
* Load shaders
*/
int vertexShader = loadShader("vertex_shader.txt", GL20.GL_VERTEX_SHADER);
int pixelShader = loadShader("pixel_shader.txt", GL20.GL_FRAGMENT_SHADER);
int pId = GL20.glCreateProgram();
GL20.glAttachShader(pId, vertexShader);
GL20.glAttachShader(pId, pixelShader);
// Position information will be attribute 0
GL20.glBindAttribLocation(pId, 0, "in_Position");
// Textute information will be attribute 1
GL20.glBindAttribLocation(pId, 1, "in_TextureCoord");
GL20.glLinkProgram(pId);
GL20.glValidateProgram(pId);
exitOnGLError("Compiling shaders failed.");
/*
* Create buffer for vertex and texture coordinates
*/
float size = 120.0f;
FloatBuffer vertex_data = BufferUtils.createFloatBuffer(20);
vertex_data.put(new float[] { -size, -size, 0f, 0f, 0f }); // (Vx, Vy, Vz, Tx, Ty)
vertex_data.put(new float[] { size, -size, 0f, 0f, 1f });
vertex_data.put(new float[] { size, size, 0f, 1f, 1f });
vertex_data.put(new float[] { -size, size, 0f, 1f, 0f });
vertex_data.flip();
int vbo_vertex_handle = GL15.glGenBuffers();
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, vbo_vertex_handle);
GL15.glBufferData(GL15.GL_ARRAY_BUFFER, vertex_data, GL15.GL_STATIC_DRAW);
GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 2 * 4, 0); // mark vertex coordinates
GL20.glVertexAttribPointer(1, 2, GL11.GL_FLOAT, false, 3 * 4, 3 * 4); // mark texture coordinates
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
GL30.glBindVertexArray(0);
exitOnGLError("Creating buffers failed.");
/*
* Main rendering loop
*/
while(true) {
/*
* Clear screen
*/
glClearColor(0.0f, 1.0f, 1.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/*
* Apply shader program
*/
GL20.glUseProgram(pId);
// Bind the texture
GL13.glActiveTexture(GL13.GL_TEXTURE0);
GL11.glBindTexture(GL11.GL_TEXTURE_2D, cat);
/*
* Draw (use buffers)
*/
GL20.glEnableVertexAttribArray(0);
GL20.glEnableVertexAttribArray(1);
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, vbo_vertex_handle);
GL11.glDrawArrays(GL11.GL_QUADS, 0, 4); // Draw an entity with 4 vertices
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
exitOnGLError("Draw failed.");
GL11.glBindTexture(GL11.GL_TEXTURE_2D, 0);
GL20.glUseProgram(0); // deselect
/*
* Swap buffers
*/
glfwSwapBuffers(windowGlID);
/*
* Events
*/
glfwPollEvents();
if (glfwWindowShouldClose(windowGlID)) {
break;
}
TimeUnit.MILLISECONDS.sleep(10);
}
} catch (Exception e) {
e.printStackTrace();
}
}
private static int loadTexture(String path) throws Exception {
int[] pixels = null;
BufferedImage image = null;
image = ImageIO.read(new FileInputStream(path));
int width = image.getWidth();
int height = image.getHeight();
pixels = new int[width * height];
image.getRGB(0, 0, width, height, pixels, 0, width);
int[] data = new int[width * height];
for (int i = 0; i < width * height; i++) {
int a = (pixels[i] & 0xff000000) >> 24;
int r = (pixels[i] & 0xff0000) >> 16;
int g = (pixels[i] & 0xff00) >> 8;
int b = (pixels[i] & 0xff);
data[i] = a << 24 | b << 16 | g << 8 | r;
}
IntBuffer intBuffer1 = ByteBuffer.allocateDirect(data.length << 2).order(ByteOrder.nativeOrder()).asIntBuffer();
intBuffer1.put(data).flip();
int result = glGenTextures();
glBindTexture(GL_TEXTURE_2D, result);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, intBuffer1);
glBindTexture(GL_TEXTURE_2D, 0);
exitOnGLError("Loading texture '"+ path +"' failed.");
return result;
}
private static int loadShader(String filename, int type) {
StringBuilder shaderSource = new StringBuilder();
int shaderID = 0;
try {
BufferedReader reader = new BufferedReader(new FileReader(filename));
String line;
while ((line = reader.readLine()) != null) {
shaderSource.append(line).append("\n");
}
reader.close();
} catch (IOException e) {
System.err.println("Could not read file.");
e.printStackTrace();
System.exit(-1);
}
shaderID = GL20.glCreateShader(type);
GL20.glShaderSource(shaderID, shaderSource);
GL20.glCompileShader(shaderID);
if (GL20.glGetShaderi(shaderID, GL20.GL_COMPILE_STATUS) == GL11.GL_FALSE) {
System.err.println("Could not compile shader.");
System.exit(-1);
}
return shaderID;
}
private static void exitOnGLError(String errorMessage) throws Exception {
int errorValue = GL11.glGetError();
if (errorValue != GL11.GL_NO_ERROR) {
throw new Exception(errorMessage);
}
}
}
The problem lies in the stride parameter in these lines:
GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 2 * 4, 0);
GL20.glVertexAttribPointer(1, 2, GL11.GL_FLOAT, false, 3 * 4, 3 * 4);
Stride tells OpenGL how many bytes apart the beginnings of two consecutive entries are. Since you are using 5 floats per vertex, it has to be 5 * 4 in both lines:
GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 5 * 4, 0);
GL20.glVertexAttribPointer(1, 2, GL11.GL_FLOAT, false, 5 * 4, 3 * 4);
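For reference, this is the interleaved memory layout those two calls describe (byte offsets; stride = 5 floats * 4 bytes = 20, and the texture coordinates begin 3 * 4 = 12 bytes into each vertex, which is the offset in the second call):
byte:  0         12      20        32      40
       |Vx Vy Vz |Tx Ty  |Vx Vy Vz |Tx Ty  | ...
       '---- vertex 0 ---''---- vertex 1 ---'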

How to align objects, autorotated to screen, to the pixel grid?

I'm trying to show a rectangular object so that it doesn't change its look under rotation and zoom in OpenSceneGraph. I've found that osg::AutoTransform should work for me.
But with the following code it appears to give broken results, even if I set the texture filters to NEAREST instead of the default LINEAR. With LINEAR the result is simply blurry, but with NEAREST it sometimes lacks some texel lines.
#include <osg/Node>
#include <osgViewer/Viewer>
#include <osg/Texture2D>
#include <osg/Geode>
#include <osg/AutoTransform>
osg::ref_ptr<osg::Node> createFixedSizeTexture(osg::Image *image,int W,int H)
{
osg::Vec3Array& verts = *new osg::Vec3Array(4);
verts[0] = osg::Vec3(-W/2., -H/2., 0);
verts[1] = osg::Vec3(+W/2., -H/2., 0);
verts[2] = osg::Vec3(+W/2., +H/2., 0);
verts[3] = osg::Vec3(-W/2., +H/2., 0);
osg::Vec2Array& texcoords = *new osg::Vec2Array(4);
texcoords[0].set(0,0);
texcoords[1].set(1,0);
texcoords[2].set(1,1);
texcoords[3].set(0,1);
osg::Geometry*const geometry = new osg::Geometry;
geometry->setVertexArray( &verts );
geometry->setTexCoordArray(0, &texcoords);
geometry->addPrimitiveSet( new osg::DrawArrays(GL_QUADS, 0, 4));
osg::Texture2D*const texture = new osg::Texture2D( image );
texture->setResizeNonPowerOfTwoHint(false);
geometry->getOrCreateStateSet()->setTextureAttributeAndModes(0, texture, osg::StateAttribute::ON);
osg::Geode*const geode = new osg::Geode;
geode->addDrawable( geometry );
return geode;
}
int main()
{
static constexpr int W=21, H=15;
unsigned bits[W*H];
for(int x=0;x<W;++x)
for(int y=0;y<H;++y)
bits[x+W*y] = (x&y&1)*0xffffffff;
osg::Image *formImage = new osg::Image;
formImage->setImage(W, H, 1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE,
reinterpret_cast<unsigned char*>(bits), osg::Image::NO_DELETE);
osg::AutoTransform *at = new osg::AutoTransform;
at->setAutoScaleToScreen(true);
at->setAutoRotateMode( osg::AutoTransform::ROTATE_TO_SCREEN );
at->addChild(createFixedSizeTexture(formImage,W,H));
osgViewer::Viewer viewer;
viewer.setUpViewInWindow(0, 0, 800, 600);
viewer.setSceneData(at);
return viewer.run();
}
This is due to non-integer screen coordinates of the final object. So to fix this I'll have to align the object to the pixel grid. How can I achieve this?
The misalignment problem can be solved by using a custom vertex shader, which rounds screen coordinates to integers.
The following example is based on the code in the OP, but instead of using osg::AutoTransform it does the whole job (removing rotation and scaling, plus rounding screen coordinates) in a single shader:
#include <osg/Node>
#include <osgViewer/Viewer>
#include <osg/Texture2D>
#include <osg/Geode>
const char*const vertexShader=R"(
#version 120
uniform vec2 screenSize;
vec4 roundCoords(vec4 clipPos)
{
vec2 halfScreenSize=screenSize/2.;
vec2 ndcPos=clipPos.xy/clipPos.w; // transform to normalized device coordinates
vec2 screenPos=(ndcPos+1)*halfScreenSize; // transform from NDC space to screen space
vec2 screenPosRounded=floor(screenPos); // round the screen coordinates
ndcPos=screenPosRounded.xy/halfScreenSize-1; // transform back to NDC space
clipPos.xy=ndcPos*clipPos.w; // transform back to clip space
return clipPos;
}
void main()
{
gl_TexCoord[0]=gl_MultiTexCoord0;
gl_FrontColor=gl_Color;
vec4 translCol=gl_ModelViewProjectionMatrix[3];
// Prevent rotation and unneeded scaling
mat4 mvp=mat4(vec4(2./screenSize.x, 0, 0,0),
vec4( 0, 2./screenSize.y,0,0),
vec4( 0, 0, 1,0),
vec4(translCol.xyz/translCol.w, 1));
gl_Position=roundCoords(mvp*gl_Vertex);
}
)";
static constexpr int windowWidth=800, windowHeight=600;
osg::ref_ptr<osg::Node> createFixedSizeTexture(osg::Image *image,int W,int H)
{
osg::Vec3Array& verts = *new osg::Vec3Array(4);
verts[0] = osg::Vec3(-W/2., -H/2., 0);
verts[1] = osg::Vec3(+W/2., -H/2., 0);
verts[2] = osg::Vec3(+W/2., +H/2., 0);
verts[3] = osg::Vec3(-W/2., +H/2., 0);
osg::Vec2Array& texcoords = *new osg::Vec2Array(4);
texcoords[0].set(0,0);
texcoords[1].set(1,0);
texcoords[2].set(1,1);
texcoords[3].set(0,1);
osg::Geometry*const geometry = new osg::Geometry;
geometry->setVertexArray( &verts );
geometry->setTexCoordArray(0, &texcoords);
geometry->addPrimitiveSet( new osg::DrawArrays(GL_QUADS, 0, 4));
osg::Texture2D*const texture = new osg::Texture2D( image );
texture->setResizeNonPowerOfTwoHint(false);
geometry->getOrCreateStateSet()->setTextureAttributeAndModes(0, texture, osg::StateAttribute::ON);
osg::Geode*const geode = new osg::Geode;
geode->addDrawable( geometry );
osg::Program*const program = new osg::Program;
program->addShader(new osg::Shader(osg::Shader::VERTEX, vertexShader));
osg::StateSet*const set = geode->getOrCreateStateSet();
set->setAttributeAndModes(program, osg::StateAttribute::ON);
set->addUniform(new osg::Uniform("screenSize" , osg::Vec2(windowWidth,windowHeight)));
return geode;
}
int main()
{
static constexpr int W=21, H=15;
unsigned bits[W*H];
for(int x=0;x<W;++x)
for(int y=0;y<H;++y)
bits[x+W*y] = (x&y&1)*0xffffffff;
osg::Image *formImage = new osg::Image;
formImage->setImage(W, H, 1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE,
reinterpret_cast<unsigned char*>(bits), osg::Image::NO_DELETE);
osgViewer::Viewer viewer;
viewer.setUpViewInWindow(0, 0, windowWidth, windowHeight);
viewer.setSceneData(createFixedSizeTexture(formImage,W,H));
return viewer.run();
}

Why is vertex position being taken from colour buffer instead of position buffer?

Today I thought I'd modify a small example of how to use gl-rs (OpenGL bindings for Rust), adding a colour array and drawing some points instead of a triangle. Trivial, I thought...
However, my COLOUR_DATA appears to be used for the vertex positions somehow.
Given
static VERTEX_DATA: [GLfloat, ..6] = [
0.2, 0.0,
0.0, 0.2,
0.0, 0.0];
static COLOUR_DATA: [GLfloat, ..12] = [
0.0, 0.5, 0.0, 1.0,
0.5, 0.0, 0.0, 1.0,
0.0, 0.0, 5.0, 1.0];
it is obvious that the points in the screenshot below are the first 6 values of COLOUR_DATA, not VERTEX_DATA. The 'problem' disappears when I comment out the BindBuffer and BufferData calls pertaining to my colour buffer object.
Source code is below the screenshot (and of course removing BindBuffer/BufferData also means removing the EnableVertexAttribArray and VertexAttribPointer calls in order to compile, but they have no impact on the situation at hand).
Why is this happening and how can I avoid it? Am I just missing something obvious? Or am I dealing with something deeper (e.g. a bug in gl-rs) here?
#![feature(globs)]
extern crate gl;
extern crate glfw;
extern crate native;
use gl::types::*;
use glfw::Context;
use std::mem;
use std::ptr;
use std::str;
// Vertex data
static VERTEX_DATA: [GLfloat, ..6] = [
0.2, 0.0,
0.0, 0.2,
0.0, 0.0];
static COLOUR_DATA: [GLfloat, ..12] = [
0.0, 0.5, 0.0, 1.0,
0.5, 0.0, 0.0, 1.0,
0.0, 0.0, 5.0, 1.0];
// Shader sources
static VS_SRC: &'static str =
"#version 150\n\
in vec2 position;\n\
in vec4 vertexColor;\n\
out vec4 fragmentColor;\n\
void main() {\n\
gl_Position = vec4(position, 0.0, 1.0);\n\
fragmentColor = vertexColor;\n\
}";
static FS_SRC: &'static str =
"#version 150\n\
in vec4 fragmentColor;\n\
out vec4 out_color;\n\
void main() {\n\
out_color = vec4(1.0, 0.0, 0.0, 1.0);\n\
}";
//out_color = fragmentColor;\n\
// the above line removed from shader string for debugging this
fn compile_shader(src: &str, ty: GLenum) -> GLuint {
let shader;
unsafe {
shader = gl::CreateShader(ty);
// Attempt to compile the shader
src.with_c_str(|ptr| gl::ShaderSource(shader, 1, &ptr, ptr::null()));
gl::CompileShader(shader);
// Get the compile status
let mut status = gl::FALSE as GLint;
gl::GetShaderiv(shader, gl::COMPILE_STATUS, &mut status);
// Fail on error
if status != (gl::TRUE as GLint) {
let mut len = 0;
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::from_elem(len as uint - 1, 0u8); // subtract 1 to skip the trailing null character
gl::GetShaderInfoLog(shader, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
panic!("{}", str::from_utf8(buf.as_slice()).expect("ShaderInfoLog not valid utf8"));
}
}
shader
}
fn link_program(vs: GLuint, fs: GLuint) -> GLuint {
unsafe {
let program = gl::CreateProgram();
gl::AttachShader(program, vs);
gl::AttachShader(program, fs);
gl::LinkProgram(program);
// Get the link status
let mut status = gl::FALSE as GLint;
gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
// Fail on error
if status != (gl::TRUE as GLint) {
let mut len: GLint = 0;
gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::from_elem(len as uint - 1, 0u8); // subtract 1 to skip the trailing null character
gl::GetProgramInfoLog(program, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
panic!("{}", str::from_utf8(buf.as_slice()).expect("ProgramInfoLog not valid utf8"));
}
program
}
}
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
native::start(argc, argv, main)
}
fn main() {
let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
// Choose a GL profile that is compatible with OS X 10.7+
glfw.window_hint(glfw::ContextVersion(3, 2));
glfw.window_hint(glfw::OpenglForwardCompat(true));
glfw.window_hint(glfw::OpenglProfile(glfw::OpenGlCoreProfile));
let (window, _) = glfw.create_window(800, 600, "OpenGL", glfw::Windowed)
.expect("Failed to create GLFW window.");
// It is essential to make the context current before calling `gl::load_with`.
window.make_current();
// Load the OpenGL function pointers
gl::load_with(|s| window.get_proc_address(s));
// Create GLSL shaders
let vs = compile_shader(VS_SRC, gl::VERTEX_SHADER);
let fs = compile_shader(FS_SRC, gl::FRAGMENT_SHADER);
let program = link_program(vs, fs);
let mut vao = 0;
let mut vbo = 0;
let mut cbo = 0;
unsafe {
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut vao);
gl::BindVertexArray(vao);
// Set up vertex buffer object
gl::GenBuffers(1, &mut vbo);
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(gl::ARRAY_BUFFER,
(VERTEX_DATA.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
mem::transmute(&VERTEX_DATA[0]),
gl::STATIC_DRAW);
// Set up colour buffer object
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(gl::ARRAY_BUFFER,
(COLOUR_DATA.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
mem::transmute(&COLOUR_DATA[0]),
gl::STATIC_DRAW);
gl::UseProgram(program);
// Bind fragment shader
"out_color".with_c_str(|ptr| gl::BindFragDataLocation(program, 0, ptr));
// Configure vertex buffer
let pos_attr = "position".with_c_str(|ptr| gl::GetAttribLocation(program, ptr));
println!("{}", pos_attr);
gl::EnableVertexAttribArray(pos_attr as GLuint);
gl::VertexAttribPointer(pos_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
gl::PointSize(10.0);
// Configure colour buffer
let col_attr = "vertexColor".with_c_str(|ptr| gl::GetAttribLocation(program, ptr));
println!("{}", col_attr);
gl::EnableVertexAttribArray(col_attr as GLuint);
gl::VertexAttribPointer(col_attr as GLuint, 4, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
}
while !window.should_close() {
glfw.poll_events();
unsafe {
gl::ClearColor(0.3, 0.3, 0.3, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
gl::DrawArrays(gl::POINTS, 0, 3);
}
window.swap_buffers();
}
unsafe {
gl::DeleteProgram(program);
gl::DeleteShader(fs);
gl::DeleteShader(vs);
gl::DeleteBuffers(1, &cbo);
gl::DeleteBuffers(1, &vbo);
gl::DeleteVertexArrays(1, &vao);
}
}
Note: the code depends on gl-rs and glfw-rs, running on Windows 8.1 and Rust 0.13 nightly (40fb87d40). gl-rs does not appear to have anything like this in its issue tracker.
Because you need to have the right buffer bound (BindBuffer) before calling VertexAttribPointer.
// Configure vertex buffer
let pos_attr = "position".with_c_str(|ptr| gl::GetAttribLocation(program, ptr));
println!("{}", pos_attr);
gl::EnableVertexAttribArray(pos_attr as GLuint);
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::VertexAttribPointer(pos_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
gl::PointSize(10.0);
// Configure colour buffer
let col_attr = "vertexColor".with_c_str(|ptr| gl::GetAttribLocation(program, ptr));
println!("{}", col_attr);
gl::EnableVertexAttribArray(col_attr as GLuint);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::VertexAttribPointer(col_attr as GLuint, 4, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
With no buffer bound, the final argument to VertexAttribPointer is a pointer to system memory. With vertex buffer objects, it becomes an offset into the currently bound buffer. In your case, the colour buffer was the last to be bound during initialization and was being used for both vertex attributes.
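An alternative that sidesteps the ordering pitfall entirely is to interleave both attributes in a single buffer, so only one BindBuffer call is ever involved; the stride/offset mechanics are the same as in the LWJGL answer above. A minimal sketch in the same style, assuming a hypothetical interleaved_vbo holding [x, y, r, g, b, a] per vertex:
gl::BindBuffer(gl::ARRAY_BUFFER, interleaved_vbo);
// 6 floats per vertex, so the stride is 6 * 4 = 24 bytes for both attributes.
gl::VertexAttribPointer(pos_attr as GLuint, 2, gl::FLOAT,
                        gl::FALSE as GLboolean, 24, ptr::null());
// The colour data starts 2 floats (8 bytes) into each vertex.
gl::VertexAttribPointer(col_attr as GLuint, 4, gl::FLOAT,
                        gl::FALSE as GLboolean, 24, 8 as *const _);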