OpenGL: Geometry Shader performance with a lot of cubes - opengl

So I wrote a really simple OpenGL program to draw 100x100x100 points drawn as cubes using the Geometry Shader. I wanted to do it to benchmark it against what I could currently do using DirectX11.
With DirectX11, I can easily render these cubes at 60fps (vsync). However, with OpenGL I'm stuck at 40fps.
In both applications, I am:
Using a point topology to represent just the position of the cube (stride = 12 bytes).
Only mapping to the Vertex Buffer in the initialise function, only ever once.
Using only two draw calls in total: one to render the cubes, one to render frametime.
Using back-face culling, and depth testing.
Limiting state changes to the minimum I need to draw the cubes (VBO's/Shader Program).
Here is my draw call:
// Per-frame draw: bind the cube shader program, upload the combined
// world-view-projection matrix, then issue one draw call that feeds
// every cube position as a single GL_POINT (expanded elsewhere by the
// geometry shader). Always returns true.
GLboolean CCubeApplication::Draw()
{
auto program = m_ppBatches[0]->GetShaders()->GetProgram(0);
// Bind() also binds the VAO (per the surrounding text) — so this is
// the only state change needed before drawing.
program->Bind();
{
glUniformMatrix4fv(program->GetUniform("g_uWVP"), 1, false, glm::value_ptr(m_matMatrices[MATRIX_WVP]));
glDrawArrays(GL_POINTS, 0, m_uiTotal);
}
return true;
}
This function calls glBindVertexArray and glUseProgram
program->Bind();
And the rest is straight-forward. My Update function does nothing but update the camera's position and view matrix, and is identical in DirectX/OpenGL versions.
My vertex shader is a pass-through, and my fragment shader returns a constant colour. This is my geometry shader:
#version 440 core
// Geometry shader: expands each incoming point into a cube of
// half-extent f, emitting 12 triangles (36 vertices), one
// single-triangle strip per triangle.
// GS_LAYOUT
layout(points) in;
layout(triangle_strip, max_vertices = 36) out;
// GS_IN
in vec4 vOut_pos[];
// GS_OUT
// UNIFORMS
uniform mat4 g_uWVP;
// Half-extent of each cube along every axis.
const float f = 0.1f;
// Index list: 12 triangles referencing the 8 corner vertices that
// main() computes below.
const int elements[] = int[]
(
0,2,1,
2,3,1,
1,3,5,
3,7,5,
5,7,4,
7,6,4,
4,6,0,
6,2,0,
3,2,7,
2,6,7,
5,4,1,
4,0,1
);
// GS
void main()
{
// Transform all 8 cube corners into clip space once, up front.
vec4 vertices[] = vec4[]
(
g_uWVP * (vOut_pos[0] + vec4(-f,-f,-f, 0)),
g_uWVP * (vOut_pos[0] + vec4(-f,-f,+f, 0)),
g_uWVP * (vOut_pos[0] + vec4(-f,+f,-f, 0)),
g_uWVP * (vOut_pos[0] + vec4(-f,+f,+f, 0)),
g_uWVP * (vOut_pos[0] + vec4(+f,-f,-f, 0)),
g_uWVP * (vOut_pos[0] + vec4(+f,-f,+f, 0)),
g_uWVP * (vOut_pos[0] + vec4(+f,+f,-f, 0)),
g_uWVP * (vOut_pos[0] + vec4(+f,+f,+f, 0))
);
// Walk the index list, emitting each triangle as its own primitive.
uint uiIndex = 0;
for(uint uiTri = 0; uiTri < 12; ++uiTri)
{
for(uint uiVert = 0; uiVert < 3; ++uiVert)
{
gl_Position = vertices[elements[uiIndex++]];
EmitVertex();
}
EndPrimitive();
}
}
I've seen people talk about instancing or other such rendering methods, but I'm primarily interested in understanding why I can't get at least the same performance from OpenGL as I do with DirectX - seeing as the way I do it in both seem to be virtually identical to me. Identical data, identical shaders. Help?
UPDATE
So I downloaded gDEBugger, and here is my call stack for one frame:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
// Drawing cubes
glBindVertexArray(1)
glUseProgram(1)
glUniformMatrix4fv(0, 1, FALSE, {matrixData})
glDrawArrays(GL_POINTS, 0, 1000000)
// Drawing text
glBindVertexArray(2);
glUseProgram(5);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, 2);
glBindBuffer(GL_ARRAY_BUFFER, 2);
glBufferData(GL_ARRAY_BUFFER, 212992, {textData}, GL_DYNAMIC_DRAW);
glDrawArrays(GL_POINTS, 0, 34);
// Swap buffers
wglSwapBuffers();

Related

How can I draw a circle in OpenGL Core 3.3 with orthographic projection?

I'm a complete beginner to OpenGL programming and am trying to follow the Breakout tutorial at learnopengl.com but would like to draw the ball as an actual circle, instead of using a textured quad like Joey suggests. However, every result that Google throws back at me for "draw circle opengl 3.3" or similar phrases seems to be at least a few years old, and using even-older-than-that versions of the API :-(
The closest thing that I've found is this SO question, but of course the OP just had to use a custom VertexFormat object to abstract some of the details, without sharing his/her implementation of such! Just my luck! :P
There's also this YouTube tutorial that uses a seemingly-older version of the API, but copying the code verbatim (except for the last few lines which is where the code looks old) still got me nowhere.
My version of SpriteRenderer::initRenderData() from the tutorial:
// Builds the VAO/VBO for this sprite. Quads use a prebuilt 24-float
// array; circles generate a triangle-fan layout (centre + rim vertices).
void SpriteRenderer::initRenderData() {
GLuint vbo;
auto attribSize = 0;
GLfloat* vertices = nullptr;
// Determine whether this sprite is a circle or
// quad and setup the vertices array accordingly
if (!this->isCircle) {
attribSize = 4;
vertices = new GLfloat[24] {...} // works for rendering quads
} else {
// This code is adapted from the YouTube tutorial that I linked
// above and is where things go pear-shaped for me...or at least
// **not** circle-shaped :P
attribSize = 3;
GLfloat x = 0.0f;
GLfloat y = 0.0f;
GLfloat z = 0.0f;
GLfloat r = 100.0f;
GLint numSides = 6;
// numSides + 2 = centre vertex + numSides rim vertices + one extra
// rim vertex to close the fan.
GLint numVertices = numSides + 2;
GLfloat* xCoords = new GLfloat[numVertices];
GLfloat* yCoords = new GLfloat[numVertices];
GLfloat* zCoords = new GLfloat[numVertices];
xCoords[0] = x;
yCoords[0] = y;
zCoords[0] = z;
for (auto i = 1; i < numVertices; i++) {
xCoords[i] = x + (r * cos(i * (M_PI * 2.0f) / numSides));
yCoords[i] = y + (r * sin(i * (M_PI * 2.0f) / numSides));
zCoords[i] = z;
}
// Interleave x/y/z into one tightly packed vertex array.
vertices = new GLfloat[numVertices * 3];
for (auto i = 0; i < numVertices; i++) {
vertices[i * 3] = xCoords[i];
vertices[i * 3 + 1] = yCoords[i];
vertices[i * 3 + 2] = zCoords[i];
}
// NOTE(review): xCoords/yCoords/zCoords (and vertices, later) are
// new[]'d but never delete[]'d anywhere in this function — memory
// leak on every call.
}
// This is where I go back to the learnopengl.com code. Once
// again, the following works for quads but not circles!
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// NOTE(review): the upload size is hard-coded to 24 floats; the circle
// path only works because numVertices * 3 == 24 here (8 vertices).
// Derive the size from the actual vertex count instead.
glBufferData(GL_ARRAY_BUFFER, 24 * sizeof(
GLfloat), vertices, GL_STATIC_DRAW);
glBindVertexArray(vao);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, attribSize, GL_FLOAT, GL_FALSE,
attribSize * sizeof(GLfloat), (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
And here's the SpriteRenderer::DrawSprite() method (the only difference from the original being lines 24 - 28):
// Draws this sprite either as a textured quad (6 vertices,
// GL_TRIANGLES) or as a filled circle (triangle fan) using the VAO
// built in initRenderData().
// @param position  top-left position in world/screen units
// @param size      width/height scale applied to the unit geometry
// @param rotation  rotation (radians) about the sprite centre
// @param colour    flat colour passed to the fragment shader
void SpriteRenderer::Draw(vec2 position, vec2 size, GLfloat rotation, vec3 colour) {
// Prepare transformations
shader.Use();
auto model = mat4(1.0f);
model = translate(model, vec3(position, 0.0f));
model = translate(model, vec3(0.5f * size.x, 0.5f * size.y, 0.0f)); // Move origin of rotation to center
model = rotate(model, rotation, vec3(0.0f, 0.0f, 1.0f)); // Rotate quad
model = translate(model, vec3(-0.5f * size.x, -0.5f * size.y, 0.0f)); // Move origin back
model = scale(model, vec3(size, 1.0f)); // Lastly, scale
shader.SetMatrix4("model", model);
// Render textured quad
shader.SetVector3f("spriteColour", colour);
glActiveTexture(GL_TEXTURE0);
texture.Bind();
glBindVertexArray(vao);
// NOTE(review): initRenderData() tests `isCircle` while this tests
// `isCircular` — presumably the same member; confirm against the
// class declaration.
if (!isCircular) {
glDrawArrays(GL_TRIANGLES, 0, 6);
} else {
// FIX: glDrawArrays takes a VERTEX count. The circle VBO holds
// numSides + 2 == 8 vertices (centre + 7 rim points); 24 was the
// number of floats, which over-reads the buffer.
glDrawArrays(GL_TRIANGLE_FAN, 0, 8);
}
glBindVertexArray(0);
}
And finally, the shaders (different to the ones used for quads):
// Vertex shader
#version 330 core
// Vertex shader: applies the model and orthographic projection
// matrices to each incoming 3D position.
layout (location = 0) in vec3 position;
uniform mat4 model;
uniform mat4 projection;
void main() {
gl_Position = projection * model *
vec4(position.xyz, 1.0f);
}
// Fragment shader
#version 330 core
// Fragment shader: fills every fragment with the flat sprite colour
// (opaque alpha).
out vec4 colour;
uniform vec3 spriteColour;
void main() {
colour = vec4(spriteColour, 1.0);
}
P.S. I know I could just use a quad but I'm trying to learn how to draw all primitives in OpenGL, not just quads and triangles (thanks anyway Joey)!
P.P.S I just realised that the learnopengl.com site has a whole section devoted to debugging OpenGL apps, so I set that up but to no avail :-( I don't think the error handling is supported by my driver (Intel UHD Graphics 620 latest driver) since the GL_CONTEXT_FLAG_DEBUG_BIT was not set after following the instructions:
Requesting a debug context in GLFW is surprisingly easy as all we have to do is pass a hint to GLFW that we'd like to have a debug output context. We have to do this before we call glfwCreateWindow:
glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GL_TRUE);
Once we initialize GLFW we should have a debug context if we're using OpenGL version 4.3 or higher, or else we have to take our chances and hope the system is still able to request a debug context. Otherwise we have to request debug output using its OpenGL extension(s).
To check if we successfully initialized a debug context we can query OpenGL:
GLint flags; glGetIntegerv(GL_CONTEXT_FLAGS, &flags);
if (flags & GL_CONTEXT_FLAG_DEBUG_BIT) {
// initialize debug output
}
That if statement is never entered into!
Thanks to #Mykola's answer to this question I have gotten half-way there:
numVertices = 43;
vertices = new GLfloat[numVertices];
auto i = 2;
auto x = 0.0f,
y = x,
z = x,
r = 0.3f;
auto numSides = 21;
auto TWO_PI = 2.0f * M_PI;
auto increment = TWO_PI / numSides;
for (auto angle = 0.0f; angle <= TWO_PI; angle += increment) {
vertices[i++] = r * cos(angle) + x;
vertices[i++] = r * sin(angle) + y;
}
Which gives me the result shown in the (now missing) screenshot: a circle outline drawn with GL_LINE_LOOP.
Two questions I still have:
Why is there an extra line going from the centre to the right side and how can I fix it?
According to #user1118321's comment on a related SO answer, I should be able to prepend another vertex to the array at (0, 0) and use GL_TRIANGLE_FAN instead of GL_LINE_LOOP
to get a coloured circle. But this results in no output for me :-( Why?

Shaders: How to draw 3D point verts without generating geometry?

I have a 3D Webgl scene. I am using Regl http://regl.party/ . Which is WebGL. So I am essentially writing straight GLSL.
This is a game project. I have an array of 3D positions [[x,y,z] ...] which are bullets, or projectiles. I want to draw these bullets as a simple cube, sphere, or particle. No requirement on the appearance.
How can I make shaders and a draw call for this without having to create a repeated duplicate set of geometry for the bullets?
Preferring an answer with a vert and frag shader example that demonstrates the expected data input and can be reverse engineered to handle the CPU binding layer
You create an regl command which encapsulates a bunch of data. You can then call it with an object.
Each uniform can take an optional function to supply its value. That function is passed a regl context as the first argument and then the object you passed as the second argument so you can call it multiple times with a different object to draw the same thing (same vertices, same shader) somewhere else.
// Create the regl context and a pool of randomly initialised 2D
// objects; each carries a position, speed, heading and colour that the
// frame loop further down mutates and draws every frame.
var regl = createREGL()
const objects = [];
const numObjects = 100;
for (let i = 0; i < numObjects; ++i) {
objects.push({
x: rand(-1, 1),
y: rand(-1, 1),
speed: rand(.5, 1.5),
direction: rand(0, Math.PI * 2),
color: [rand(0, 1), rand(0, 1), rand(0, 1), 1],
});
}
// Returns a uniformly distributed random number in [min, max).
function rand(min, max) {
  const span = max - min;
  return min + span * Math.random();
}
// Star geometry: a centre vertex plus two rim vertices per point,
// alternating between an inner (r = 0.5) and outer (r = 1.0) radius.
const starPositions = [[0, 0, 0]];
const starElements = [];
const numPoints = 5;
for (let i = 0; i < numPoints; ++i) {
for (let j = 0; j < 2; ++j) {
// Angle of this rim vertex around the full circle.
const a = (i * 2 + j) / (numPoints * 2) * Math.PI * 2;
const r = 0.5 + j * 0.5;
starPositions.push([
Math.sin(a) * r,
Math.cos(a) * r,
0,
]);
}
// One triangle per star point: centre + two consecutive rim vertices.
starElements.push([
0, 1 + i * 2, 1 + i * 2 + 1,
]);
}
// Reusable regl draw command: same geometry and shaders for every call;
// per-object position and colour are supplied through uniform
// callbacks that receive the props object passed to drawStar(o).
const drawStar = regl({
frag: `
precision mediump float;
uniform vec4 color;
void main () {
gl_FragColor = color;
}`,
vert: `
precision mediump float;
attribute vec3 position;
uniform mat4 mat;
void main() {
gl_Position = mat * vec4(position, 1);
}`,
attributes: {
position: starPositions,
},
elements: starElements,
uniforms: {
// Column-major matrix: scale the star down (aspect-corrected in x)
// and translate it to the object's (x, y).
mat: (ctx, props) => {
const {viewportWidth, viewportHeight} = ctx;
const {x, y} = props;
const aspect = viewportWidth / viewportHeight;
return [.1 / aspect, 0, 0, 0,
0, .1, 0, 0,
0, 0, 0, 0,
x, y, 0, 1];
},
color: (ctx, props) => props.color,
}
})
// Per-frame update: random-walk each object's heading, advance it,
// wrap coordinates back into [-1, 1), then draw with the shared command.
regl.frame(function () {
regl.clear({
color: [0, 0, 0, 1]
});
objects.forEach((o) => {
o.direction += rand(-0.1, 0.1);
o.x += Math.cos(o.direction) * o.speed * 0.01;
o.y += Math.sin(o.direction) * o.speed * 0.01;
o.x = (o.x + 3) % 2 - 1;
o.y = (o.y + 3) % 2 - 1;
drawStar(o);
});
})
<script src="https://cdnjs.cloudflare.com/ajax/libs/regl/1.3.11/regl.min.js"></script>
You can draw all of the bullets as point sprites, in which case you just need to provide the position and size of each bullet and draw them as GL_POINTS. Each “point” is rasterized to a square based on the output of your vertex shader (which runs once per point). Your fragment shader is called for each fragment in that square, and can color the fragment however it wants—with a flat color, by sampling a texture, or however else you want.
Or you can provide a single model for all bullets, a separate transform for each bullet, and draw them as instanced GL_TRIANGLES or GL_TRIANGLE_STRIP or whatever. Read about instancing on the OpenGL wiki.
Not a WebGL coder so read with prejudice...
Encode the vertexes in a texture
beware of clamping use texture format that does not clamp to <0.0,+1.0> like GL_LUMINANCE32F_ARB or use vertexes in that range only. To check for clamping use:
GLSL debug prints
Render single rectangle covering whole screen
and use the texture from #1 as input. This will ensure that a fragment shader is called for each pixel of the screen/view exactly once.
Inside fragment shader read the texture and check the distance of a fragment to your vertexes
based on it render your stuff or discard() the fragment... spheres are easy, but boxes and other shapes might be complicated to render based on the distance of vertex especially if they can be arbitrarily oriented (which needs additional info in the input texture).
To ease up this you can prerender them into some texture and use the distance as texture coordinates ...
This answer of mine is using this technique:
raytrace through 3D mesh
You can sometimes get away with using GL_POINTS with a large gl_PointSize and a customized fragment shader.
An example shown here using distance to point center for fragment alpha. (You could also just as well sample a texture)
The support for large point sizes might be limited though, so check that before deciding on this route.
// Minimal WebGL demo: draws three GL_POINTS with a 50px point size and
// a fragment shader that fades alpha with distance from the point
// centre — a cheap round "sprite" without any geometry.
var canvas = document.getElementById('cvs');
gl = canvas.getContext('webgl');
// Three 3D positions, one per point.
var vertices = [
-0.5, 0.75,0.0,
0.0, 0.5, 0.0,
-0.75,0.25,0.0,
];
var vertex_buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// Vertex shader: pass the position through and set a fixed point size.
var vertCode =
`attribute vec3 coord;
void main(void) {
gl_Position = vec4(coord, 1.0);
gl_PointSize = 50.0;
}`;
var vertShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertShader, vertCode);
gl.compileShader(vertShader);
// Fragment shader: alpha goes from 1 at the point centre to 0 at the
// edge, based on gl_PointCoord distance from (0.5, 0.5).
var fragCode =
`void main(void) {
mediump float ds = distance(gl_PointCoord.xy, vec2(0.5,0.5))*2.0;
mediump vec4 fg_color=vec4(0.0, 0.0, 0.0,1.0- ds);
gl_FragColor = fg_color;
}`;
var fragShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragShader, fragCode);
gl.compileShader(fragShader);
// Link both shaders into a program and make it current.
var shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertShader);
gl.attachShader(shaderProgram, fragShader);
gl.linkProgram(shaderProgram);
gl.useProgram(shaderProgram);
// Wire the vertex buffer to the `coord` attribute and draw.
gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
var coord = gl.getAttribLocation(shaderProgram, "coord");
gl.vertexAttribPointer(coord, 3, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(coord);
gl.viewport(0,0,canvas.width,canvas.height);
gl.drawArrays(gl.POINTS, 0, 3);
<!doctype html>
<html>
<body>
<canvas width = "400" height = "400" id = "cvs"></canvas>
</body>
</html>

Sharp triangles appearing in interpolated height map

As a project, I have to generate a random NxN rough terrain in modern opengl. For this, I use a height map, rendering each 2xN row with triangle strip.
Shaders are basic, specifying a shade of yellow corresponding to the height(so I can see the bends; I have a top-down camera). Interpolation is on, but for some reason, weird sharp triangular shapes get rendered.
1) They always appear on the right side of the screen.
2) They are bigger than the unit triangle I render.
eg: I don't have the reputation to post images, so...
8x8 http://imgbox.com/flC187WW
128x128 http://i.imgbox.com/f1ebrk0V.png
And here's the code:
// Streams one triangle-strip row of the height map into the bound VBO
// and draws it.
// @param rno  height-map row index (reads rows rno and rno + 1)
// @param oy   y coordinate of the row's lower edge in NDC
void drawMeshRow(int rno, float oy) {
GLfloat meshVert[MESHSIZE * 2 * 3];
for(int i = 0; i < 2 * MESHSIZE; ++i) {
// x walks across [-1, 1]; odd/even vertices alternate between the
// two adjacent height-map rows to form the strip.
meshVert[3*i] = (i/2)*(2.0/(MESHSIZE-1)) - 1;
if(i & 1) {
meshVert[3*i + 1] = oy;
meshVert[3*i + 2] = heightMap[rno][i/2];
}
else {
meshVert[3*i + 1] = oy + (2.0/(MESHSIZE-1));
meshVert[3*i + 2] = heightMap[rno + 1][i/2];
}
}
glBufferData(GL_ARRAY_BUFFER, 2 * 3 * MESHSIZE * sizeof(GLfloat), meshVert, GL_STREAM_DRAW);
// FIX: glDrawArrays takes a VERTEX count, not a float count. The strip
// holds MESHSIZE * 2 vertices; passing MESHSIZE * 2 * 3 read past the
// buffer and produced the stray triangles on the right of the screen.
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2);
}
// Draws the full height-map mesh, one triangle-strip row at a time,
// streaming each row's vertices through the shared VBO (meshBuff).
void drawMesh() {
glUseProgram(shader);
glBindBuffer(GL_ARRAY_BUFFER, meshBuff);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
// Row i's lower edge is mapped from [0, MESHSIZE-1] into [-1, 1].
for(int i = 0; i < MESHSIZE - 1; ++i)
drawMeshRow(i, (2.0 / (MESHSIZE - 1)) * i - 1);
glDisableVertexAttribArray(0);
}
drawMesh is called each iteration of the main loop.
Shaders:
Vertex shader
#version 330 core
// Pass-through vertex shader: positions arrive already in NDC; the z
// coordinate doubles as the height value forwarded for colouring.
layout(location = 0) in vec3 pos;
smooth out float height;
void main() {
gl_Position.xyz = pos;
height = pos.z;
gl_Position.w = 1.0;
}
Fragment Shader
#version 330 core
// Colours each fragment with the interpolated height feeding the blue
// channel (red/green fixed at 1.0), i.e. yellow shading toward white.
out vec3 pcolor;
smooth in float height;
void main() {
pcolor = vec3(1.0, 1.0, height);
}
You're passing the wrong count to glDrawArrays():
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2 * 3);
The last argument is the vertex count, while the value you pass is the total number of coordinates. The correct call is:
glDrawArrays(GL_TRIANGLE_STRIP, 0, MESHSIZE * 2);

Modern equivalent of `gluOrtho2d `

What is the modern equivalent of the OpenGL function gluOrtho2d? clang is giving me deprecation warnings. I believe I need to write some kind of vertex shader? What should it look like?
I started off this answer thinking "It's not that different, you just have to...".
I started writing some code to prove myself right, and ended up not really doing so. Anyway, here are the fruits of my efforts: a minimal annotated example of "modern" OpenGL.
There's a good bit of code you'll need before modern OpenGL will start to act like old-school OpenGL. I'm not going to get into the reasons why you might like to do it the new way (or not) -- there are countless other answers that give a pretty good rundown. Instead I'll post some minimal code that can get you running if you're so inclined.
You should end up with this stunning piece of art:
Basic Render Process
Part 1: Vertex buffers
void TestDraw(){
// create a vertex buffer (This is a buffer in video memory)
GLuint my_vertex_buffer;
glGenBuffers(1 /*ask for one buffer*/, &my_vertex_buffer);
const float a_2d_triangle[] =
{
200.0f, 10.0f,
10.0f, 200.0f,
400.0f, 200.0f
};
// GL_ARRAY_BUFFER indicates we're using this for
// vertex data (as opposed to things like feedback, index, or texture data)
// so this call says use my_vertex_data as the vertex data source
// this will become relevant as we make draw calls later
glBindBuffer(GL_ARRAY_BUFFER, my_vertex_buffer);
// allocate some space for our buffer
glBufferData(GL_ARRAY_BUFFER, 4096, NULL, GL_DYNAMIC_DRAW);
// we've been a bit optimistic, asking for 4k of space even
// though there is only one triangle.
// the NULL source indicates that we don't have any data
// to fill the buffer quite yet.
// GL_DYNAMIC_DRAW indicates that we intend to change the buffer
// data from frame-to-frame.
// the idea is that we can place more than 3(!) vertices in the
// buffer later as part of normal drawing activity
// now we actually put the vertices into the buffer.
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(a_2d_triangle), a_2d_triangle);
Part 2: Vertex Array Object:
We need to define how the data contained in my_vertex_array is structured. This state is contained in a vertex array object (VAO). In modern OpenGL there needs to be at least one of these
GLuint my_vao;
glGenVertexArrays(1, &my_vao);
//lets use the VAO we created
glBindVertexArray(my_vao);
// now we need to tell the VAO how the vertices in my_vertex_buffer
// are structured
// our vertices are really simple: each one has 2 floats of position data
// they could have been more complicated (texture coordinates, color --
// whatever you want)
// enable the first attribute in our VAO
glEnableVertexAttribArray(0);
// describe what the data for this attribute is like
glVertexAttribPointer(0, // the index we just enabled
2, // the number of components (our two position floats)
GL_FLOAT, // the type of each component
false, // should the GL normalize this for us?
2 * sizeof(float), // number of bytes until the next component like this
(void*)0); // the offset into our vertex buffer where this element starts
Part 3: Shaders
OK, we have our source data all set up, now we can set up the shader which will transform it into pixels
// first create some ids
GLuint my_shader_program = glCreateProgram();
GLuint my_fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
GLuint my_vertex_shader = glCreateShader(GL_VERTEX_SHADER);
// we'll need to compile the vertex shader and fragment shader
// and then link them into a full "shader program"
// load one string from &my_fragment_source
// the NULL indicates that the string is null-terminated
const char* my_fragment_source = FragmentSourceFromSomewhere();
glShaderSource(my_fragment_shader, 1, &my_fragment_source, NULL);
// now compile it:
glCompileShader(my_fragment_shader);
// then check the result
GLint compiled_ok;
glGetShaderiv(my_fragment_shader, GL_COMPILE_STATUS, &compiled_ok);
if (!compiled_ok){ printf("Oh Noes, fragment shader didn't compile!\n"); }
else{
glAttachShader(my_shader_program, my_fragment_shader);
}
// and again for the vertex shader
const char* my_vertex_source = VertexSourceFromSomewhere();
glShaderSource(my_vertex_shader, 1, &my_vertex_source, NULL);
glCompileShader(my_vertex_shader);
glGetShaderiv(my_vertex_shader, GL_COMPILE_STATUS, &compiled_ok);
if (!compiled_ok){ printf("Oh Noes, vertex shader didn't compile!\n"); }
else{
glAttachShader(my_shader_program, my_vertex_shader);
}
//finally, link the program, and set it active
glLinkProgram(my_shader_program);
glUseProgram(my_shader_program);
Part 4: Drawing things on the screen
//get the screen size
float my_viewport[4];
glGetFloatv(GL_VIEWPORT, my_viewport);
//now create a projection matrix
float my_proj_matrix[16];
MyOrtho2D(my_proj_matrix, 0.0f, my_viewport[2], my_viewport[3], 0.0f);
//"uProjectionMatrix" refers directly to the variable of that name in
// shader source
GLuint my_projection_ref =
glGetUniformLocation(my_shader_program, "uProjectionMatrix");
// send our projection matrix to the shader
glUniformMatrix4fv(my_projection_ref, 1, GL_FALSE, my_proj_matrix );
//clear the background
glClearColor(0.3, 0.4, 0.4, 1.0);
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
// *now* after that tiny setup, we're ready to draw the best 24 bytes of
// vertex data ever.
// draw the 3 vertices starting at index 0, interpreting them as triangles
glDrawArrays(GL_TRIANGLES, 0, 3);
// now just swap buffers however your window manager lets you
}
And That's it!
... except for the actual
Shaders
I started to get a little tired at this point, so the comments are a bit lacking. Let me know if you'd like anything clarified.
// Returns the GLSL source for the minimal 2D vertex shader: a single
// vec2 position attribute transformed by the orthographic projection
// matrix supplied through the uProjectionMatrix uniform.
const char* VertexSourceFromSomewhere()
{
    static const char* const kVertexSource =
        "#version 330\n"
        "layout(location = 0) in vec2 inCoord;\n"
        "uniform mat4 uProjectionMatrix;\n"
        "void main()\n"
        "{\n"
        " gl_Position = uProjectionMatrix*(vec4(inCoord, 0, 1.0));\n"
        "}\n";
    return kVertexSource;
}
// Returns the GLSL source for the minimal fragment shader: every
// fragment is written as opaque debug magenta via a tiny helper
// function inside the shader.
const char* FragmentSourceFromSomewhere()
{
    static const char* const kFragmentSource =
        "#version 330 \n"
        "out vec4 outFragColor;\n"
        "vec4 DebugMagenta(){ return vec4(1.0, 0.0, 1.0, 1.0); }\n"
        "void main() \n"
        "{\n"
        " outFragColor = DebugMagenta();\n"
        "}\n";
    return kFragmentSource;
}
The Actual Question you asked: Orthographic Projection
As noted, the actual math is just directly from Wikipedia.
// Builds a column-major 4x4 orthographic projection matrix equivalent
// to gluOrtho2D(left, right, bottom, top): maps the given rectangle to
// normalized device coordinates with a fixed z range of [-1, 1].
// @param mat     output array of 16 floats, column-major (OpenGL layout)
// @param left    x mapped to NDC -1
// @param right   x mapped to NDC +1
// @param bottom  y mapped to NDC -1
// @param top     y mapped to NDC +1
void MyOrtho2D(float* mat, float left, float right, float bottom, float top)
{
    // gluOrtho2D fixes the near/far planes at -1 and +1.
    const float zNear = -1.0f;
    const float zFar = 1.0f;
    const float inv_z = 1.0f / (zFar - zNear);
    const float inv_y = 1.0f / (top - bottom);
    const float inv_x = 1.0f / (right - left);
    // first column: x scale
    mat[0] = 2.0f * inv_x;
    mat[1] = 0.0f;
    mat[2] = 0.0f;
    mat[3] = 0.0f;
    // second column: y scale
    // FIX: was `2.0*inv_y` — the double literal silently promoted the
    // expression to double; use 2.0f like every other element.
    mat[4] = 0.0f;
    mat[5] = 2.0f * inv_y;
    mat[6] = 0.0f;
    mat[7] = 0.0f;
    // third column: z scale
    mat[8] = 0.0f;
    mat[9] = 0.0f;
    mat[10] = -2.0f * inv_z;
    mat[11] = 0.0f;
    // fourth column: translation moving the rectangle centre to the
    // NDC origin
    mat[12] = -(right + left) * inv_x;
    mat[13] = -(top + bottom) * inv_y;
    mat[14] = -(zFar + zNear) * inv_z;
    mat[15] = 1.0f;
}
Modern OpenGL is significantly different. You won't be able to just drop in a new function. Read up...
http://duriansoftware.com/joe/An-intro-to-modern-OpenGL.-Chapter-1:-The-Graphics-Pipeline.html
http://www.arcsynthesis.org/gltut/index.html
http://www.opengl-tutorial.org/beginners-tutorials/tutorial-2-the-first-triangle/

Is it possible to use OpenGL point sprites to simulate billboard sprites?

I was trying to set point sprites in OpenGL to change size with distance just as a billboarded sprite would, but I can't get the values in GL_POINT_DISTANCE_ATTENUATION_ARB to do anything useful. Is there a correlation of values to this that would match a given projection? Is what I'm trying to do even possible?
Render code being used:
glPointParameterfARB = (PFNGLPOINTPARAMETERFARBPROC)wglGetProcAddress("glPointParameterfARB");
glPointParameterfvARB = (PFNGLPOINTPARAMETERFVARBPROC)wglGetProcAddress("glPointParameterfvARB");
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
gluPerspective(100.0, 800.0/600.0, 0.1, 10.0);
float quadratic[] = { 5.0f, 0.1f, 10.0f };
glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, quadratic );
float maxSize = 0.0f;
glGetFloatv( GL_POINT_SIZE_MAX_ARB, &maxSize );
if( maxSize > 100.0f ) maxSize = 100.0f;
glPointSize( maxSize );
glPointParameterfARB( GL_POINT_FADE_THRESHOLD_SIZE_ARB, 0.1f );
glPointParameterfARB( GL_POINT_SIZE_MIN_ARB, 0.1f );
glPointParameterfARB( GL_POINT_SIZE_MAX_ARB, maxSize );
glTexEnvf( GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE );
glEnable( GL_POINT_SPRITE_ARB );
glScalef(0.75,1,1);
glTranslatef(0.00,0.0,-1.0);
glScalef(0.5,0.5,0.5);
glRotatef(counter*0.1+0.5,1.0,1.0,0.0);
glBegin( GL_POINTS );
for( int i = 0; i < 100; ++i )
{
glColor4f( i%10*0.1, i/10*0.1, 0.5, 1.0f );
glVertex3f( i%10*0.2-1.0,i/10*0.2-1.0,
((i%10-5)*(i%10-5)+(i/10-5)*(i/10-5))*0.01 );
}
glEnd();
glDisable( GL_POINT_SPRITE_ARB );
Here's how I make my poor man's approach to scaling the point size:
// Fixed-function render pass for shader-sized point sprites: enables
// vertex-program point sizes and COORD_REPLACE texture coordinates,
// then draws points inside a glBegin/glEnd pair.
void render() {
// Let the vertex shader's gl_PointSize control rasterized point size.
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE_ARB);
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glEnable(GL_POINT_SPRITE);
glActiveTexture(GL_TEXTURE0);
// Generate per-fragment texture coordinates across each point sprite.
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE);
/* Activate shader program here */
/* Send pointSize to shader program */
glBegin(GL_POINTS);
/* Render points here */
glVertex3f(...);
// FIX: glEnd takes no arguments — `glEnd(GL_POINTS)` does not compile.
glEnd();
}
Vertex shader:
// Scales the rasterized point size inversely with clip-space w (i.e.
// with eye distance), so points shrink with perspective the way a
// billboarded quad would.
uniform float pointSize;
void main() {
gl_Position = ftransform();
gl_PointSize = pointSize / gl_Position.w;
}
You can do whatever you want in the fragment shader, but you'll have to compute the color, lighting and texturing yourself.
GLSL aside, doing what you want is pretty simple with distance attenuation. Seeing as how the projected size of things decreases quadratically with their distance in perspective projections, you only need to use the quadratic factor.
If you want to use the point size you manually set at a distance of, say, 150 units from the eye, just use 1/(150^2) as the quadratic factor (and zero for the constant and linear factors -- if anything, you may want to use some small number like 0.01 for the constant factor just to avoid potential divisions by zero).
In my experience point size attenuation isn't worth the trouble. You're much better off writing a very simple GLSL vertex shader that sets the point size manually according to some calculation you perform on your own. It took me about half a day to learn from scratch all the GLSL I needed to make this happen.
The GLSL code may be as simple as these few lines:
// Minimal vertex shader: forwards the fixed-function colour and sets
// the point size from the per-vertex `psize` attribute chosen by the
// application.
attribute float psize;
void main()
{
gl_FrontColor = gl_Color;
gl_PointSize = psize;
gl_Position = ftransform();
}
Where psize is the point size parameter the user chooses.
Just have a look in pmviewer.sourceforge.net the code is using point sprites and each point has a own color and size to simulate volume rendering:
The vertex shader is:
vertexShader
// Computes a per-particle point size that falls off with the eye-space
// distance between the particle and the camera, approximating a
// volume-rendered splat.
// with ATI hardware, uniform variable MUST be used by output
// variables. That's why win_height is used by gl_FrontColor
attribute float a_hsml1;
uniform float win_height;
uniform vec4 cameralocin;
void main()
{
vec4 position=gl_ModelViewMatrix*gl_Vertex;
vec4 cameraloc=gl_ModelViewMatrix*cameralocin;
// Eye-space distance from the camera to this particle.
float d=distance(vec3(cameraloc),vec3(position));
// NOTE(review): the smoothing length is read from gl_Normal.x rather
// than the a_hsml1 attribute declared above — presumably the CPU side
// packs it into the normal array; confirm against the VBO setup.
float a_hsml=gl_Normal.x;
float pointSize=win_height*a_hsml/d; // <- point diameter in
//pixels (drops like sqrt(1/r^2))
gl_PointSize=pointSize;
gl_TexCoord[0]=gl_MultiTexCoord0;
gl_Position=ftransform();
gl_FrontColor=vec4(gl_Color.r,gl_Color.g,gl_Color.b,gl_Color.a);
}
pixelShader
// Fragment shader: modulates the interpolated vertex colour with the
// splat texture sample.
uniform sampler2D splatTexture;
void main()
{
vec4 color = gl_Color * texture2D(splatTexture, gl_TexCoord[0].st);
// FIX: removed the stray `\n"` left over from pasting C string
// source — it is not valid GLSL and breaks compilation.
gl_FragColor = color;
}
Just to send particles to gpu:
// Uploads `num` floats from the host array into the given VBO and
// verifies the allocation by reading back the buffer size; on failure
// disables GPU rendering via the flag_GpuRender global.
// FIX: the function ended with `return flag_GpuRender;` but was
// declared void, which is ill-formed — the return type must be bool.
// (Callers that ignored the result still compile unchanged.)
// @param m_vbo   target vertex buffer object id
// @param hArray  host data, at least `num` floats
// @param num     number of floats to upload
// @return        current value of flag_GpuRender (false if the
//                allocation failed)
bool PutOneArrayToGPU(unsigned int m_vbo, float *hArray, unsigned int num)
{
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * num, hArray, GL_STATIC_DRAW);
// Read back the actual buffer size to detect a failed allocation.
int size = 0;
glGetBufferParameteriv(GL_ARRAY_BUFFER, GL_BUFFER_SIZE, &size);
if ((unsigned)size != (sizeof(float) *num))
{
fprintf(stderr, "WARNING: Pixel Buffer Object allocation failed!\n");
fprintf(stderr, "TurningOff the GPU accelerated rendering\n");
flag_GpuRender=false;
}
return flag_GpuRender;
}
Then render them:
// Draws all particles with fixed-function client-side arrays:
// positions from m_vboPos, colours from m_vboColor, and a third
// per-particle float streamed through the normal array (m_vboHSML) —
// presumably the smoothing length the vertex shader reads from
// gl_Normal.x; confirm against the shader source.
void DrawPointsByGPU()
{
glEnableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, m_vboPos);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, m_vboColor);
glColorPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, m_vboHSML);
glNormalPointer( GL_FLOAT, 3*sizeof(float), 0);
// One draw call for the entire particle set.
glDrawArrays(GL_POINTS, 0, m_numParticles);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Restore client state so later fixed-function draws start clean.
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
};