I realize that I am not using GLU, nor setting the perspective, but I am using the same normalized device coordinates as McKesson uses in http://www.arcsynthesis.org/gltut/, so I should see the same triangle he gets, only red. Instead I get a black screen and no warnings (I wish they had left the immediate-mode draw functionality in for debugging; GL3 is like flying blind until you get out of the smoke!). The code for the drawing is:
module ShaderHub;
import std.stdio;
import std.string;
import derelict.opengl3.gl3;
class ShaderHub{
private bool ok=true;
private GLuint shad=0, vshad=0, fshad=0;
private int voff=0;
private GLuint vbo=0, vao=0;
const float[] v = [ 0.75f, 0.75f, 0.0f, 1.0f,
0.75f, -0.75f, 0.0f, 1.0f,
-0.75f, -0.75f, 0.0f, 1.0f];
public this(){
immutable string vshader = `
#version 330
layout(location = 1) in vec4 pos;
void main(void)
{
gl_Position = pos;
}
`;
immutable string fshader = `
#version 330
void main(void)
{
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
`;
shad=glCreateProgram();
if(shad==0){
writeln("Error: GL did not assigh main shader program id");
ok=false;
}
vshad=glCreateShader(GL_VERTEX_SHADER);
const char *vptr=toStringz(vshader);
glShaderSource(vshad, 1, &vptr, null);
glCompileShader(vshad);
int status, len;
glGetShaderiv(vshad, GL_COMPILE_STATUS, &status);
if(status==GL_FALSE){
glGetShaderiv(vshad, GL_INFO_LOG_LENGTH, &len);
char[] error=new char[len];
glGetShaderInfoLog(vshad, len, null, cast(char*)error);
writeln(error);
ok=false;
}
fshad=glCreateShader(GL_FRAGMENT_SHADER);
const char *fptr=toStringz(fshader);
glShaderSource(fshad, 1, &fptr, null);
glCompileShader(fshad);
glGetShaderiv(fshad, GL_COMPILE_STATUS, &status);
if(status==GL_FALSE){
glGetShaderiv(fshad, GL_INFO_LOG_LENGTH, &len);
char[] error=new char[len];
glGetShaderInfoLog(fshad, len, null, cast(char*)error);
writeln(error);
ok=false;
}
glAttachShader(shad, vshad);
glAttachShader(shad, fshad);
glLinkProgram(shad);
glGetProgramiv(shad, GL_LINK_STATUS, &status);
if(status==GL_FALSE){
glGetProgramiv(shad, GL_INFO_LOG_LENGTH, &len);
char[] error=new char[len];
glGetProgramInfoLog(shad, len, null, cast(char*)error);
writeln(error);
ok=false;
}
glGenVertexArrays(1, &vao);
if(vao<1){
writeln("Error: GL failed to assign vao id");
ok=false;
}
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
if(vbo<1){
writeln("Error: GL failed to assign vbo id");
ok=false;
}
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, v.length * GL_FLOAT.sizeof, &v[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, cast(void*)voff);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
public void draw(){
glUseProgram(shad);
writeln(glGetAttribLocation(shad, "pos"));//prints 1
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindVertexArray(0);
glUseProgram(0);
}
}
Note that the debug line writing the binding location of pos correctly prints 1. The code setting up SDL and GL is:
import std.stdio;
import derelict.sdl2.sdl;
import derelict.opengl3.gl3;
import EventHub;
import ExposeApp;
pragma(lib, "DerelictUtil.lib");
pragma(lib, "DerelictSDL2.lib");
pragma(lib, "DerelictGL3.lib");
class App{
private ExposeApp funcPtrs;
private EventHub ehub;
private SDL_Window *win;
private SDL_GLContext context;
private int w=600, h=480, fov=55;
private bool running=true;
public this(){
if(!initSDL()){
writeln("Error initializing SDL");
SDL_Quit();
}
initGL();
funcPtrs=new ExposeApp();
funcPtrs.stop=&stopLoop;
funcPtrs.grabMouse=&grabMouse;
funcPtrs.releaseMouse=&releaseMouse;
ehub=new EventHub(funcPtrs);
while(running){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
ehub.tick();
SDL_GL_SwapWindow(win);
}
SDL_GL_DeleteContext(context);
SDL_DestroyWindow(win);
SDL_Quit();
}
private void stopLoop(){
running=false;
}
private void grabMouse(){
SDL_ShowCursor(SDL_DISABLE);
SDL_SetWindowGrab(win, SDL_TRUE);
}
private void releaseMouse(){
SDL_ShowCursor(SDL_ENABLE);
SDL_SetWindowGrab(win, SDL_FALSE);
}
private bool initSDL(){
if(SDL_Init(SDL_INIT_VIDEO)< 0){
writefln("Error initializing SDL");
SDL_Quit();
return false;
}
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
win=SDL_CreateWindow("3Doodle", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, w, h, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
if(!win){
writefln("Error creating SDL window");
SDL_Quit();
return false;
}
context=SDL_GL_CreateContext(win);
SDL_GL_SetSwapInterval(1);
DerelictGL3.reload();
return true;
}
private void initGL(){
resize(w, h);
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glDepthFunc(GL_LEQUAL);
glClearColor(0.0, 0.0, 0.0, 1.0);
glClearDepth(1.0);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
}
private void resize(int w, int h){
//this will contain the makings of the projection matrix, which we go into next tut
glViewport(0, 0, w, h);
}
}
void main(){
try{
DerelictSDL2.load();
}catch(Exception e){
writeln("Error loading SDL2 lib");
}
try{
DerelictGL3.load();
}catch(Exception e){
writeln("Error loading GL3 lib");
}
App a=new App();
}
If anyone has some Derelict3 OpenGL3 code that actually displays something on screen and is willing to share it, please do; I have googled up and down and can't find any.
Comment to David:
As I said, until I can get something on screen I am flying dark, so what's the point in extending that darkness to cover model loading? I am after the "BASIC" template here; I already have a model loader from my work in C#. The code you linked to is Derelict 1 and doesn't require OpenGL3, which, from what I read, demands VAOs (hence the reason for their inclusion). But even without that reason, being able to bind a VBO to attribute locations in the vertex shader at initialization, rather than making the glEnableVertexAttribArray plus glVertexAttribPointer calls every loop for every VBO drawn, is a saving. Culling isn't the problem; I checked. So I guess I am still waiting for a considered answer!
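Roughly the saving I mean, sketched in C-style GL calls (the Derelict versions are identical apart from the cast syntax; assume vao and vbo are ids that were created and filled earlier):
// Setup, done once: the bound VAO records the attribute state set below.
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindVertexArray(0);
// Per frame: rebinding the VAO restores all of that state in one call.
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);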
PS. OK, I am sorry about not clicking the answered button; I didn't realize there was such a thing. I have gone back through old posts and corrected that. But your answer to this question, Dav1d, misses the mark.
Sorry, but this code makes me cry.
I guess this is the first example of the tutorial (judging by the shader); here is my D solution with Derelict2 (also not the best code). Btw, you should disable resizing; it doesn't work most of the time and just makes things harder, especially for a beginner.
I recommend that you not use VAOs: first of all, they bring you nearly no performance boost, they make your application harder to maintain, and they conflict with the DSA approach.
Furthermore, I also recommend using some kind of wrapper for the OpenGL API, at least for loading meshes (the tutorial uses its own helper; if you wanna reimplement it, you have lots of work!). You could use gljm, a library that I wrote; it works pretty well for meshes in the .obj format (it's able to load the Sponza scene), and also for non-complex meshes in the .ply format.
Later on you will also need a vector/matrix/quaternion math library; the tutorial uses glm (which you can't really port to D, since it's a C++ template library). gl3n could be an alternative (also written by me).
PS: Please accept answers when you ask questions, not just because I am a reputation-whore, but also so that people can see the best answer immediately (you should also do this with your older questions).
PS²: Maybe your problem is just caused by enabling face-culling.
Found the answer, thanks to David at the DMD site. The &v in glBufferData(GL_ARRAY_BUFFER, v.length * GL_FLOAT.sizeof, &v, GL_STATIC_DRAW); needs to be changed to &v[0]. In D an array like v is a slice, a (length, pointer) pair, so &v is the address of the slice structure itself rather than of the vertex data; &v[0] (or v.ptr) is the address of the actual floats.
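The same trap exists in C++ with std::vector, for anyone arriving from that side; a rough analogy (not the original code):
std::vector<float> v = { 0.75f, 0.75f, 0.0f, 1.0f };
// Wrong: &v is the address of the vector object itself, not of its elements.
// glBufferData(GL_ARRAY_BUFFER, v.size() * sizeof(float), &v, GL_STATIC_DRAW);
// Right: pass the address of the first element (or v.data()).
glBufferData(GL_ARRAY_BUFFER, v.size() * sizeof(float), v.data(), GL_STATIC_DRAW);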
For starters, I don't believe you ever call your draw function (unless that's done via ehub.tick()).
Related
I would like to work through the OpenGL Red Book, The OpenGL Programming Guide, 8th edition, using Xcode on Mac OS X.
I am unable to run the first code example, triangles.cpp. I have tried including the GLUT and GL frameworks that come with Xcode and I have searched around enough to see that I am not likely to figure this out on my own.
Assuming that I have a fresh installation of Mac OS X, and I have freshly installed Xcode with Xcode command-line tools, what are the step-by-step instructions to be able to run triangles.cpp in that environment?
Unlike this question, my preference would be not to use Cocoa, Objective-C or Swift. My preference would be to stay in C++/C only. An answer is only correct if I can follow it step-by-step and end up with a running triangles.cpp program.
My preference is Mac OS X 10.9, however a correct answer can assume 10.9, 10.10 or 10.11.
Thank you.
///////////////////////////////////////////////////////////////////////
//
// triangles.cpp
//
///////////////////////////////////////////////////////////////////////
#include <iostream>
using namespace std;
#include "vgl.h"
#include "LoadShader.h"
enum VAO_IDs { Triangles, NumVAOs };
enum Buffer_IDs { ArrayBuffer, NumBuffers };
enum Attrib_IDs { vPosition = 0 };
GLuint VAOs[NumVAOs];
GLuint Buffers[NumBuffers];
const GLuint NumVertices = 6;
//---------------------------------------------------------------------
//
// init
//
void
init(void)
{
glGenVertexArrays(NumVAOs, VAOs);
glBindVertexArray(VAOs[Triangles]);
GLfloat vertices[NumVertices][2] = {
{ -0.90, -0.90 }, // Triangle 1
{ 0.85, -0.90 },
{ -0.90, 0.85 },
{ 0.90, -0.85 }, // Triangle 2
{ 0.90, 0.90 },
{ -0.85, 0.90 }
};
glGenBuffers(NumBuffers, Buffers);
glBindBuffer(GL_ARRAY_BUFFER, Buffers[ArrayBuffer]);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices),
vertices, GL_STATIC_DRAW);
ShaderInfo shaders[] = {
{ GL_VERTEX_SHADER, "triangles.vert" },
{ GL_FRAGMENT_SHADER, "triangles.frag" },
{ GL_NONE, NULL }
};
GLuint program = LoadShaders(*shaders);
glUseProgram(program);
glVertexAttribPointer(vPosition, 2, GL_FLOAT,
GL_FALSE, 0, BUFFER_OFFSET(0));
glEnableVertexAttribArray(vPosition);
}
//---------------------------------------------------------------------
//
// display
//
void
display(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glBindVertexArray(VAOs[Triangles]);
glDrawArrays(GL_TRIANGLES, 0, NumVertices);
glFlush();
}
//---------------------------------------------------------------------
//
// main
//
int
main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(512, 512);
glutInitContextVersion(4, 3);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutCreateWindow(argv[0]);
glewExperimental = GL_TRUE;
if (glewInit()) {
cerr << "Unable to initialize GLEW ... exiting" << endl;
exit(EXIT_FAILURE);
}
init();
glutDisplayFunc(display);
glutMainLoop();
}
Edit 1: In response to the first comment, here is the naive effort.
Open Xcode 5.1.1 on Mac OS X 10.9.5
Create a new C++ Command-line project.
Paste over the contents of main.cpp with the contents of triangles.cpp.
Click on the project -> Build Phases -> Link Binary with Libraries
Add OpenGL.framework and GLUT.framework
Result: "/Users/xxx/Desktop/Triangles/Triangles/main.cpp:10:10: 'vgl.h' file not found"
Edit 2: Added the vgl translation unit and LoadShaders translation unit; also added libFreeGlut.a and libGlew32.a to my project's compilation/linking. Moved all of the OpenGL Book's Include contents to my project's source directory. Had to change several include statements to use quoted includes instead of angled includes. It feels like this is closer to working, but it is unable to find LoadShader.h. Note that the translation unit in the OpenGL download is called LoadShaders (plural). Changing triangles.cpp to reference LoadShaders.h fixed the include problem, but the contents of that translation unit don't seem to match the signatures of what's being called from triangles.cpp.
There are some issues with the source and with the files in oglpg-8th-edition.zip:
triangles.cpp uses non-standard GLUT functions that aren't included in GLUT and instead are only part of the freeglut implementation (glutInitContextVersion and glutInitContextProfile). freeglut doesn't really support OS X, and building it there relies on additional X11 support. Instead of telling you how to do that, I'm just going to modify the source to build with OS X's GLUT framework.
The code depends on glew, and the book's source download apparently doesn't include a binary you can use, so you'll need to build it for yourself.
Build GLEW with the following commands:
git clone git://git.code.sf.net/p/glew/code glew
cd glew
make extensions
make
Now:
Create a C++ command line Xcode project
Set the executable to link with the OpenGL and GLUT frameworks and the glew dylib you just built.
Modify the project "Header Search Paths" to include the location of the glew headers for the library you built, followed by the path to oglpg-8th-edition/include
Add oglpg-8th-edition/lib/LoadShaders.cpp to your xcode project
Paste the triangles.cpp source into the main.cpp of your Xcode project
Modify the source: replace #include "vgl.h" with:
#include <GL/glew.h>
#include <OpenGL/gl3.h>
#include <GLUT/glut.h>
#define BUFFER_OFFSET(x) ((const void*) (x))
Also make sure that the typos in the version of triangles.cpp that you include in your question are fixed: you include "LoadShader.h" when it should be "LoadShaders.h", and LoadShaders(*shaders); should be LoadShaders(shaders). (The code printed in my copy of the book doesn't contain these errors.)
Delete the calls to glutInitContextVersion and glutInitContextProfile.
Change the parameter to glutInitDisplayMode to GLUT_RGBA | GLUT_3_2_CORE_PROFILE
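Taken together, the modified main() ends up looking roughly like this (the rest of triangles.cpp is unchanged):
int main(int argc, char** argv)
{
glutInit(&argc, argv);
// GLUT_3_2_CORE_PROFILE replaces the freeglut-only context calls on OS X.
glutInitDisplayMode(GLUT_RGBA | GLUT_3_2_CORE_PROFILE);
glutInitWindowSize(512, 512);
glutCreateWindow(argv[0]);
glewExperimental = GL_TRUE;
if (glewInit()) {
cerr << "Unable to initialize GLEW ... exiting" << endl;
exit(EXIT_FAILURE);
}
init();
glutDisplayFunc(display);
glutMainLoop();
}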
At this point the code builds, links, and runs, however running the program displays a black window for me instead of the expected triangles.
About fixing the black window issue mentioned in Matthew's and bames53's comments:
Follow bames53's answer
Define the shaders as strings:
const char *pTriangleVert =
"#version 410 core\n\
layout(location = 0) in vec4 vPosition;\n\
void\n\
main()\n\
{\n\
gl_Position= vPosition;\n\
}";
const char *pTriangleFrag =
"#version 410 core\n\
out vec4 fColor;\n\
void\n\
main()\n\
{\n\
fColor = vec4(0.0, 0.0, 1.0, 1.0);\n\
}";
OpenGL 4.1 is supported on my iMac, so I changed the version to 410:
ShaderInfo shaders[] = {
{ GL_VERTEX_SHADER, pTriangleVert},
{ GL_FRAGMENT_SHADER, pTriangleFrag },
{ GL_NONE, NULL }
};
Modify the ShaderInfo struct slightly
change
typedef struct {
GLenum type;
const char* filename;
GLuint shader;
} ShaderInfo;
into
typedef struct {
GLenum type;
const char* source;
GLuint shader;
} ShaderInfo;
Modify the loadShader function slightly:
Comment out the code that reads the shader from a file; change
/*
const GLchar* source = ReadShader( entry->filename );
if ( source == NULL ) {
for ( entry = shaders; entry->type != GL_NONE; ++entry ) {
glDeleteShader( entry->shader );
entry->shader = 0;
}
return 0;
}
glShaderSource( shader, 1, &source, NULL );
delete [] source;*/
into
glShaderSource(shader, 1, &entry->source, NULL);
You'd better turn on DEBUG in case there are shader compile errors.
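If you'd rather not rely on the helper's DEBUG path, a minimal stand-alone check after glCompileShader looks something like this (shader is the id just compiled; assumes <vector> and <iostream> are included):
GLint compiled = GL_FALSE;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if (compiled != GL_TRUE) {
GLint len = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &len);
std::vector<char> log(len + 1);
glGetShaderInfoLog(shader, len, NULL, log.data());
std::cerr << log.data() << std::endl;
}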
You can use the example from this link; it's almost the same, but it uses GLFW instead of GLUT.
http://www.tomdalling.com/blog/modern-opengl/01-getting-started-in-xcode-and-visual-cpp/
/*
main
Copyright 2012 Thomas Dalling - http://tomdalling.com/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//#include "platform.hpp"
// third-party libraries
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
// standard C++ libraries
#include <cassert>
#include <iostream>
#include <stdexcept>
#include <cmath>
// tdogl classes
#include "Program.h"
// constants
const glm::vec2 SCREEN_SIZE(800, 600);
// globals
GLFWwindow* gWindow = NULL;
tdogl::Program* gProgram = NULL;
GLuint gVAO = 0;
GLuint gVBO = 0;
// loads the vertex shader and fragment shader, and links them to make the global gProgram
static void LoadShaders() {
std::vector<tdogl::Shader> shaders;
shaders.push_back(tdogl::Shader::shaderFromFile("vertex-shader.txt", GL_VERTEX_SHADER));
shaders.push_back(tdogl::Shader::shaderFromFile("fragment-shader.txt", GL_FRAGMENT_SHADER));
gProgram = new tdogl::Program(shaders);
}
// loads a triangle into the VAO global
static void LoadTriangle() {
// make and bind the VAO
glGenVertexArrays(1, &gVAO);
glBindVertexArray(gVAO);
// make and bind the VBO
glGenBuffers(1, &gVBO);
glBindBuffer(GL_ARRAY_BUFFER, gVBO);
// Put the three triangle vertices into the VBO
GLfloat vertexData[] = {
// X Y Z
0.0f, 0.8f, 0.0f,
-0.8f,-0.8f, 0.0f,
0.8f,-0.8f, 0.0f,
};
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexData), vertexData, GL_STATIC_DRAW);
// connect the xyz to the "vert" attribute of the vertex shader
glEnableVertexAttribArray(gProgram->attrib("vert"));
glVertexAttribPointer(gProgram->attrib("vert"), 3, GL_FLOAT, GL_FALSE, 0, NULL);
// unbind the VBO and VAO
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
// draws a single frame
static void Render() {
// clear everything
glClearColor(0, 0, 0, 1); // black
glClear(GL_COLOR_BUFFER_BIT);
// bind the program (the shaders)
glUseProgram(gProgram->object());
// bind the VAO (the triangle)
glBindVertexArray(gVAO);
// draw the VAO
glDrawArrays(GL_TRIANGLES, 0, 3);
// unbind the VAO
glBindVertexArray(0);
// unbind the program
glUseProgram(0);
// swap the display buffers (displays what was just drawn)
glfwSwapBuffers(gWindow);
}
void OnError(int errorCode, const char* msg) {
throw std::runtime_error(msg);
}
// the program starts here
void AppMain() {
// initialise GLFW
glfwSetErrorCallback(OnError);
if(!glfwInit())
throw std::runtime_error("glfwInit failed");
// open a window with GLFW
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
gWindow = glfwCreateWindow((int)SCREEN_SIZE.x, (int)SCREEN_SIZE.y, "OpenGL Tutorial", NULL, NULL);
if(!gWindow)
throw std::runtime_error("glfwCreateWindow failed. Can your hardware handle OpenGL 3.2?");
// GLFW settings
glfwMakeContextCurrent(gWindow);
// initialise GLEW
glewExperimental = GL_TRUE; //stops glew crashing on OSX :-/
if(glewInit() != GLEW_OK)
throw std::runtime_error("glewInit failed");
// print out some info about the graphics drivers
std::cout << "OpenGL version: " << glGetString(GL_VERSION) << std::endl;
std::cout << "GLSL version: " << glGetString(GL_SHADING_LANGUAGE_VERSION) << std::endl;
std::cout << "Vendor: " << glGetString(GL_VENDOR) << std::endl;
std::cout << "Renderer: " << glGetString(GL_RENDERER) << std::endl;
// make sure OpenGL version 3.2 API is available
if(!GLEW_VERSION_3_2)
throw std::runtime_error("OpenGL 3.2 API is not available.");
// load vertex and fragment shaders into opengl
LoadShaders();
// create buffer and fill it with the points of the triangle
LoadTriangle();
// run while the window is open
while(!glfwWindowShouldClose(gWindow)){
// process pending events
glfwPollEvents();
// draw one frame
Render();
}
// clean up and exit
glfwTerminate();
}
int main(int argc, char *argv[]) {
try {
AppMain();
} catch (const std::exception& e){
std::cerr << "ERROR: " << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
I have adapted the project for Mac here:
https://github.com/badousuan/openGLredBook9th
The project builds successfully and most demos run as expected. However, the original code is based on OpenGL 4.5, while Mac only supports version 4.1, so some of the newer API calls may fail. If some target doesn't work well, consider this version issue and make the necessary adaptations.
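For example, the 4.5-only direct-state-access calls have no 4.1 equivalent, so the usual adaptation is to fall back to bind-to-edit (a sketch; vbo, size, and data are placeholders):
// GL 4.5 original (DSA), unavailable in a 4.1 context:
//   glCreateBuffers(1, &vbo);
//   glNamedBufferData(vbo, size, data, GL_STATIC_DRAW);
// GL 4.1 fallback: bind the buffer, then edit it through the binding point.
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);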
I use the code from this tutorial: http://antongerdelan.net/opengl/hellotriangle.html, and it works on my Mac.
Here is the code I run.
#include <GL/glew.h> // include GLEW and new version of GL on Windows
#include <GLFW/glfw3.h> // GLFW helper library
#include <stdio.h>
int main() {
// start GL context and O/S window using the GLFW helper library
if (!glfwInit()) {
fprintf(stderr, "ERROR: could not start GLFW3\n");
return 1;
}
// uncomment these lines if on Apple OS X
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow* window = glfwCreateWindow(640, 480, "Hello Triangle", NULL, NULL);
if (!window) {
fprintf(stderr, "ERROR: could not open window with GLFW3\n");
glfwTerminate();
return 1;
}
glfwMakeContextCurrent(window);
// start GLEW extension handler
glewExperimental = GL_TRUE;
glewInit();
// get version info
const GLubyte* renderer = glGetString(GL_RENDERER); // get renderer string
const GLubyte* version = glGetString(GL_VERSION); // version as a string
printf("Renderer: %s\n", renderer);
printf("OpenGL version supported %s\n", version);
// tell GL to only draw onto a pixel if the shape is closer to the viewer
glEnable(GL_DEPTH_TEST); // enable depth-testing
glDepthFunc(GL_LESS); // depth-testing interprets a smaller value as "closer"
/* OTHER STUFF GOES HERE NEXT */
float points[] = {
0.0f, 0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, -0.5f, 0.0f
};
GLuint vbo = 0; // vertex buffer object
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 9 * sizeof(float), points, GL_STATIC_DRAW);
GLuint vao = 0; // vertex array object
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
const char* vertex_shader =
"#version 400\n"
"in vec3 vp;"
"void main() {"
" gl_Position = vec4(vp, 1.0);"
"}";
const char* fragment_shader =
"#version 400\n"
"out vec4 frag_colour;"
"void main() {"
" frag_colour = vec4(0.5, 0.0, 0.5, 1.0);"
"}";
GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vertex_shader, NULL);
glCompileShader(vs);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fragment_shader, NULL);
glCompileShader(fs);
GLuint shader_programme = glCreateProgram();
glAttachShader(shader_programme, fs);
glAttachShader(shader_programme, vs);
glLinkProgram(shader_programme);
while(!glfwWindowShouldClose(window)) {
// wipe the drawing surface clear
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(shader_programme);
glBindVertexArray(vao);
// draw points 0-3 from the currently bound VAO with current in-use shader
glDrawArrays(GL_TRIANGLES, 0, 3);
// update other events like input handling
glfwPollEvents();
// put the stuff we've been drawing onto the display
glfwSwapBuffers(window);
}
// close GL context and any other GLFW resources
glfwTerminate();
return 0;
}
I'm attempting to draw a single large triangle in a window in OpenGL. My program compiles and runs, but I get just a black screen in my window.
I've checked and double-checked multiple tutorials and it seems like my steps are correct... Am I missing something obvious?
Here is the program in its entirety:
#include <stdlib.h>
#include <stdio.h>
#include <GL/glew.h>
#include <GLUT/glut.h>
GLuint VBO;
struct vector {
float _x;
float _y;
float _z;
vector() { }
vector(float x, float y, float z) { _x = x; _y = y; _z = z; }
};
void render()
{
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(0);
glutSwapBuffers();
}
void create_vbo()
{
vector verts[3];
verts[0] = vector(-1.0f, -1.0f, 0.0f);
verts[1] = vector(1.0f, -1.0f, 0.0f);
verts[2] = vector(0.0f, 1.0f, 0.0f);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE|GLUT_RGBA);
glutInitWindowSize(1024, 768);
glutInitWindowPosition(100, 100);
glutCreateWindow("Triangle Test");
glutDisplayFunc(render);
glewInit();
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
create_vbo();
glutMainLoop();
return 0;
}
Update: It turns out that drawing this way without a "program" (that is, compiled shader files) produces undefined behavior (the newer your graphics card, the more likely it is to work, however).
Because my card is right on the edge and only supports OpenGL 2.1, it was a little difficult to find an appropriate shader example that would work -- seems like there are many different tutorials out there written at different stages in the evolution of OpenGL.
My vertex shader (entire file):
void main()
{
gl_Position = ftransform();
}
My fragment shader (entire file):
void main()
{
gl_FragColor = vec4(0.4,0.4,0.8,1.0);
}
I used the example LoadShaders function from this OpenGL Tutorial Site to create the program, and now, I, too, can see the triangle!
(Thanks to #chbaker0 for pointing me in the right direction.)
I do not know if this will help you or not, but in your create_vbo() function, where you have:
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
try this instead:
glBufferData( GL_ARRAY_BUFFER, sizeof( verts[0] ) * 3, &verts[0], GL_STATIC_DRAW );
And after that call, add this one at the end of your create_vbo() function:
// This MUST BE LAST! It unbinds the buffer once setup is done.
glBindBuffer( GL_ARRAY_BUFFER, 0 );
It is hard for me to see your error. In my projects I do have some VBOs, but I am also using VAOs as well. My code works in OpenGL 2.0 through 4.5, but for the older versions there is a split in logic because of the deprecated functions within the API. I also do not use GLUT. I hope this helps.
The other thing I noticed: did you pay attention to your vertex winding order? Meaning, are the vertices being used by OpenGL in CCW order or CW order? Is back-face culling turned on or off? There are a lot of elements to consider when setting up and configuring an OpenGL context. It has been a while since I worked with older versions of OpenGL, but I do know that once you start working with a specific version or newer, you have to supply your own model-view-projection matrix; just something to consider.
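If you want to rule culling out explicitly, something like this near your initialization would do it (a sketch, not code from the question):
glFrontFace(GL_CCW);     // counter-clockwise winding is the front face
glDisable(GL_CULL_FACE); // or: glEnable(GL_CULL_FACE); glCullFace(GL_BACK);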
The issue I ran into was using pipeline features without defining a shader program. The spec says this should work, but on my graphics card it did not. (See my update in the question for more specifics.)
Thanks to all the commenters for nudging me in the right direction.
I have a problem with OpenGL debugging. I find that a lot of the time OpenGL shows you it failed simply by not drawing anything; every time, the code looks fine, yet nothing is drawn in the GL window.
For example, consider the code below. I wrote it to draw a cube, but it draws nothing and I am unable to find the cause.
========================================================
// cube_vertex_array.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include <glut.h>
static GLfloat vertex[]=
{
100.0,100.0,0.0,
400.0,100.0,0.0,
400.0,400.0,0.0,
100.0,400.0,0.0,
100.0,100.0,-300.0,
400.0,100.0,-300.0,
400.0,400.0,-300.0,
100.0,400.0,-300.0
};
static GLfloat color[]=
{
1.0,0.0,0.0,
0.0,1.0,0.0,
0.0,0.0,1.0,
1.0,1.0,0.0,
1.0,0.0,1.0,
0.0,1.0,1.0
};
static GLubyte frontIndices[] = {0,1,2,3};
static GLubyte leftIndices[] = {1,5,6,2};
static GLubyte backIndices[] = {4,7,6,5};
static GLubyte rightIndices[] = {0,3,7,4};
static GLubyte topIndices[] = {3,2,6,7};
static GLubyte bottomIndices[] = {0,4,5,1};
void init(void)
{
glClearColor(0.0,0.0,0.0,0.0); //Set default background color to black.
glClearDepth(2.0); //Set the depth level for clearing depth buffer.
glShadeModel(GL_FLAT); //Set the shading model to FLAT
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Clear the color and depth buffer.
}
void Display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Clear the color and depth buffer.
glColor3f(1.0,0.0,0.0);
//glBegin(GL_LINE_STRIP);
// glVertex3f(0.0,0.0,0.0);
// glVertex3f(200.0,100.0,0.0);
//glEnd();
glEnableClientState(GL_VERTEX_ARRAY); //Enable vertex array.
glEnableClientState(GL_COLOR_ARRAY); //Enable vertex array color.
glColorPointer(3,GL_FLOAT,0,color); //Specify the array for colors.
glVertexPointer(3,GL_FLOAT,0,vertex); //Specify the array for vertex.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,frontIndices); //Draw front face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,leftIndices); //Draw left face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,backIndices); //Draw back face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,rightIndices); //Draw right face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,topIndices); //Draw top face.
glDrawElements(GL_QUADS,4,GL_UNSIGNED_BYTE,bottomIndices); //Draw bottom face.
glutSwapBuffers(); //Swap the buffers.
}
void Reshape(int w,int h)
{
glViewport(0.0,(GLsizei)w,0.0,(GLsizei)h); //Set the viewport according to new window size.
glMatrixMode(GL_PROJECTION); //Set matrix mode to projection.
glLoadIdentity(); //Replace the top matrix in the stack to the identity matrix.
gluOrtho2D(0.0,(GLdouble)w,0.0,(GLdouble)h); //Set the orthographic projection.
glMatrixMode(GL_MODELVIEW); //Set matrix mode to modelview.
}
int main(int argc, char **argv)
{
glutInit(&argc,argv); //Initialize the glut.
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); //Set display mode and also enable double buffering.
glutInitWindowSize(500,500); //Set the initial window size.
glutCreateWindow("Cube"); //Create the window and also assign name to it.
init(); //Initialize the app.
glutDisplayFunc(Display); //Register the Display function.
glutReshapeFunc(Reshape); //Register the Reshape function.
glutMainLoop(); //Start the main loop.
return 0;
}
You have put GL_UNSIGNED_BYTE as the type parameter in glDrawElements(). This will cause OpenGL to interpret the array of indices you throw in as one byte per index. You should use GL_UNSIGNED_INT here instead.
Here's the working code based on the code you provided (I did port it to Java, though):
import java.nio.ByteBuffer;
import org.lwjgl.BufferUtils;
import org.lwjgl.LWJGLException;
import org.lwjgl.opengl.Display;
import org.lwjgl.opengl.DisplayMode;
import static org.lwjgl.opengl.GL11.*;
public class GLTest {
public static void main(String[] args) {
try {
Display.create();
Display.setDisplayMode(new DisplayMode(500, 500));
Display.setResizable(true);
//the same arrays as the ones you specified.
float[] vertices = new float[]{100.0f,100.0f,0.0f,
400.0f,100.0f,0.0f,
400.0f,400.0f,0.0f,
100.0f,400.0f,0.0f,
100.0f,100.0f,-300.0f,
400.0f,100.0f,-300.0f,
400.0f,400.0f,-300.0f,
100.0f,400.0f,-300.0f};
float[] color = new float[]{1,0,0,
0,1,0,
0,0,1,
1,1,0,
1,0,1,
0,1,1};
int[] frontIndices = new int[]{0, 1, 2, 3};
//LWJGL bookkeeping...
ByteBuffer vertexBuffer = BufferUtils.createByteBuffer(vertices.length * 4);
ByteBuffer colourBuffer = BufferUtils.createByteBuffer(color.length * 4);
for(int i = 0; i < vertices.length; i++) {
vertexBuffer.putFloat(vertices[i]);
}
vertexBuffer.rewind();
for(int i = 0; i < color.length; i++) {
colourBuffer.putFloat(color[i]);
}
colourBuffer.rewind();
ByteBuffer indexBuffer = BufferUtils.createByteBuffer(4 * frontIndices.length);
for(int i = 0; i < frontIndices.length; i++) {
indexBuffer.putInt(frontIndices[i]);
}
indexBuffer.rewind();
//back to your code
glClearColor(1,1,1,1);
glShadeModel(GL_SMOOTH);
while(!Display.isCloseRequested()) {
glViewport(0, 0, Display.getWidth(), Display.getHeight());
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0,Display.getWidth(), 0, Display.getHeight(), -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(3, GL_FLOAT, 0, colourBuffer);
glVertexPointer(3, GL_FLOAT, 0, vertexBuffer);
glDrawElements(GL_QUADS, 4, GL_UNSIGNED_INT, indexBuffer);
Display.update();
Display.sync(60);
}
} catch (LWJGLException e) {
e.printStackTrace();
}
}
}
Which results in the front face of the cube being drawn as expected (screenshot omitted).
Use tools like glTrace / glIntercept (to look at the OpenGL call trace) and gDEBugger (to visualize textures, shaders, OGL state, etc.).
There is a list of OpenGL debugging tools here: https://www.opengl.org/wiki/Debugging_Tools
Also, your code is using the old fixed pipeline, which has been considered deprecated since OpenGL 3.3, so I would recommend either not putting the tag "opengl-3" on your questions, or using an OpenGL 3.3 core context and learning the "modern" OpenGL (which is more powerful and more difficult to learn, but makes you understand how the GPU works).
I'm trying to get a small triangle to display.
Here is my initialization code:
void PlayerInit(Player P1)
{
glewExperimental = GL_TRUE;
glewInit();
//initialize buffers needed to draw the player
GLuint vao;
GLuint buffer;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &buffer);
//bind the buffer and vertex objects
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
//Set up a buffer to hold 9 floats for position and 9 floats for color
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*18, NULL, GL_STATIC_DRAW);
//push the vertices of the player into the buffer
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float)*9, CalcPlayerPoints(P1.GetPosition()));
//push the color of each player vertex into the buffer
glBufferSubData(GL_ARRAY_BUFFER, sizeof(float)*9, sizeof(float)*9, CalcPlayerColor(1));
//create and compile the vertex/fragment shader objects
GLuint vs = create_shader("vshader.glsl" ,GL_VERTEX_SHADER);
GLuint fs = create_shader("fshader.glsl" ,GL_FRAGMENT_SHADER);
//create a program object and link the shaders
GLuint program = glCreateProgram();
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program);
//error checking for linking
GLint linked;
glGetProgramiv(program, GL_LINK_STATUS, &linked);
if(!linked)
{
std::cerr<< "Shader program failed to link" <<std::endl;
GLint logSize;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &logSize);
char* logMsg = new char[logSize];
glGetProgramInfoLog(program, logSize, NULL, logMsg);
std::cerr<< logMsg << std::endl;
delete [] logMsg;
exit(EXIT_FAILURE);
}
glUseProgram(program);
//create attributes for color and position to pass to shaders
//enable each attribute
GLuint Pos = glGetAttribLocation( program, "vPosition");
glEnableVertexAttribArray(Pos);
GLuint Col = glGetAttribLocation( program, "vColor");
glEnableVertexAttribArray(Col);
//set a pointer at the proper offset into the buffer for each attribute
glVertexAttribPointer(Pos, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) (sizeof(float)*0));
glVertexAttribPointer(Col, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) (sizeof(float)*9));
}
I don't have much experience writing shader loaders and linkers, so I thought that is where the problem might be, but I have some error checking in the shader loader and nothing comes up, so I think that part is fine.
Next I have my display and main function:
//display function for the game
void GameDisplay( void )
{
//set the background color
glClearColor(1.0, 0.0, 1.0, 1.0);
//clear the screen
glClear(GL_COLOR_BUFFER_BIT);
//Draw the Player
glDrawArrays(GL_TRIANGLES, 0, 3);
glutSwapBuffers();
}
//main function
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(500, 500);
glutCreateWindow("Asteroids");
Player P1 = Player(0.0, 0.0);
PlayerInit(P1);
glutDisplayFunc(GameDisplay);
glutMainLoop();
return 0;
}
Vertex shader :
attribute vec3 vPosition;
attribute vec3 vColor;
varying vec4 color;
void main()
{
color = vec4(vColor, 1.0);
gl_Position = vec4(vPosition, 1.0);
}
Fragment shader
varying vec4 color;
void main()
{
gl_FragColor = color;
}
That's all of the relevant code. CalcPlayerPoints just returns a float array of size 9 to hold the triangle coordinates. CalcPlayerColor does something similar.
One last thing that may help with diagnosing the problem: whenever I try to exit the program by closing the application window, I hit a breakpoint in glutMainLoop; however, if I close the console window, it exits fine.
Edit: I added the shaders for reference.
Edit: I am using OpenGL version 3.1.
Without the shaders, we can't say whether the faulty code is the GLSL (bad vertex transformations, etc.).
Have you tried checking glGetError to see if the problem comes from your initialization code?
Maybe try setting the output of your fragment shader to, say, vec4(1.0, 0.0, 0.0, 1.0) to check whether its normal output is ill-formed.
Your last problem seems to unveil undefined behavior, like bad memory allocation/deallocation, which may take place in your Player class (by the way, consider passing the object by reference in your initialization code, because at the moment it may trigger a shallow copy and then a double free of some pointer).
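A quick way to do that glGetError check is to drain the whole error queue after your setup calls; a sketch, assuming <cstdio> is available:
GLenum err;
while ((err = glGetError()) != GL_NO_ERROR) {
fprintf(stderr, "GL error: 0x%04x\n", err);
}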
Turns out there was something wrong with how I was returning the array of vertices from the function used to compute them. The rest of the code worked fine after that fix.
Hi, I am using a VBO to load an image texture and then draw it in C++. The VBO id generation, binding, and drawing happen here:
void ViewManager::render(){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
if(decompressTileImage->tileTexure == 0)
{
loadTexture(decompressTileImage);
glGenBuffers(1,&decompressTileImage->VBOId);
glBindBuffer(GL_ARRAY_BUFFER,decompressTileImage->VBOId);
glBufferData(GL_ARRAY_BUFFER,sizeof(*(this->tileCoordList))+sizeof(*(this->tileTextureCoordList)),0,GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER,0,sizeof(*(this->tileCoordList)),this->tileCoordList);
glBufferSubData(GL_ARRAY_BUFFER,sizeof(*(this->tileCoordList)),sizeof(*(this->tileTextureCoordList)),this->tileTextureCoordList);
}
else
{
glBindBuffer(GL_ARRAY_BUFFER,decompressTileImage->VBOId);
glBindTexture(GL_TEXTURE_2D, decompressTileImage->tileTexure);
}
glColor4f(1.0f, 1.0f, 1.0f, textureAlpha);
if(textureAlpha < 1.0)
{
textureAlpha = textureAlpha + .03;
this->tiledMapView->renderNow();
}
glTexCoordPointer(3, GL_FLOAT, 0, (void*)sizeof(*(this->tileCoordList)));
glVertexPointer(3, GL_FLOAT, 0, 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindBuffer(GL_ARRAY_BUFFER,0);
glDisable(GL_BLEND);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisable(GL_TEXTURE_2D);
}
This function is in a class named MapTile. MapTile is instantiated 35 times, once for each of 35 images downloaded from the internet, and then a thread calls this method on each of the 35 MapTile objects and keeps doing so. That is why I first check whether the method is being called for the first time, so that I load the data and generate the VBO id only once per MapTile object. I check this with the line if(decompressTileImage->tileTexure == 0); after that, each time I just bind the VBO id to draw, with no need to load the data again.
Here decompressTileImage is a TextureImageInfo object. The implementation is:
#include "TextureImageInfo.h"
TextureImageInfo::TextureImageInfo(unsigned char * image,GLuint format,int texWidth,int texHeight,int imageWidth,int imageHeight,float tex_x,float tex_y)
{
// TODO Auto-generated constructor stub
this->format = format;
this->image = image;
this->imageHeight = imageHeight;
this->imageWidth = imageWidth;
this->texHeight = texHeight;
this->texWidth = texWidth;
this->tileTexure = 0;
this->VBOId = 0;
this->time = 0;
}
TextureImageInfo::~TextureImageInfo()
{
if(VBOId!=0)
glDeleteBuffers(1,&VBOId);
}
It draws and does everything fine, but it crashes when I try to clean up the memory in the destructor of the TextureImageInfo class given above, and I don't understand why. I checked with the if condition in the destructor that the VBOId really was generated and loaded in memory.
As indicated in the comments, OpenGL ES commands should be submitted from the same thread where the context was created.
From the Blackberry docs Parallel processing with OpenGL ES:
It is important to note that each OpenGL ES rendering context targets a single thread of execution.
If you want to render multiple scenes, you can separate each scene into its own thread, making sure each thread has its own context.
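A common way to honor that rule without giving every worker thread its own context is to confine all GL calls to the render thread and have the download threads hand over only the decoded pixels. A sketch with hypothetical names (not the poster's actual classes):
#include <mutex>
#include <queue>
#include <vector>
struct DecodedTile { std::vector<unsigned char> pixels; int w, h; };
std::mutex gQueueLock;
std::queue<DecodedTile> gReadyTiles;
// Download/decode thread: no GL calls here, just hand the pixels over.
void onTileDecoded(DecodedTile tile)
{
std::lock_guard<std::mutex> lock(gQueueLock);
gReadyTiles.push(std::move(tile));
}
// Render thread, which owns the context: all texture/VBO work happens here.
void drainUploads()
{
std::lock_guard<std::mutex> lock(gQueueLock);
while (!gReadyTiles.empty()) {
DecodedTile t = std::move(gReadyTiles.front());
gReadyTiles.pop();
// glGenTextures / glTexImage2D / glBufferData for tile t would go here.
}
}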