Strange OpenGL rendering stutter - C++

I'm experiencing a strange stutter in my simple OpenGL (via GLFW3) app. Although vsync is enabled (the frame rate is an almost steady 60 fps), the motion of the spinning triangle is not always smooth; it's almost as if some frames are skipped now and then. I tried looking at the time difference between consecutive calls to glfwSwapBuffers(), but those seem pretty consistent.
Am I doing something wrong? Should I use some kind of motion blur filtering to make it appear smoother?
The code:
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <cfloat>
#include <cassert>
#include <cstdarg> // for va_list/vsnprintf used by string_format()
#include <minmax.h>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <Windows.h>
#include <GL/glew.h>
#include <gl/GLU.h>
//#include <GL/GL.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#ifdef _WIN32
#pragma warning(disable:4996)
#endif
static int swap_interval;
static double frame_rate;
GLuint LoadShaders(const char * vertex_file_path,const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if(VertexShaderStream.is_open()){
std::string Line = "";
while(getline(VertexShaderStream, Line))
VertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}else{
printf("Impossible to open %s. Are you in the right directory ? Don't forget to read the FAQ !\n", vertex_file_path);
return 0;
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(getline(FragmentShaderStream, Line))
FragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}else{
printf("Impossible to open %s. Are you in the right directory ? Don't forget to read the FAQ !\n", fragment_file_path);
return 0;
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const * VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer , NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
if (Result != GL_TRUE)
{
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> VertexShaderErrorMessage(InfoLogLength+1);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
}
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const * FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer , NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
if (Result != GL_TRUE)
{
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> FragmentShaderErrorMessage(InfoLogLength+1);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
}
// Link the program
printf("Linking program\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
if (Result != GL_TRUE)
{
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> ProgramErrorMessage(InfoLogLength+1);
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
printf("%s\n", &ProgramErrorMessage[0]);
}
}
#ifdef _DEBUG
glValidateProgram(ProgramID);
#endif
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
static void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
static void set_swap_interval(GLFWwindow* window, int interval)
{
swap_interval = interval;
glfwSwapInterval(swap_interval);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_SPACE && action == GLFW_PRESS)
set_swap_interval(window, 1 - swap_interval);
}
static bool init(GLFWwindow** win)
{
if (!glfwInit())
exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_COMPAT_PROFILE);
// creating a window using the monitor param will open it full screen
const bool useFullScreen = false;
GLFWmonitor* monitor = useFullScreen ? glfwGetPrimaryMonitor() : NULL;
*win = glfwCreateWindow(640, 480, "", monitor, NULL);
if (!(*win))
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(*win);
GLenum glewError = glewInit();
if( glewError != GLEW_OK )
{
printf( "Error initializing GLEW! %s\n", glewGetErrorString( glewError ) );
return false;
}
//Make sure OpenGL 2.1 is supported
if( !GLEW_VERSION_2_1 )
{
printf( "OpenGL 2.1 not supported!\n" );
return false;
}
glfwMakeContextCurrent(*win);
glfwSetFramebufferSizeCallback(*win, framebuffer_size_callback);
glfwSetKeyCallback(*win, key_callback);
// get version info
const GLubyte* renderer = glGetString (GL_RENDERER); // get renderer string
const GLubyte* version = glGetString (GL_VERSION); // version as a string
printf("Renderer: %s\n", renderer);
printf("OpenGL version supported %s\n", version);
return true;
}
std::string string_format(const char* fmt, ...) {
// Take the format as a C string: calling va_start on a parameter of class
// type (std::string) is undefined behavior.
int size = 100;
std::string str;
va_list ap;
while (1) {
str.resize(size);
va_start(ap, fmt);
int n = vsnprintf(&str[0], size, fmt, ap);
va_end(ap);
if (n > -1 && n < size) {
str.resize(n);
return str;
}
if (n > -1)
size = n + 1;
else
size *= 2;
}
}
int main(int argc, char* argv[])
{
srand(9); // constant seed, for deterministic results
unsigned long frame_count = 0;
GLFWwindow* window;
init(&window);
// An array of 3 vectors which represents 3 vertices
static const GLfloat g_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
};
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// allocate GPU memory and copy data
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
unsigned int vao = 0;
glGenVertexArrays (1, &vao);
glBindVertexArray (vao);
glEnableVertexAttribArray (0);
glBindBuffer (GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer (0, 3, GL_FLOAT, GL_FALSE, 0, 0);
// Create and compile our GLSL program from the shaders
GLuint programID = LoadShaders( "1.vert", "1.frag" );
// Use our shader
glUseProgram(programID);
GLint locPosition = glGetAttribLocation(programID, "vertex");
assert(locPosition != -1);
glm::mat4 world(1.0f);
GLint locWorld = glGetUniformLocation(programID, "gWorld");
assert(locWorld != -1 && "Error getting address (was it optimized out?)!");
glUniformMatrix4fv(locWorld, 1, GL_FALSE, glm::value_ptr(world));
GLenum err = glGetError();
GLint loc = glGetUniformLocation(programID, "time");
assert(loc != -1 && "Error getting uniform address (was it optimized out?)!");
bool isRunning = true;
while (isRunning)
{
static float time = 0.0f;
static float oldTime = 0.0f;
static float fpsLastUpdateTime = 0.0f;
oldTime = time;
time = (float)glfwGetTime();
static std::string fps;
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram (programID);
glUniform1f(loc, time);
glBindVertexArray (vao);
glDrawArrays (GL_TRIANGLES, 0, 3);
glfwSwapBuffers(window);
glfwPollEvents();
isRunning = !glfwWindowShouldClose(window);
float dT = time-oldTime;
if (time-fpsLastUpdateTime > 0.5)
{
static const char* fmt = "frame rate: %.1f frames per second";
glfwSetWindowTitle(window, string_format(fmt, 1.0f/(dT)).c_str());
fpsLastUpdateTime = time;
}
}
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
////////////////////////////////////////
// 1.frag
////////////////////////////////////////
#version 330 core
// Ouput data
out vec3 color;
void main()
{
// Output color = red
color = vec3(1,0,0);
}
//////////////////////////////////////////////
// 1.vert
//////////////////////////////////////////////
#version 330 core
// Input vertex data, different for all executions of this shader.
in vec3 vertex;
uniform mat4 gWorld;
uniform float time;
void main()
{
gl_Position = gWorld * vec4(vertex, 1.0f);
gl_Position.x += sin(time);
gl_Position.y += cos(time)/2.0f;
gl_Position.w = 1.0;
}
OK. I got home and did more testing.
First I tried to disable V-Sync, but I couldn't! I had to disable the Windows desktop effects (Aero) to be able to do so, and lo and behold: once Aero was disabled, the stutter disappeared (with V-Sync on).
Then I tested with V-Sync off and, of course, got a much higher frame rate with the occasional expected tearing.
Then I tested in full screen. The rendering was smooth both with Aero and without it.
I couldn't find anyone else who shares this problem. Do you think it's a GLFW3 bug? A driver/hardware issue (I have a GTS 450 with the latest drivers)?
Thank you all for your answers. I learned a lot, but my problem is still unsolved.

It's a strange interaction between the Windows DWM (Desktop Window Manager) composition mode and glfwSwapBuffers(). I haven't gotten to the root of the problem yet, but you can work around the stuttering by doing one of the following (the second is sketched below):
go fullscreen
disable DWM window composition (see my answer to Linear movement stutter)
enable multisampling: glfwWindowHint(GLFW_SAMPLES, 4);
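For reference, a minimal sketch of the second workaround. It uses the real DwmEnableComposition API from dwmapi.h; note that this call is deprecated and has no effect on Windows 8 and later, where composition is always on:

#include <Windows.h>
#include <dwmapi.h>
#include <cstdio>
#pragma comment(lib, "dwmapi.lib")

// Sketch: turn Aero (DWM composition) off for the lifetime of the process.
// Composition is re-enabled automatically once the process exits.
void disableDwmComposition()
{
    HRESULT hr = DwmEnableComposition(DWM_EC_DISABLECOMPOSITION);
    if (FAILED(hr))
        printf("Could not disable DWM composition (hr = 0x%08lX)\n", (unsigned long)hr);
}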

Without seeing the stutter it is difficult to say what the problem is, but the first impression of your program is OK.
So I guess you observe that once in a while a frame is shown twice, leading to a very small stutter. This usually happens when you try to output 60 frames on a 60 Hz monitor with vsync.
In such a setup you must not miss a single vsync period, or you will see a stutter caused by the frame being shown twice.
On the other hand, it is nearly impossible to guarantee this, because the Windows scheduler runs threads in time slices of roughly 15 ms (I don't know the exact value by heart).
So it is possible that a higher-priority thread takes the CPU and your presenting thread cannot swap the buffers for the new frame in time. If you increase the numbers, e.g. 120 frames on a 120 Hz monitor, you will see those stutters even more often.
I don't know of any way to prevent this on the Windows platform, but if someone else does I would be happy to know it too. At least the missed periods can be detected, as sketched below.
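A minimal detection sketch, in the same spirit as the swap timing the asker already did, assuming a 60 Hz monitor:

// Sketch: flag any swap-to-swap interval noticeably longer than one vsync
// period (~16.7 ms at 60 Hz); such a frame was shown twice on screen.
const double vsyncPeriod = 1.0 / 60.0;
double lastSwap = glfwGetTime();
while (!glfwWindowShouldClose(window))
{
    // ... render ...
    glfwSwapBuffers(window);
    double now = glfwGetTime();
    if (now - lastSwap > 1.5 * vsyncPeriod)
        printf("missed vsync: %.2f ms between swaps\n", (now - lastSwap) * 1000.0);
    lastSwap = now;
    glfwPollEvents();
}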

It's hard to tell without seeing your problem, but unless we are talking about severe stuttering it's rarely a rendering issue. The motion/physics in your program is processed by the CPU; the way you are implementing your animation depends solely on the CPU.
What this means is that:
Say you are rotating your triangle by a fixed amount every loop iteration. This is very dependent on how long an iteration takes to complete. Things like CPU workload can have a huge impact on the on-screen result (though not necessarily), and it doesn't even take heavy CPU occupation to notice a difference. All it takes is a background process waking up to check for updates. That can produce a 'spike', observed as a tiny pause in your animation flow (due to the small delay the CPU introduces into your animation cycle), which reads as a stutter.
Understanding the above, there are a few ways to solve your issue (though in my opinion it isn't worth the investment for what you are trying to do here). You need a way to take consistent animation steps, with a small margin for variation.
This is a great article to explore:
http://gafferongames.com/game-physics/fix-your-timestep/
Ultimately most of the methods described there result in a smoother rendering flow, but not all of them guarantee physics-rendering precision. Without having tried it myself yet, I would say you have to go as far as implementing interpolation in the rendering process to guarantee drawing that is as smooth as possible; a sketch follows below.
What I wanted to explain most is that stuttering is usually caused by the CPU, because it intervenes directly in the way you handle motion. Overall, stepping your physics by measured time and interpolating inside your render loop is a topic definitely worth exploring.
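To make the article's approach concrete, here is a minimal sketch of a fixed-timestep loop with render interpolation, adapted to the spinning-triangle case; drawTriangle() and rotationSpeed are hypothetical stand-ins for the existing draw code and angular speed:

// Sketch of "fix your timestep": advance the simulation in exact dt steps
// and blend the two most recent states when rendering, so motion stays
// smooth even when render frames don't line up with simulation steps.
const double dt = 1.0 / 120.0;          // fixed simulation step
const float rotationSpeed = 1.0f;       // radians per second (example value)
double accumulator = 0.0;
double prevTime = glfwGetTime();
float prevAngle = 0.0f, currAngle = 0.0f;
while (!glfwWindowShouldClose(window))
{
    double now = glfwGetTime();
    accumulator += now - prevTime;
    prevTime = now;
    while (accumulator >= dt)
    {
        prevAngle = currAngle;
        currAngle += rotationSpeed * (float)dt;   // one fixed-size physics step
        accumulator -= dt;
    }
    float alpha = (float)(accumulator / dt);      // progress into the next step
    float renderAngle = prevAngle * (1.0f - alpha) + currAngle * alpha;
    drawTriangle(renderAngle);                    // hypothetical render call
    glfwSwapBuffers(window);
    glfwPollEvents();
}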

Related

OpenGL Hello Triangle Errors

I am trying to get the simple shaders to run; however, when I run the code, I get an error saying "Exception thrown at 0x00000000 in OpenGL_Triangle.exe: 0xC0000005: Access violation executing location 0x00000000." in the separate loadShader file.
I tried running it underneath the GLEW initialization in my main file, without the "GLuint LoadShaders(const char* vertex_file_path, const char* fragment_file_path){}" wrapper and the "return ProgramID;", defining LoadShaders as a separate unsigned int and setting the "program" integer equal to the "ProgramID" integer from the loadShader file.
I don't exactly know what to do now.
*Just as a note, I am using Visual Studio 2019.
//
// This is my main file (titled: main.cpp)
//
// Include standard headers
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
using namespace glm;
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <sstream>
#include <string.h>
#include <iostream>
using namespace std;
#include <common/shader.hpp>
// Create and compile our GLSL program from the shaders
GLuint program = LoadShaders("vertex.shader", "fragment.shader");
int main() {
// Initialise GLFW
glewExperimental = true; // Needed for core profile
if (!glfwInit())
{
cout << "Failed to initialize GLFW\n";
return -1;
}
cout << "Hello Triangle \n" ;
glfwWindowHint(GLFW_SAMPLES, 4); // 4x antialiasing
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); // We want OpenGL 4.0
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // We don't want the old OpenGL
// Open a window and create its OpenGL context
GLFWwindow* window; // (In the accompanying source code, this variable is global for simplicity)
window = glfwCreateWindow(500, 500, "Test", NULL, NULL);
if (window == NULL) {
cout << "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n";
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
// Initialize GLEW
glewExperimental = true; // Needed in core profile
if (glewInit() != GLEW_OK) {
cout << "Failed to initialize GLEW from main\n";
return -1;
}
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// An array of 3 vectors which represents 3 vertices
static const GLfloat verts[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
};
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
do {
// Clear the screen. It's not mentioned before Tutorial 02, but it can cause flickering, so it's there nonetheless.
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use our shader
glUseProgram(program);
// Drawing
// 1st attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle
glDisableVertexAttribArray(0);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
}
//
// This is my LoadShaders file (titled: loadShader.cpp)
//
#include <stdio.h>
#include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <sstream>
using namespace std;
#include <string.h>
#include <stdlib.h>
#include <GL/glew.h>
#include "common/shader.hpp"
GLuint LoadShaders(const char* vertex_file_path, const char* fragment_file_path) {
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if (VertexShaderStream.is_open()) {
std::stringstream sstr;
sstr << VertexShaderStream.rdbuf();
VertexShaderCode = sstr.str();
VertexShaderStream.close();
}
else {
printf("Impossible to open %s. Are you in the right directory ? Don't forget to read the FAQ !\n", vertex_file_path);
getchar();
return 0;
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if (FragmentShaderStream.is_open()) {
std::stringstream sstr;
sstr << FragmentShaderStream.rdbuf();
FragmentShaderCode = sstr.str();
FragmentShaderStream.close();
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const* VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer, NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if (InfoLogLength > 0) {
std::vector<char> VertexShaderErrorMessage(InfoLogLength + 1);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const* FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer, NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if (InfoLogLength > 0) {
std::vector<char> FragmentShaderErrorMessage(InfoLogLength + 1);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
// Link the program
printf("Linking program\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if (InfoLogLength > 0) {
std::vector<char> ProgramErrorMessage(InfoLogLength + 1);
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
printf("%s\n", &ProgramErrorMessage[0]);
}
glDetachShader(ProgramID, VertexShaderID);
glDetachShader(ProgramID, FragmentShaderID);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
//
// This is the Shader header file (titled; shader.hpp)
// It was placed int the include directories folder in another folder titled "common"
//
#ifndef SHADER_HPP
#define SHADER_HPP
#include "GL/glew.h"
GLuint LoadShaders(const char* vertex_path, const char* fragment_path);
#endif
GLuint program = LoadShaders("vertex.shader", "fragment.shader");
int main() {
...
Don't try to execute GL commands before you have initialized GLEW. A global initializer like this runs before main(), so before a context exists and before glewInit() has resolved the function pointers; you end up calling a NULL function pointer like glCreateShader(), and your process is Going To Have A Bad Time.
Move that LoadShaders() call to after glewInit().
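Something along these lines (a sketch of the reordering only, not a complete program):

GLuint program; // declare it globally if you like, but don't initialize it with a GL call here

int main() {
    // ... glfwInit(), window hints, glfwCreateWindow(), glfwMakeContextCurrent(window) ...
    glewExperimental = true;
    if (glewInit() != GLEW_OK) {
        cout << "Failed to initialize GLEW from main\n";
        return -1;
    }
    // Safe now: a context is current and GLEW has resolved the function pointers.
    program = LoadShaders("vertex.shader", "fragment.shader");
    // ... buffer setup and render loop as before ...
}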

glGetProgramiv claims that shader linking failed, but glGetProgramInfoLog is empty

I am currently attempting to write a basic graphics engine with OpenGL and C++. In doing so, OpenGL is behaving in a very strange way with no apparent explanation. It claims that the shader program failed to link, but then refuses to give any sort of error explaining why. Specifically, when I call glGetProgramiv the returned status is 0, but glGetProgramInfoLog gives an empty string¹.
Here is a minimum complete verifiable example:
#include <SDL2/SDL.h>
#include <GL/glew.h>
#include <sstream>
#include <stdexcept>
#include <fstream>
GLuint loadShader(const std::string &path, GLenum type);
int main(int argc, char *argv[]){
//show window & other housekeeping
SDL_Init(SDL_INIT_EVERYTHING);
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 32);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
SDL_Window *window = SDL_CreateWindow("test window", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 800, 600, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
SDL_GLContext glContext = SDL_GL_CreateContext(window);
SDL_GL_MakeCurrent(window, glContext);
GLenum c = glewInit();
if(c != GLEW_OK){
std::ostringstream sout;
sout << "Failed to initialize OpenGl: " << glewGetErrorString(c);
throw std::runtime_error(sout.str());
}
//**** -- BEGIN IMPORTANT BIT -- ****//
//This is where the shaders are loaded
//load shaders
GLuint vertexShader = loadShader("shader.vert", GL_VERTEX_SHADER);
GLuint fragmentShader = loadShader("shader.frag", GL_FRAGMENT_SHADER);
//create & link program
GLuint program = glCreateProgram();
glAttachShader(program, vertexShader);
glAttachShader(program, fragmentShader);
glLinkProgram(program);
//check for errors
GLint success;
glGetProgramiv(program, GL_LINK_STATUS, &success);
if(!success){
GLchar infoLog[512];
GLint size; //gives 0 when checked in debugger
glGetProgramInfoLog(program, 512, &size, infoLog);
throw std::runtime_error(std::string("Failed to link shader program: ") + infoLog);
}
//cleanup
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
//**** -- END IMPORTANT BIT -- ****//
//update loop
bool isClosed = false;
while(!isClosed){
SDL_GL_SwapWindow(window);
SDL_Event e;
while(SDL_PollEvent(&e)){
switch(e.type){
case SDL_QUIT:
isClosed = true;
break;
}
}
}
}
GLuint loadShader(const std::string &path, GLenum type){
//load file
std::ifstream fin(path);
std::string line, total = "";
while(std::getline(fin, line)){
total += (line + "\n");
}
//reformat source into OpenGl's ridiculous format
const char *cArray = total.c_str();
const char **pointerToCArray = &cArray;
//create shader
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, pointerToCArray, nullptr);
glCompileShader(shader);
//check for errors
GLint success;
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
if(!success){
GLchar infoLog[512];
glGetShaderInfoLog(shader, 512, nullptr, infoLog);
throw std::runtime_error(std::string("Failed to compile shader: ") + infoLog);
}
}
MCVE testshader.vert:
#version 330 core
layout (location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position.x, position.y, position.z, 1.0);
}
MCVE testshader.frag:
#version 330 core
out vec4 color;
void main()
{
color = vec4(1.0f, 0.5f, 0.2f, 1.0f);
}
When I run this I get the following output in addition to a brief flash of a window:
terminate called after throwing an instance of 'std::runtime_error'
what(): Failed to link shader program:
Aborted (core dumped)
Extra Notes:
I am using Ubuntu 16.10 x64
I am using a Nvidia GTX 1070 with driver version 375.39
The SDL and glew versions are those installed from the libsdl2-dev and libglew-dev packages respectively. I am using pkg-config to link.
During my research to solve this error I mostly found various forum and StackOverflow posts that found specific errors in other people's code. None of these apply to my code, and after finding this error I compared my code to this supposedly working example. This example also compiles and runs correctly on my computer, which rules out any sort of configuration error.
¹: It is not possible that glGetProgramInfoLog is doing nothing and that the infoLog array merely happened to start with a null terminator: the size output of glGetProgramInfoLog comes back 0 even if I set it to a different value before the call.
I have found the issue: a missing return statement in the loadShader() method. The error output, however, was of no help in finding it, as I experienced while debugging. As such, I recommend to any future person reading this: find a working example (i.e. this one) that does something similar to what you are trying to do, and copy/paste progressively smaller parts of its code into yours (or vice versa) to do a kind of binary search for your error.
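For reference, the fix itself is a single line at the end of loadShader():

GLuint loadShader(const std::string &path, GLenum type){
    // ... load, compile and error-check as before ...
    return shader; // was missing: without it the caller receives a garbage shader ID
}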

Creating and addressing large buffers in OpenGL (on the scale of GBs)

I was surprised to find my shaders start reading zeroes out of buffers when addressing higher indices. I'm guessing this has something to do with the precision of the addressing internals in the driver. I never get any out-of-memory error; the shaders just seem to silently stop accessing the buffer. Correct me if I'm wrong, but I believe CUDA supports 64-bit pointers and large amounts of memory just fine.
I've built a MWE (below) where I create a buffer one vec4 shy of 2GB. If I hit or go over 2GB the shaders don't write anything even to the first element. Using image_load_store to write to the buffer in a shader only works up to 512MiB. I have much more luck with bindless graphics, which correctly writes to the entire buffer, but am still stuck with a max of 2GB even though I can create a larger buffer and it seems bindless graphics uses 64 bit addressing, so I don't see any reason this limit should exist.
How can I create and use buffers larger than 2GB with OpenGL?
I'm using a GTX Titan (6GB).
//#include <windows.h>
#include <assert.h>
#include <stdio.h>
#include <memory.h>
#include <GL/glew.h>
#include <GL/glut.h>
const char* imageSource =
"#version 440\n"
"uniform layout(rgba32f) imageBuffer data;\n"
"uniform float val;\n"
"void main() {\n"
" imageStore(data, gl_VertexID, vec4(val));\n"
" gl_Position = vec4(0.0);\n"
"}\n";
const char* bindlessSource =
"#version 440\n"
"#extension GL_NV_gpu_shader5 : enable\n"
"#extension GL_NV_shader_buffer_load : enable\n"
"uniform vec4* data;\n"
"uniform float val;\n"
"void main() {\n"
" data[gl_VertexID] = vec4(val);\n"
" gl_Position = vec4(0.0);\n"
"}\n";
GLuint compile(GLenum type, const char* shaderSrc)
{
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, (const GLchar**)&shaderSrc, NULL);
glCompileShader(shader);
int success = 0;
int loglen = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &loglen);
GLchar* log = new GLchar[loglen];
glGetShaderInfoLog(shader, loglen, &loglen, log);
if (!success)
{
printf("%s\n", log);
exit(0);
}
GLuint program = glCreateProgram();
glAttachShader(program, shader);
glLinkProgram(program);
return program;
}
int main(int argc, char** argv)
{
float* check;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutCreateWindow("test");
glewInit();
GLsizeiptr bufferSize = 1024 * 1024 * 1024; //1GB
bufferSize *= 2;
bufferSize -= 16;
GLsizeiptr numFloats = bufferSize/sizeof(float);
GLsizeiptr numVec4s = bufferSize/(sizeof(float)*4);
float testVal = 123.123f;
glEnable(GL_RASTERIZER_DISCARD);
float* dat = new float[numFloats];
memset(dat, 0, bufferSize);
//create a buffer with data
GLuint buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_TEXTURE_BUFFER, buffer);
glBufferData(GL_TEXTURE_BUFFER, bufferSize, NULL, GL_STATIC_DRAW);
//get a bindless address
GLuint64 address;
glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);
//make a texture alias for it
GLuint bufferTexture;
glGenTextures(1, &bufferTexture);
glBindTexture(GL_TEXTURE_BUFFER, bufferTexture);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, buffer); //should be GL_RGBA32F (see update)
glBindImageTextureEXT(0, bufferTexture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32F); //should be GL_RGBA32F (see update)
//compile the shaders
GLuint imageShader = compile(GL_VERTEX_SHADER, imageSource);
GLuint bindlessShader = compile(GL_VERTEX_SHADER, bindlessSource);
//initialize buffer
glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);
assert(glIsBufferResidentNV(GL_TEXTURE_BUFFER)); //sanity check
//run image_load_store
glUseProgram(imageShader);
glUniform1i(glGetUniformLocation(imageShader, "data"), 0);
glUniform1f(glGetUniformLocation(imageShader, "val"), testVal);
glDrawArrays(GL_POINTS, 0, numVec4s);
glMemoryBarrier(GL_ALL_BARRIER_BITS);
check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
for (GLsizeiptr i = 0; i < numFloats; ++i)
{
if (check[i] != testVal)
{
printf("failed image_load_store: dat[%td] = %f (%fMiB)\n", i, check[i], (double)i*sizeof(float)/1024.0/1024.0);
break;
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER);
//initialize buffer
glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);
assert(glIsBufferResidentNV(GL_TEXTURE_BUFFER)); //sanity check
//run bindless
glUseProgram(bindlessShader);
glProgramUniformui64NV(bindlessShader, glGetUniformLocation(bindlessShader, "data"), address);
glUniform1f(glGetUniformLocation(bindlessShader, "val"), testVal);
glDrawArrays(GL_POINTS, 0, numVec4s);
glMemoryBarrier(GL_ALL_BARRIER_BITS);
check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
for (GLsizeiptr i = 0; i < numFloats; ++i)
{
if (check[i] != testVal)
{
printf("failed bindless: dat[%td] = %f (%fMiB)\n", i, check[i], (double)i*sizeof(float)/1024.0/1024.0);
break;
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER);
return 0;
}
This is the output I get:
> make && ./a.out
g++ -lGL -lGLEW -lglut main.c
failed image_load_store: dat[134217727] = 0.000000 (511.999996MiB)
UPDATE:
Found a mistake. The GL_R32F internal format should be GL_RGBA32F, which allows image_load_store to reach the ~2GB mark. The program then correctly executes with no output until the size reaches 2GB or more, at which point it still fails for both image_load_store and bindless.
GL_MAX_TEXTURE_BUFFER_SIZE is 134217728 for me, which puts the max size at exactly 2GB for RGBA32F. But my question about getting larger than 2GB remains. Sure, I could allocate multiple buffers, but that's a bunch of housekeeping and overhead I'd prefer not to deal with.
You might need to go vendor-specific; for NVIDIA, the following extensions are available, allowing you to use 64-bit addresses (and buffer sizes) in your shaders:
https://www.khronos.org/registry/OpenGL/extensions/NV/NV_shader_buffer_load.txt
https://www.khronos.org/registry/OpenGL/extensions/NV/NV_shader_buffer_store.txt
Basically, you can start using pointers inside GLSL with them, passing them in as 64-bit values from the host.
The maximum address range is returned by
GLuint64EXT max_shader_buffer_address;
glGetIntegerui64vNV(GL_MAX_SHADER_BUFFER_ADDRESS_NV, &max_shader_buffer_address);
printf("Maximum shader buffer address: %llu\n", (unsigned long long)max_shader_buffer_address);
On my machine with an RTX 3070, it is 18446744073709551615.

Opengl 3.3 doesn't draw anything. Using GLSL 330 Core

I am following the guides from this site and have stopped at lesson 2. At first I tried writing my own code, but after it didn't work I simply took the code from the site. It still doesn't draw anything besides the glClearColor.
What I have done:
Checked compiling and linking. Works fine
Checked errors. Not sure if I did it right, but everything seems alright (I get a 1280 error, but I have read that GLEW can cause it and it can be ignored).
Moved the glUseProgram call around in the main loop but didn't get any results.
Changed colors and tried modifying shaders. Still nothing
I will post the code I have at the moment (the original code from the site):
main.cpp
#include <stdio.h>
#include <stdlib.h>
#include <glew.h>
#include <glfw3.h>
#include <glm/glm.hpp>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
int max(int i, int j)
{
if (i > j) return i;
return j;
}
GLuint LoadShaders(const char * vertex_file_path, const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if (VertexShaderStream.is_open())
{
std::string Line = "";
while (getline(VertexShaderStream, Line))
VertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if (FragmentShaderStream.is_open()){
std::string Line = "";
while (getline(FragmentShaderStream, Line))
FragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const * VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer, NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> VertexShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &VertexShaderErrorMessage[0]);
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const * FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer, NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> FragmentShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &FragmentShaderErrorMessage[0]);
// Link the program
fprintf(stdout, "Linking program\n\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> ProgramErrorMessage(max(InfoLogLength, int(1)));
std::cout << "Checking program\n";
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
fprintf(stdout, "%s\n", &ProgramErrorMessage[0]);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
std::cout << "END";
return ProgramID;
}
int main(void)
{
if (!glfwInit())
{
std::cout << "Cannot init glfw";
return -1;
}
//glfwWindowHint(GLFW_SAMPLES, 4); // 4x antialiasing
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); // We want OpenGL 3.3
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); //We don't want the old OpenGL
// Open a window and create its OpenGL context
GLFWwindow* window; // (In the accompanying source code, this variable is global)
window = glfwCreateWindow(1024, 768, "Tutorial 01", NULL, NULL);
if (window == NULL){
fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window); // Initialize GLEW
glewExperimental = GL_TRUE; // Needed in core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
static const GLfloat g_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
GLuint programID = LoadShaders("res\\vertex.glsl", "res\\fragment.glsl");
do{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(programID);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle
glDisableVertexAttribArray(0);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
return 0;
}
fragment.glsl
#version 330 core
out vec3 color;
void main(){
color = vec3(1,1,0);
}
vertex.glsl
#version 330 core
layout(location = 0) in vec3 vertexPosition_modelspace;
void main(){
gl_Position.xyz = vertexPosition_modelspace;
gl_Position.w = 1.0;
}
The OpenGL Core Profile requires the use of Vertex Array Objects (VAOs). This is in the "Deprecated and Removed Features" of the spec:
Client vertex and index arrays - all vertex array attribute and element array index pointers must refer to buffer objects. The default vertex array object (the name zero) is also deprecated. Calling VertexAttribPointer when no buffer object or no vertex array object is bound will generate an INVALID_OPERATION error, as will calling any array drawing command when no vertex array object is bound.
The tutorial you are using suggests to use this code as part of your initialization:
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
This will be enough to get the tutorial code working. To make productive use of VAOs in more complex applications, you will probably want to create a VAO for each object. This will then track the full vertex setup state for the object, and allow you to set the state with a single glBindVertexArray() call before drawing.
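A sketch of that per-object pattern (names are illustrative):

// One-time setup per object: the VAO records the buffer binding and the
// attribute layout established while it is bound.
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindVertexArray(0);

// Per frame: a single bind restores the whole vertex setup.
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);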

glBindAttribLocation whats next?

I'm struggling to find OpenGL/GLSL examples that don't require GLEW or GLUT or what have you.
I'm trying to work with only GLFW3 (if possible I would like to use no other libraries), and I'm struggling to understand what to do once I call glBindAttribLocation. I've written code to pass an image as a texture into shaders, but I can't figure out how to pass vertices.
I have a vertex shader and a fragment shader, and I want to draw a triangle and color it red. I can create the shader objects and the program object and link everything, but how do I pass things to the shaders?
// vert
in vec3 vPosition;
void main()
{
gl_Position = vec4(vPosition,1.0);
}
// Frag
out vec4 color;
void main()
{
color = vec4(1.0,0.0,0.0,1.0);
}
I don't understand what I need to do after I call glBindAttribLocation
glBindAttribLocation(p,0,"vPosition");
glUseProgram(p);
now how do I pass the vertices of a triangle into the shader?
More code below. I'm calling my own library to read in the files, so textFileRead() won't work if anyone tries to run this.
#include <GLFW/glfw3.h>
#include <stdlib.h>
#include <stdio.h>
#include "src/textfile.h"
GLuint v,f,p;
void printLog(GLuint obj)
{
int infologLength = 0;
int maxLength;
if(glIsShader(obj))
glGetShaderiv(obj,GL_INFO_LOG_LENGTH,&maxLength);
else
glGetProgramiv(obj,GL_INFO_LOG_LENGTH,&maxLength);
char infoLog[maxLength];
if (glIsShader(obj))
glGetShaderInfoLog(obj, maxLength, &infologLength, infoLog);
else
glGetProgramInfoLog(obj, maxLength, &infologLength, infoLog);
if (infologLength > 0)
printf("%s\n",infoLog);
}
static void error_callback(int error, const char* description)
{
fputs(description, stderr);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
void setShaders() {
char *vs = NULL,*fs = NULL;
v = glCreateShader(GL_VERTEX_SHADER);
f = glCreateShader(GL_FRAGMENT_SHADER);
vs = textFileRead("toon.vert");
fs = textFileRead("toon.frag");
const char * ff = fs;
const char * vv = vs;
glShaderSource(v, 1, &vv,NULL);
glShaderSource(f, 1, &ff,NULL);
free(vs);free(fs);
glCompileShader(v);
glCompileShader(f);
p = glCreateProgram();
glAttachShader(p,f);
glAttachShader(p,v);
glLinkProgram(p);
//glUseProgram(p);
}
int main(void)
{
GLFWwindow* window;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
window = glfwCreateWindow(640, 480, "Simple example", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(window);
glfwSetKeyCallback(window, key_callback);
while (!glfwWindowShouldClose(window))
{
int height, width;
float ratio;
glfwGetFramebufferSize(window, &width, &height);
ratio = width / (float) height;
glViewport(0, 0, width, height);
setShaders();
glBindAttribLocation(p,0,"vPosition");
glUseProgram(p);
/* Now What */
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
you "pass vertices into the shaders" by making a draw call, most typically glDrawArrays().
when glDrawArrays() hits, the currently bound vertex array gets sent off to GPU-land. the vertices will be processed by the currently bound program (which you seem to have figured out) and each vertex attribute will flow into the vertex shader variables based on whether or not the shader variable's attribute index matches the vertex attribute's glVertexAttribPointer() "index" parameter (which you seem on the way to figuring out).
so, look into glVertexAttribPointer() to describe your array of vertices, glEnableAttributeArray() to enable your array of vertices to be sent on the next draw call, and then glDrawArrays() to kick off the party.
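Putting those together, a minimal sketch of the missing piece. Two caveats: glBindAttribLocation only takes effect the next time the program is linked, so it belongs before glLinkProgram() (in your loop it runs after the link and does nothing until a relink); and on Windows any GL function beyond 1.1 still has to be loaded, e.g. via glfwGetProcAddress(), even if you avoid GLEW.

// One-time setup: upload three vertices and describe them as attribute 0,
// the index you bound to "vPosition" before linking.
static const GLfloat verts[] = {
    -0.5f, -0.5f, 0.0f,
     0.5f, -0.5f, 0.0f,
     0.0f,  0.5f, 0.0f,
};
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);

// Each frame:
glUseProgram(p);
glDrawArrays(GL_TRIANGLES, 0, 3);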
You don't need to call glBindAttribLocation; use glGetAttribLocation instead.
1 Define a variable for the attribute location in your cpp file:
GLint vPosition;
2 Query the location after linking the program:
const char* attribute_name = "vPosition";
vPosition = glGetAttribLocation(p, attribute_name);
3 In your vertex shader file, declare vPosition as an attribute variable and use it:
attribute vec3 vPosition;
void main()
{
gl_Position = vec4(vPosition,1.0);
}
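Then use the queried location wherever an attribute index is needed; a brief sketch:

// Use the queried location instead of a hard-coded attribute index.
glEnableVertexAttribArray(vPosition);
glBindBuffer(GL_ARRAY_BUFFER, vbo); // vbo from your vertex setup code
glVertexAttribPointer(vPosition, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawArrays(GL_TRIANGLES, 0, 3);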