OpenGL 3.3/GLSL & C++ error: "must write to gl_Position"

I'm currently trying to get a triangle to render using OpenGL 3.3 and C++ with the GLM, GLFW3, and GLEW libraries, but I get an error when trying to create my shader program:
Vertex info
(0) : error C5145: must write to gl_Position
I already tried to find out why this happens and asked on other forums, but no one knew the reason. There are three possible places where this error could originate: my main.cpp, where I create the window, the context, the program, the VAO, etc. ...
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <iostream>
#include <string>
#include "util/shaderutil.hpp"
#define WIDTH 800
#define HEIGHT 600
using namespace std;
using namespace glm;
GLuint vao;
GLuint shaderprogram;
void initialize() {
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    glClearColor(0.5, 0.7, 0.9, 1.0);
    string vShaderPath = "shaders/shader.vert";
    string fShaderPath = "shaders/shader.frag";
    shaderprogram = ShaderUtil::createProgram(vShaderPath.c_str(), fShaderPath.c_str());
}
void render() {
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(shaderprogram);
    glDrawArrays(GL_TRIANGLES, 0, 3);
}
void clean() {
    glDeleteProgram(shaderprogram);
}
int main(int argc, char** argv) {
    if (!glfwInit()) {
        cerr << "GLFW ERROR!" << endl;
        return -1;
    }
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    GLFWwindow* win = glfwCreateWindow(WIDTH, HEIGHT, "Rendering a triangle!", NULL, NULL);
    glfwMakeContextCurrent(win);
    glewExperimental = GL_TRUE;
    if (glewInit() != GLEW_OK) {
        cerr << "GLEW ERROR!" << endl;
        return -1;
    } else {
        glGetError();
        //GLEW BUG: SETTING THE ERRORFLAG TO INVALID_ENUM; THEREFORE RESET
    }
    initialize();
    while (!glfwWindowShouldClose(win)) {
        render();
        glfwPollEvents();
        glfwSwapBuffers(win);
    }
    clean();
    glfwDestroyWindow(win);
    glfwTerminate();
    return 0;
}
...the ShaderUtil class, where I read in the shader files, compile them, do error checking and return a final program...
#include "shaderutil.hpp"
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
using namespace std;
GLuint ShaderUtil::createProgram(const char* vShaderPath, const char* fShaderPath) {
    /*VARIABLES*/
    GLuint vertexShader;
    GLuint fragmentShader;
    GLuint program;
    ifstream vSStream(vShaderPath);
    ifstream fSStream(fShaderPath);
    string vSCode, fSCode;
    /*CREATING THE SHADER AND PROGRAM OBJECTS*/
    vertexShader = glCreateShader(GL_VERTEX_SHADER);
    fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
    program = glCreateProgram();
    /*READING THE SHADERCODE*/
    /*CONVERTING THE SHADERCODE TO CHAR POINTERS*/
    while (vSStream.is_open()) {
        string line = "";
        while (getline(vSStream, line)) {
            vSCode += "\n" + line;
        }
        vSStream.close();
    }
    const char* vSCodePointer = vSCode.c_str();
    while (fSStream.is_open()) {
        string line = "";
        while (getline(fSStream, line)) {
            fSCode += "\n" + line;
        }
        fSStream.close();
    }
    const char* fSCodePointer = fSCode.c_str();
    /*COMPILING THE VERTEXSHADER*/
    glShaderSource(vertexShader, 1, &vSCodePointer, NULL);
    glCompileShader(vertexShader);
    /*VERTEXSHADER ERROR CHECKING*/
    GLint vInfoLogLength;
    glGetShaderiv(vertexShader, GL_INFO_LOG_LENGTH, &vInfoLogLength);
    if (vInfoLogLength > 0) {
        vector<char> vInfoLog(vInfoLogLength + 1);
        glGetShaderInfoLog(vertexShader, vInfoLogLength, &vInfoLogLength, &vInfoLog[0]);
        for(int i = 0; i < vInfoLogLength; i++) {
            cerr << vInfoLog[i];
        }
    }
    /*COMPILING THE FRAGMENTSHADER*/
    glShaderSource(fragmentShader, 1, &fSCodePointer, NULL);
    glCompileShader(fragmentShader);
    /*FRAGMENTSHADER ERROR CHECKING*/
    GLint fInfoLogLength;
    glGetShaderiv(fragmentShader, GL_INFO_LOG_LENGTH, &fInfoLogLength);
    if (fInfoLogLength > 0) {
        vector<char> fInfoLog(fInfoLogLength + 1);
        glGetShaderInfoLog(fragmentShader, fInfoLogLength, &fInfoLogLength, &fInfoLog[0]);
        for(int i = 0; i < fInfoLogLength; i++) {
            cerr << fInfoLog[i];
        }
    }
    /*LINKING THE PROGRAM*/
    glAttachShader(program, vertexShader);
    glAttachShader(program, fragmentShader);
    glLinkProgram(program);
    //glValidateProgram(program);
    /*SHADERPROGRAM ERROR CHECKING*/
    GLint programInfoLogLength;
    glGetProgramiv(program, GL_INFO_LOG_LENGTH, &programInfoLogLength);
    if (programInfoLogLength > 0) {
        vector<char> programInfoLog(programInfoLogLength + 1);
        glGetProgramInfoLog(program, programInfoLogLength, &programInfoLogLength, &programInfoLog[0]);
        for(int i = 0; i < programInfoLogLength; i++) {
            cerr << programInfoLog[i];
        }
    }
    /*CLEANUP & RETURNING THE PROGRAM*/
    glDeleteShader(vertexShader);
    glDeleteShader(fragmentShader);
    return program;
}
...and the vertex shader itself, which is nothing special. I just create an array of vertices and push them into gl_Position.
#version 330 core
void main() {
    const vec3 VERTICES[3] = vec3[3] {
        0.0, 0.5, 0.5,
        0.5,-0.5, 0.5,
       -0.5,-0.5, 0.5
    };
    gl_Position.xyz = VERTICES;
    gl_Position.w = 1.0;
}
The fragment shader just outputs a vec4 called color, which is set to (1.0, 0.0, 0.0, 1.0). The compiler doesn't show me any errors, but when I try to execute the program, I just get a window without the triangle and the error message shown above.
There are a few things I already tried to solve this problem, but none of them worked:
I tried creating the vertices inside my main.cpp and pushing them into the vertex shader via a vertex buffer object; I changed some code inspired by opengl-tutorials.org and finally got a triangle to show up, but the shaders weren't applied; I only got the vertices from my main.cpp to show up on the screen, and the "must write to gl_Position" problem remained.
I tried using glGetError() in different places and got two different error codes: 1280 and 1282. The first one was caused by a bug inside GLEW, which changes the state from GL_NO_ERROR to GL_INVALID_ENUM or something like that; I was told to ignore it and just reset the state to GL_NO_ERROR by calling glGetError() once after initializing GLEW. The other code appeared after glUseProgram() in the render function. I wanted to get some information out of it, but gluErrorString() is deprecated in OpenGL 3.3 and I couldn't find an alternative provided by any of my libraries (a hand-written lookup like the sketch after this list works instead).
I tried validating my program via glValidateProgram() after linking it. When I did this, the gl_Position error message didn't show up anymore, but neither did the triangle, so I assume that this function just clears the info log to put in new information about the validation process.
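For reference, decoding the raw error codes doesn't need glu at all; a tiny switch over the standard error enums is enough. A minimal sketch (the helper names are made up; only the core-profile error values are covered):
// Minimal sketch: translate glGetError() codes to names without gluErrorString().
const char* glErrorName(GLenum err) {
    switch (err) {
        case GL_NO_ERROR:                      return "GL_NO_ERROR";
        case GL_INVALID_ENUM:                  return "GL_INVALID_ENUM";      // 1280
        case GL_INVALID_VALUE:                 return "GL_INVALID_VALUE";     // 1281
        case GL_INVALID_OPERATION:             return "GL_INVALID_OPERATION"; // 1282
        case GL_INVALID_FRAMEBUFFER_OPERATION: return "GL_INVALID_FRAMEBUFFER_OPERATION";
        case GL_OUT_OF_MEMORY:                 return "GL_OUT_OF_MEMORY";
        default:                               return "unknown GL error";
    }
}
void checkGL(const char* where) {
    // Drain the error queue so repeated errors are all reported.
    for (GLenum err = glGetError(); err != GL_NO_ERROR; err = glGetError()) {
        cerr << where << ": " << glErrorName(err) << endl;
    }
}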
So right now, I have no idea what causes this error.

The problem is solved! I printed the source that OpenGL was trying to compile and saw that the ifstream had not loaded any source at all. Things I had to change:
Change "while (vSStream.is_open())" to "if (vSStream.is_open())".
Add an error check for when that condition fails (an else branch along the lines of else { cerr << "OH NOES!" << endl; }).
Add a second parameter to the ifstreams I'm creating: change "ifstream(path)" to "ifstream(path, ios::in)".
Change the path I'm passing from a relative path (e.g. "../shaders/shader.vert") to an absolute path (e.g. "/home/USERNAME/Desktop/project/src/shaders/shader.vert"); this was somehow necessary because the relative path wasn't being resolved. Using an absolute path isn't a permanent solution, but it fixes the problem of not finding the shader.
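Putting those changes together, the reading part of createProgram ends up looking roughly like this (just a sketch; the error message is a placeholder):
ifstream vSStream(vShaderPath, ios::in);
string vSCode;
if (vSStream.is_open()) {
    string line;
    while (getline(vSStream, line)) {
        vSCode += "\n" + line;
    }
    vSStream.close();
} else {
    cerr << "OH NOES! Could not open " << vShaderPath << endl;
}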
Now it actually loads and compiles the shaders. There are still some errors to fix, but if someone has the same "must write to gl_Position" problem, double, no, triple-check that the source you're trying to compile is actually loaded and that the ifstream is actually open.
I thank everyone who tried to help me, especially @MtRoad. This problem almost made me go bald.

Vertex shaders run on each vertex individually; gl_Position is the single output position of the vertex currently being processed, after whatever transforms you wish to apply, so trying to emit multiple vertices from one invocation doesn't make sense. Geometry shaders can emit additional geometry on the fly and can be used for that, for example to create motion blur.
For typical drawing, you bind a vertex array object like you did, but put the data into buffers called vertex buffer objects and tell OpenGL how to interpret the data's "attributes" using glVertexAttribPointer, so you can read them as inputs in your shaders.
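A minimal sketch of that setup, using the triangle from the question (attribute location 0 is an assumption; the vertex shader would then declare layout(location = 0) in vec3 position and write gl_Position = vec4(position, 1.0)):
// Sketch: upload the triangle into a VBO and describe it with glVertexAttribPointer.
GLfloat triangle[] = {
     0.0f,  0.5f, 0.5f,
     0.5f, -0.5f, 0.5f,
    -0.5f, -0.5f, 0.5f
};
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(triangle), triangle, GL_STATIC_DRAW);
// While the VAO is bound: attribute 0 = three floats per vertex, tightly packed.
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
// render() can then stay as: glUseProgram(shaderprogram); glDrawArrays(GL_TRIANGLES, 0, 3);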

I recently encountered this issue and suspect the cause may be the same as yours.
I'm not familiar with g++; however, on Visual Studio, the build environment and the location your *.exe runs from while debugging can affect this. For example, one such setting:
Project Properties -> General -> Output directory ->
Visual Studios Express - change debug output directory
And another similar issue here: "The system cannot find the file specified" when running C++ program
If you've changed the build environment and are debugging from a different output directory, you need to make sure the relevant files are reachable relative to where the *.exe is executed from.
This would explain why you had to resort to "if (vSStream.is_open())", which I suspect fails, and then to the full file path of the shaders, since the originally referenced files are not where the relative path points.
My issue was exactly like yours, but only in release mode. Once I copied my shaders into the release folder where the *.exe could access them, the problem went away.
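If you're unsure where the executable is actually running from, a quick check like this makes it obvious (just a sketch; uses C++17 std::filesystem, and the helper name is made up):
#include <filesystem>
#include <fstream>
#include <iostream>
// Sketch: report the working directory and whether a shader file is reachable from it.
bool shaderReachable(const char* path) {
    std::cout << "Working directory: " << std::filesystem::current_path() << std::endl;
    std::ifstream f(path, std::ios::in);
    if (!f.is_open()) {
        std::cerr << "Cannot open " << path << " from the working directory" << std::endl;
        return false;
    }
    return true;
}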


Cross-platform way to identify the current OpenGL rendering context?

I have discovered that some OpenGL objects, such as shaders, are not shared between rendering contexts and will raise a GL_INVALID_VALUE error if used in the wrong context.
So my library has something like this (assume C++ wrappers around the OpenGL IDs):
const char * const VERTEX_SHADER_LIB = "...";
const char * const FRAGMENT_SHADER_LIB = "...";
ShaderProgram shader (FragmentShader plugin)
{
    ShaderProgram p;
    p .attach (VertexShader (VERTEX_SHADER_LIB));
    p .attach (FragmentShader (FRAGMENT_SHADER_LIB));
    p .attach (plugin);
    p .link ();
    return p;
}
In the interests of efficiency, I rewrite it along these lines:
VertexShader vertex_shader_lib ()
{
    static VertexShader SHADER (VERTEX_SHADER_LIB);
    return SHADER;
}
FragmentShader fragment_shader_lib ()
{
    static FragmentShader SHADER (FRAGMENT_SHADER_LIB);
    return SHADER;
}
ShaderProgram shader (FragmentShader plugin)
{
    ShaderProgram p;
    p .attach (vertex_shader_lib ());
    p .attach (fragment_shader_lib ());
    p .attach (plugin);
    p .link ();
    return p;
}
but this more efficient version only works if used for a single rendering context.
I would therefore need to rewrite it like this:
VertexShader vertex_shader_lib ()
{
    static Map <RenderContextID, VertexShader> SHADERS;
    SHADERS .insert_if_key_does_not_exist (
        current_render_context (),
        VertexShader (VERTEX_SHADER_LIB));
    return SHADERS [current_render_context ()];
}
However, I can't find a cross-platform way to implement current_render_context(). Is there one? I don't necessarily need a cross-platform handle for the rendering context object itself; any piece of identifying data which is unchanging and unique to each context will do.
What can I use?
Use a platform-appropriate *GetCurrentContext() function:
// Windows
HGLRC wglGetCurrentContext(void)
// X11
GLXContext glXGetCurrentContext(void)
// OSX
CGLContextObj CGLGetCurrentContext(void)
// EGL
EGLContext eglGetCurrentContext(void)
It'll take a little bit of #ifdefing but nothing too onerous.
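A sketch of what that wrapper might look like (the RenderContextID typedef is an assumption; each value is stable and unique to its context while that context is current):
// Sketch: one #ifdef'ed helper that returns an opaque per-context handle.
#if defined(_WIN32)
  #include <windows.h>
  typedef HGLRC RenderContextID;
  RenderContextID current_render_context () { return wglGetCurrentContext (); }
#elif defined(__APPLE__)
  #include <OpenGL/OpenGL.h>
  typedef CGLContextObj RenderContextID;
  RenderContextID current_render_context () { return CGLGetCurrentContext (); }
#else // X11
  #include <GL/glx.h>
  typedef GLXContext RenderContextID;
  RenderContextID current_render_context () { return glXGetCurrentContext (); }
#endif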
If you use a framework like SDL2 you can avoid even that.

glDrawArrays causes GL_OUT_OF_MEMORY

I am building a Qt application with OpenGL using a VAO and VBOs. I have a simple reference grid that I want to draw with the following code:
void ReferenceGrid::initialize()
{
    // Buffer allocation and initialization
    Float3Array vertices;
    for (float pos = -GridSide; pos <= GridSide; pos += 1.0) {
        // X line
        vertices.push_back(Float3(pos, -GridSide, 0.0f));
        vertices.push_back(Float3(pos, GridSide, 0.0f));
        // Y line
        vertices.push_back(Float3(-GridSide, pos, 0.0f));
        vertices.push_back(Float3( GridSide, pos, 0.0f));
        LineCount += 2;
    }
    s_gridVao.create();
    s_gridVao.bind();
    s_gridBuffer.create();
    s_gridBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
    s_gridBuffer.allocate(vertices.data(), vertices.memorySize());
    // Shader allocation and initialization
    s_gridShader.create();
    if (!s_gridShader.addShaderFromSourceFile(QOpenGLShader::Vertex, ":/shaders/Grid.vert")) {
        qWarning() << "Cannot add grid vertex shader";
    }
    if (!s_gridShader.addShaderFromSourceFile(QOpenGLShader::Fragment, ":/shaders/Grid.frag")) {
        qWarning() << "Cannot add grid fragment shader";
    }
    if (!s_gridShader.link()) {
        qWarning() << "Cannot link grid shader";
    }
    s_gridBuffer.bind();
    s_gridShader.enableAttributeArray("vertexPosition");
    s_gridShader.setAttributeBuffer("vertexPosition", GL_FLOAT, 0, 3);
    s_gridBuffer.release();
    s_gridShader.release();
    s_gridVao.release();
}
void ReferenceGrid::draw()
{
    s_gridVao.bind();
    s_gridShader.bind();
    s_gridBuffer.bind();
    glfuncs->glDrawArrays(GL_LINES, 0, LineCount);
    // Returns GL_OUT_OF_MEMORY
    assert(glfuncs->glGetError() == GL_NO_ERROR);
    s_gridBuffer.release();
    s_gridShader.release();
    s_gridVao.release();
}
The problem is that after the call to glDrawArrays, glGetError() returns GL_OUT_OF_MEMORY. I cannot understand what is going on.
Has anyone already encountered this problem and found a solution?
I forgot to bind the buffer before allocating it. I thought Qt was doing this automatically, but I was wrong. So the right thing to do is:
s_gridBuffer.create();
s_gridBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
s_gridBuffer.bind();
s_gridBuffer.allocate(vertices.data(), vertices.memorySize());
I found a good tutorial on the topic, http://www.kdab.com/opengl-in-qt-5-1-part-2/

Invalid value GLSL?

After letting my OpenGL program run for a while and viewing the scene from different angles, I get an OpenGL "invalid value" error from my shader program. This is literally my program:
Vertex
#version 420
in vec4 Position;
uniform mat4 modelViewProjection;
void main()
{
    gl_Position = modelViewProjection * Position;
}
Fragment
#version 420
out vec4 fragment;
void main()
{
    fragment = vec4(1,0,0,1);
}
This error occurs right after the function call that tells OpenGL to use my shader program. What could the cause of this be? It happens regardless of the object I call it on. How can I get more information on what is going on? The error occurs almost randomly for a series of frames, then works again for a while, fails again after a bit, etc.
If it helps, here is what my program linking looks like:
...
myShader = glCreateProgram();
CreateShader(myShader,GL_VERTEX_SHADER, "shaders/prog.vert");
CreateShader(myShader,GL_FRAGMENT_SHADER, "shaders/prog.frag");
glLinkProgram(myShader);
PrintProgramLog(myShader);
...
void CreateShader(int prog, const GLenum type, const char* file)
{
    int shad = glCreateShader(type);
    char* source = ReadText(file);
    glShaderSource(shad,1,(const char**)&source,NULL);
    free(source);
    glCompileShader(shad);
    PrintShaderLog(shad,file);
    glAttachShader(prog,shad);
}
This is what I'm using to get the error:
void ErrCheck(const char* where)
{
    int err = glGetError();
    if (err) fprintf(stderr,"ERROR: %s [%s]\n",gluErrorString(err),where);
}
And here is what is being printed out at me:
ERROR: invalid value [drawThing]
It happens after I call to use the program:
glUseProgram(_knightShaders[0]);
ErrCheck("drawThing");
or glGetUniformLocation:
glGetUniformLocation(myShader, "modelViewProjection");
ErrCheck("drawThing2");
So I fixed the problem. What I had above wasn't the whole truth; what I actually had was
myShader[0] = glCreateProgram();
myShader was an array of 4 GLuint(s), each element being a different shader program (although at this point they were all copies of the shader program I posted above). The problem was fixed when I stopped using an array and instead used:
GLuint myShader0;
GLuint myShader1;
GLuint myShader2;
GLuint myShader3;
Why this fixed the problem makes no sense to me. It's also pretty annoying, because rather than being able to index the shader I want, such as:
int mode = ... (code to determine which shader to use here)
glUseProgram(myShader[mode]);
I have to instead use conditionals:
int mode = ... (code to determine which shader to use here)
if (mode == 0) glUseProgram(myShader0);
else if (mode == 1) glUseProgram(myShader1);
else if (mode == 2) glUseProgram(myShader2);
else glUseProgram(myShader3);
If any of you know why this fixes the problem, I would very much appreciate the knowledge!

OpenGL: glVertexAttribPointer() fails with "Invalid value" for stride bigger than 2048 on new NVIDIA drivers

Did anyone else notice that it is no longer possible to call glVertexAttribPointer() with a stride bigger than 2048 with newer NVIDIA drivers (331.58 WHQL and above)? The call produces the OpenGL error Invalid value (1281).
For example, the following minimal GLUT example generates OpenGL error 1281 once testStride(2049); is called when using driver 331.58 WHQL:
#include <iostream>
#include <GL/glut.h>
#include <windows.h>
using namespace std;
PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer = 0;
void testStride(const GLsizei stride)
{
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0);
    GLenum code = glGetError();
    if (code != GL_NO_ERROR)
        std::cerr << "glVertexAttribPointer() with a stride of " << stride << " failed with code " << code << std::endl;
    else
        std::cout << "glVertexAttribPointer() with a stride of " << stride << " succeeded" << std::endl;
}
void render(void)
{
    testStride(2048); // Works well with driver version 311.06 and 331.58
    testStride(2049); // Does not work with driver version 331.58 but works well with driver version 311.06
}
int main(int argc, char* argv[])
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
    glutCreateWindow("Window");
    glutDisplayFunc(render);
    glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) wglGetProcAddress("glVertexAttribPointer");
    glutMainLoop();
    return 0;
}
What is your opinion? Am I doing anything wrong?
@Christian Rau: Thank you very much for the hint. I immediately replaced the glVertexPointer() call with glVertexAttribPointer() and still get the same result.
OpenGL 4.4 added GL_MAX_VERTEX_ATTRIB_STRIDE, which is exactly what it sounds like: a hard, implementation-defined limit on the maximum stride you're allowed to use. It applies equally to separate attribute formats and old-style glVertexAttribPointer.
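If you have 4.4-level headers available, you can query the limit and guard the call yourself, e.g. inside testStride(); a sketch (assumes GL_MAX_VERTEX_ATTRIB_STRIDE is defined by your headers/loader):
// Sketch: query the implementation's stride limit before pointing an attribute at the buffer.
GLint maxStride = 0;
glGetIntegerv(GL_MAX_VERTEX_ATTRIB_STRIDE, &maxStride);
std::cout << "GL_MAX_VERTEX_ATTRIB_STRIDE = " << maxStride << std::endl;
if (stride <= maxStride)
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0);
else
    std::cerr << "stride " << stride << " exceeds the implementation limit" << std::endl;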

SIGSEGV error while using C++ on Linux with OpenGL and SDL

A few other guys and I are taking a crack at building a simple side-scroller type game. However, I cannot get hold of them to help answer my question, so I put it to you: the following code leaves me with a SIGSEGV error in the annotated place. If anyone can tell me why, I would really appreciate it. If you need any more info, I will be watching this closely.
Main.cpp
Vector2 dudeDim(60,60);
Vector2 dudePos(300, 300);
Entity *test = new Entity("img/images.jpg", dudeDim, dudePos, false);
leads to:
Entity.cpp
Entity::Entity(std::string filename, Vector2 size, Vector2 position, bool passable):
    mTexture(filename)
{
    mTexture.load(false);
    mDimension2D = size;
    mPosition2D = position;
    mPassable = passable;
}
leads to:
Textures.cpp
void Texture::load(bool generateMipmaps)
{
    FREE_IMAGE_FORMAT imgFormat = FIF_UNKNOWN;
    FIBITMAP *dib(0);
    imgFormat = FreeImage_GetFileType(mFilename.c_str(), 0);
    //std::cout << "File format: " << imgFormat << std::endl;
    if (FreeImage_FIFSupportsReading(imgFormat)) // Check if the plugin has reading capabilities and load the file
        dib = FreeImage_Load(imgFormat, mFilename.c_str());
    if (!dib)
        std::cout << "Error loading texture files!" << std::endl;
    BYTE* bDataPointer = FreeImage_GetBits(dib); // Retrieve the image data
    mWidth = FreeImage_GetWidth(dib); // Get the image width and height
    mHeight = FreeImage_GetHeight(dib);
    mBitsPerPixel = FreeImage_GetBPP(dib);
    if (!bDataPointer || !mWidth || !mHeight)
        std::cout << "Error loading texture files!" << std::endl;
    // Generate and bind ID for this texture
    // vvvvvvvvvv!!!ERROR HERE!!!vvvvvvvvvvv
    glGenTextures(1, &mId);
    // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    glBindTexture(GL_TEXTURE_2D, mId);
    int format = mBitsPerPixel == 24 ? GL_BGR_EXT : mBitsPerPixel == 8 ? GL_LUMINANCE : 0;
    int iInternalFormat = mBitsPerPixel == 24 ? GL_RGB : GL_DEPTH_COMPONENT;
    if(generateMipmaps)
        glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, mWidth, mHeight, 0, format, GL_UNSIGNED_BYTE, bDataPointer);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // Linear Filtering
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); // Linear Filtering
    //std::cout << "texture generated " << mId << std::endl;
    FreeImage_Unload(dib);
}
After reading Peter's suggestion, I changed my main.cpp file to:
#include <iostream>
#include <vector>
#include "Game.h"
using namespace std;
int main(int argc, char** argv)
{
    Game theGame;
    /* Initialize game control objects and resources */
    if (theGame.onInit() != false)
    {
        return theGame.onExecute();
    }
    else
    {
        return -1;
    }
}
It would seem the SIGSEGV error is gone, and I'm now left with something not initializing. So thank you, Peter, you were correct; now I'm off to solve this issue.
OK, this is obviously only a small part of the code, but to save time and a bit of sanity, all the code is available at:
GitHub Repo
So after looking at your code, I can say the problem is probably that you have not initialized your OpenGL context before executing that code.
You need to call your Game::onInit(), which also calls RenderEngine::initGraphics(), before making any calls to OpenGL, which you currently don't do. Right now the order is main() -> Game ctor (which calls the rendering engine ctor, but that ctor doesn't init SDL and OpenGL) -> Entity ctor -> load texture.
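In other words, the call order needs to become roughly this (a sketch against the names in your repo; it assumes the Entity can be created between onInit() and onExecute()):
// Sketch: make sure the SDL window and GL context exist before anything touches OpenGL.
int main(int argc, char** argv)
{
    Game theGame;
    if (theGame.onInit() == false)   // creates the window and OpenGL context via RenderEngine::initGraphics()
        return -1;
    // Only now is it safe to create entities that load textures (glGenTextures etc.).
    Vector2 dudeDim(60, 60);
    Vector2 dudePos(300, 300);
    Entity* test = new Entity("img/images.jpg", dudeDim, dudePos, false);
    return theGame.onExecute();
}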
For details look at the OpenGL Wiki FAQ