Unable to get Tessellation shader working - OpenGL

I've just started following the OpenGL SuperBible, 7th ed., translating the examples into LWJGL, but have become stuck on the tessellation shader. In the following program there is the line "//IF THESE TWO LINES...": if the two lines that follow it are commented out, the vertex and fragment shaders work, but when control.tess.glsl and eval.tess.glsl are included, the triangle no longer renders.
I've uploaded my program to GitHub but will reproduce the code here as well:
package com.ch3vertpipeline;
public class App {
public static void main(String [] args){
LwjglSetup setup = new LwjglSetup();
setup.run();
}
}
package com.ch3vertpipeline;
import java.nio.IntBuffer;
import java.util.Scanner;
import org.lwjgl.*;
import org.lwjgl.glfw.*;
import org.lwjgl.opengl.*;
import org.lwjgl.system.*;
import static org.lwjgl.glfw.Callbacks.*;
import static org.lwjgl.glfw.GLFW.*;
import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.opengl.GL20.*;
import static org.lwjgl.opengl.GL30.*;
import static org.lwjgl.system.MemoryStack.stackPush;
import static org.lwjgl.system.MemoryUtil.NULL;
public class LwjglSetup {
private long window;
private int vertex_shader;
private int fragment_shader;
private int tess_control_shader;
private int tess_evaluation_shader;
private int program;
private int vertex_array_object;
public LwjglSetup() {
}
private void init() {
GLFWErrorCallback.createPrint(System.err).set();
if (!glfwInit()) {
throw new IllegalStateException("Unable to initialize GLFW");
}
// Configure GLFW
glfwDefaultWindowHints(); // optional, the current window hints are already the default
glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE); // the window will stay hidden after creation
glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE); // the window will be resizable
// Create the window
window = glfwCreateWindow(300, 300, "Hello World!", NULL, NULL);
if (window == NULL) {
throw new RuntimeException("Failed to create the GLFW window");
}
// Setup a key callback. It will be called every time a key is pressed, repeated or released.
glfwSetKeyCallback(window, (window, key, scancode, action, mods) -> {
if (key == GLFW_KEY_ESCAPE && action == GLFW_RELEASE) {
glfwSetWindowShouldClose(window, true); // We will detect this in the rendering loop
}
});
// Get the thread stack and push a new frame
try (MemoryStack stack = stackPush()) {
IntBuffer pWidth = stack.mallocInt(1); // int*
IntBuffer pHeight = stack.mallocInt(1); // int*
// Get the window size passed to glfwCreateWindow
glfwGetWindowSize(window, pWidth, pHeight);
// Get the resolution of the primary monitor
GLFWVidMode vidmode = glfwGetVideoMode(glfwGetPrimaryMonitor());
// Center the window
glfwSetWindowPos(
window,
(vidmode.width() - pWidth.get(0)) / 2,
(vidmode.height() - pHeight.get(0)) / 2
);
} // the stack frame is popped automatically
// Make the OpenGL context current
glfwMakeContextCurrent(window);
// Enable v-sync
glfwSwapInterval(1);
// Make the window visible
glfwShowWindow(window);
}
public void run() {
System.out.println("Hello LWJGL " + Version.getVersion() + "!");
init();
loop();
// Free the window callbacks and destroy the window
glfwFreeCallbacks(window);
glfwDestroyWindow(window);
// Terminate GLFW and free the error callback
glfwTerminate();
glfwSetErrorCallback(null).free();
}
private void loop() {
GL.createCapabilities();//Critical
System.out.println("OpenGL Verion: " + glGetString(GL_VERSION));
this.compileShader();
vertex_array_object = glGenVertexArrays();
glBindVertexArray(vertex_array_object);
while (!glfwWindowShouldClose(window)) {
double curTime = System.currentTimeMillis() / 1000.0;
double slowerTime = curTime;//assigned directly but I was applying a factor here
final float colour[] = {
(float) Math.sin(slowerTime) * 0.5f + 0.5f,
(float) Math.cos(slowerTime) * 0.5f + 0.5f,
0.0f, 1.0f};
glClearBufferfv(GL_COLOR, 0, colour);
glUseProgram(program);
final float attrib[] = {
(float) Math.sin(slowerTime) * 0.5f,
(float) Math.cos(slowerTime) * 0.6f,
0.0f, 0.0f};
//glPatchParameteri(GL_PATCH_VERTICES, 3);//this is the default so is unneeded
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glVertexAttrib4fv(0, attrib);
glDrawArrays(GL_TRIANGLES, 0, 3);
glfwSwapBuffers(window); // swap the color buffers
glfwPollEvents();
}
glDeleteVertexArrays(vertex_array_object);
glDeleteProgram(program);
}
private String readFileAsString(String filename) {
String next = new Scanner(LwjglSetup.class.getResourceAsStream(filename), "UTF-8").useDelimiter("\\A").next();
System.out.println("readFileAsString: " + next);
return next;
}
private void compileShader() {
//int program;
//NEW CODE
//create and compile vertex shader
String vertShaderSource = readFileAsString("/vert.glsl");
vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, vertShaderSource);
glCompileShader(vertex_shader);
//check compilation
if (glGetShaderi(vertex_shader, GL_COMPILE_STATUS) != 1) {
System.err.println(glGetShaderInfoLog(vertex_shader));
System.exit(1);
}
//create and compile fragment shader
String fragShaderSource = readFileAsString("/frag.glsl");
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, fragShaderSource);
glCompileShader(fragment_shader);
//check compilation
if (glGetShaderi(fragment_shader, GL_COMPILE_STATUS) != 1) {
System.err.println(glGetShaderInfoLog(fragment_shader));
System.exit(1);
}
//create and compile tessellation shader
String tessControlShaderSource = readFileAsString("/control.tess.glsl");
tess_control_shader = glCreateShader(GL40.GL_TESS_CONTROL_SHADER);
glShaderSource(tess_control_shader, tessControlShaderSource);
glCompileShader(tess_control_shader);
//check compilation
if (glGetShaderi(tess_control_shader, GL_COMPILE_STATUS) != 1) {
System.err.println(glGetShaderInfoLog(tess_control_shader));
System.exit(1);
}
//create and compile tessellation shader
String tessEvaluationShaderSource = readFileAsString("/eval.tess.glsl");
tess_evaluation_shader = glCreateShader(GL40.GL_TESS_EVALUATION_SHADER);
glShaderSource(tess_evaluation_shader, tessEvaluationShaderSource);
glCompileShader(tess_evaluation_shader);
//check compilation
if (glGetShaderi(tess_evaluation_shader, GL_COMPILE_STATUS) != 1) {
System.err.println(glGetShaderInfoLog(tess_evaluation_shader));
System.exit(1);
}
//create program and attach it
program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
//IF THESE TWO LINES ARE COMMENTED OUT, THE PROGRAM WORKS...although there
//is no tessellation...
glAttachShader(program, tess_control_shader);
glAttachShader(program, tess_evaluation_shader);
glLinkProgram(program);
//check link
if (glGetProgrami(program, GL_LINK_STATUS) != 1) {
System.err.println(glGetProgramInfoLog(program));
System.exit(1);
}
glValidateProgram(program);
if (glGetProgrami(program, GL_VALIDATE_STATUS) != 1) {
System.err.println(glGetProgramInfoLog(program));
System.exit(1);
}
//delete shaders as the program has them now
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
glDeleteShader(tess_control_shader);
glDeleteShader(tess_evaluation_shader);
//return program;
}
}
vert.glsl
#version 440 core
//'offset' is an input vertex attribute
layout (location=0) in vec4 offset;
layout (location=1) in vec4 color;
out vec4 vs_color;
void main(void)
{
const vec4 vertices[3] = vec4[3]( vec4( 0.25, -0.25, 0.5, 1.0),
vec4(-0.25, -0.25, 0.5, 1.0),
vec4( 0.25, 0.25, 0.5, 1.0));
//Add 'offset' to our hard-coded vertex position
gl_Position = vertices[gl_VertexID] + offset;
//Output a fixed value for vs_color
vs_color = color;
}
frag.glsl
#version 440 core
in vec4 vs_color;
out vec4 color;
void main(void)
{
color = vs_color;
}
control.tess.glsl
#version 440 core
layout (vertices=3) out;
void main(void)
{
//Only if I am invocation 0
if (gl_InvocationID == 0){
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
//Everybody copies their input to their output?
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
eval.tess.glsl
#version 440 core
layout (triangles, equal_spacing, cw) in;
void main(void){
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position) +
(gl_TessCoord.y * gl_in[1].gl_Position) +
(gl_TessCoord.z * gl_in[2].gl_Position);
}
Finally, if it helps, here is some version information, which is printed at the start of the application:
Hello LWJGL 3.1.5 build 1!
OpenGL Version: 4.4.0 NVIDIA 340.107

glDrawArrays(GL_TRIANGLES, 0, 3);
When you draw something with tessellation, you are drawing patches, not triangles. Hence, you have to specify GL_PATCHES:
glDrawArrays(GL_PATCHES, 0, 3);
//Everybody copies their input to their output?
The reason is that the input vertices and output vertices of the tessellation control shader are not related to each other. The input vertices are taken from the input stream, i.e. your vertex buffers (after being processed by the vertex shader). Their number is specified by the GL_PATCH_VERTICES parameter. Each invocation takes this number of vertices from the buffer. The output vertices are kept internally in the pipeline. Their number is specified by the layout directive. This number can be different from the number of input vertices. They can also have different attributes. I find it more intuitive to think of these vertices as pieces of data instead of actual vertices with a geometric meaning. In some cases, this interpretation might make sense, but definitely not in all.
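Concretely, for the loop above, the draw would look like this (a minimal sketch in C-style GL; in LWJGL the calls are the same apart from the class prefixes, e.g. GL40.glPatchParameteri):
//Tessellation consumes patches, so the primitive type must be GL_PATCHES.
//3 vertices per patch is the default; it is set explicitly here only for clarity.
glPatchParameteri(GL_PATCH_VERTICES, 3);
glDrawArrays(GL_PATCHES, 0, 3);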

Related

glPointSize() Fails to Shrink a Point

I'm trying to write a simple OpenGL program that cyclically grows and shrinks a single point at the center of the screen, using glPointSize() and a variable pointSize. Printing the value of pointSize and stepping through the code with a debugger appear to show that pointSize updates correctly on each iteration. The rendering of the point also appears correct while pointSize is increasing, but when pointSize is decreasing the point is still rendered at its maximum size, and it keeps rendering that way no matter how much pointSize grows or shrinks in value. I cannot figure out why.
#include <GL\glew.h>
#include <GLFW\glfw3.h>
#include <iostream>
#define numVAOs 1
GLuint renderingProgram;
GLuint vao[numVAOs];
GLuint createShaderProgram() {
const char *vshaderSource =
"#version 430 \n"
"void main(void) \n"
"{ gl_Position = vec4(0.0, 0.0, 0.0, 1.0); }";
const char *fshaderSource =
"#version 430 \n"
"out vec4 color; \n"
"void main(void) \n"
"{ color = vec4(0.0, 0.0, 1.0, 1.0); }";
GLuint vShader = glCreateShader(GL_VERTEX_SHADER);
GLuint fShader = glCreateShader(GL_FRAGMENT_SHADER);
GLuint vfprogram = glCreateProgram();
glShaderSource(vShader, 1, &vshaderSource, NULL);
glShaderSource(fShader, 1, &fshaderSource, NULL);
glCompileShader(vShader);
glCompileShader(fShader);
glAttachShader(vfprogram, vShader);
glAttachShader(vfprogram, fShader);
glLinkProgram(vfprogram);
return vfprogram;
}
void init() {
renderingProgram = createShaderProgram();
glGenVertexArrays(numVAOs, vao);
glBindVertexArray(vao[0]);
}
GLfloat pointSize = 100.0f;
GLfloat increment = 1.0f;
void display() {
glUseProgram(renderingProgram);
if (pointSize > 200 || pointSize < 2) increment *= -1.0f;
pointSize += increment;
glPointSize(pointSize);
glDrawArrays(GL_POINTS, 0, 1);
}
int main(void) {
if (!glfwInit()) exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
GLFWwindow* window = glfwCreateWindow(600, 600, "Test", nullptr, nullptr);
glfwMakeContextCurrent(window);
if (glewInit() != GLEW_OK) exit(EXIT_FAILURE);
glfwSwapInterval(1);
init();
while (!glfwWindowShouldClose(window)) {
display();
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Your code does not clear the screen with glClear. Therefore, when the point grows, you can see it. When it shrinks, it's simply drawn 'inside' the bigger dot, without you noticing it. You could catch that if you changed the color of the dot as it changes size.
Simply clear the color buffer at the beginning of the frame:
void display() {
// default clear color is black, but you can change it:
//glClearColor(1,0,0,1);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(renderingProgram);
...
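Combined with the rest of the question's display(), the corrected function reads, as a sketch:
void display() {
glClear(GL_COLOR_BUFFER_BIT); // start every frame from an empty color buffer
glUseProgram(renderingProgram);
if (pointSize > 200 || pointSize < 2) increment *= -1.0f;
pointSize += increment;
glPointSize(pointSize);
glDrawArrays(GL_POINTS, 0, 1);
}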

Memory barrier problems for writing and reading an image OpenGL

I'm having a problem trying to read an image from a fragment shader. First I write into the image in shader program A (I'm just painting blue into the image), then I read from another shader program B to display the image, but the read does not return the right color: I'm getting a black image.
Unexpected result
This is my application code:
void GLAPIENTRY MessageCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, const void* userParam)
{
std::cout << "GL CALLBACK: type = " << std::hex << type << ", severity = " << std::hex << severity << ", message = " << message << "\n"
<< (type == GL_DEBUG_TYPE_ERROR ? "** GL ERROR **" : "") << std::endl;
}
class ImgRW
: public Core
{
public:
ImgRW()
: Core(512, 512, "JFAD")
{}
virtual void Start() override
{
glEnable(GL_DEBUG_OUTPUT);
glDebugMessageCallback(MessageCallback, nullptr);
shader_w = new Shader("w_img.vert", "w_img.frag");
shader_r = new Shader("r_img.vert", "r_img.frag");
glGenTextures(1, &space);
glBindTexture(GL_TEXTURE_2D, space);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, 512, 512);
glBindImageTexture(0, space, 0, GL_FALSE, 0, GL_READ_WRITE, GL_RGBA32F);
glGenVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
}
virtual void Update() override
{
shader_w->use(); // writing shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
shader_r->use(); // reading shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
virtual void End() override
{
delete shader_w;
delete shader_r;
glDeleteTextures(1, &space);
glDeleteVertexArrays(1, &vertex_array);
}
private:
Shader* shader_w;
Shader* shader_r;
GLuint vertex_array;
GLuint space;
};
#if 1
CORE_MAIN(ImgRW)
#endif
and these are my fragment shaders:
Writing to image
GLSL code:
#version 430 core
layout (binding = 0, rgba32f) uniform image2D img;
out vec4 out_color;
void main()
{
imageStore(img, ivec2(gl_FragCoord.xy), vec4(0.0f, 0.0f, 1.0f, 1.0f));
}
Reading from image
GLSL code:
#version 430 core
layout (binding = 0, rgba32f) uniform image2D img;
out vec4 out_color;
void main()
{
vec4 color = imageLoad(img, ivec2(gl_FragCoord.xy));
out_color = color;
}
The only way I get the correct result is if I change the order of the draw commands, and then I don't need the memory barriers, like this (in the Update function above):
shader_r->use(); // reading shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
shader_w->use(); // writing shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
I don't know if the problem is the graphics card or the drivers, or if I'm missing some kind of flag that enables memory barriers, or if I used the wrong barrier bits, or if I placed the barriers in the wrong part of the code.
The vertex shader for both shader programs is the following:
#version 430 core
void main()
{
vec2 v[4] = vec2[4]
(
vec2(-1.0, -1.0),
vec2( 1.0, -1.0),
vec2(-1.0, 1.0),
vec2( 1.0, 1.0)
);
vec4 p = vec4(v[gl_VertexID], 0.0, 1.0);
gl_Position = p;
}
and my init function is:
void Window::init()
{
glfwInit();
window = glfwCreateWindow(getWidth(), getHeight(), name, nullptr, nullptr);
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebufferSizeCallback);
glfwSetCursorPosCallback(window, cursorPosCallback);
//glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
assert(gladLoadGLLoader((GLADloadproc)glfwGetProcAddress) && "Couldn't initialize OpenGL");
glEnable(GL_DEPTH_TEST);
}
and in my Run function I call my Start, Update and End functions:
void Core::Run()
{
std::cout << glGetString(GL_VERSION) << std::endl;
Start();
float lastFrame{ 0.0f };
while (!window.close())
{
float currentFrame = static_cast<float>(glfwGetTime());
Time::deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
glViewport(0, 0, getWidth(), getHeight());
glClearBufferfv(GL_COLOR, 0, &color[0]);
glClearBufferfi(GL_DEPTH_STENCIL, 0, 1.0f, 0);
Update();
glfwSwapBuffers(window);
glfwPollEvents();
}
End();
}
glEnable(GL_DEPTH_TEST);
As I suspected.
Just because a fragment shader doesn't write a color output doesn't mean that those fragments will not affect the depth buffer. If the fragment passes the depth test and the depth write mask is on (assuming no other state is involved), it will update the depth buffer with the current fragment's depth (and the color buffer with uninitialized values, but that's a different matter).
Since you're drawing the same geometry both times, the second rendering's fragments will get the same depth values as the corresponding fragments from the first rendering. But the default depth function is GL_LESS. Since any value is not less than itself, this means that all fragments from the second rendering fail the depth test.
And therefore, they don't get rendered.
So just turn off the depth test. And while you're at it, turn off color writes for your "writing" rendering pass, since you're not writing to the color buffers.
Now, you do properly need the memory barrier between the two draw calls. But you only need the GL_SHADER_IMAGE_ACCESS_BARRIER_BIT, since that's how you're reading the data (via image load/store, not samplers).
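Putting both fixes together, a sketch of the corrected Update() (member names as in the question):
virtual void Update() override
{
    glDisable(GL_DEPTH_TEST); // both passes rasterize the same quad at the same depth
    glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); // the writing pass only outputs via imageStore
    shader_w->use(); // writing shader
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
    // make the image stores visible to the imageLoad calls of the next draw
    glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
    shader_r->use(); // reading shader
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}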

Vertex shader not compiling due to a non-ASCII character?

So I started using OpenGL with GLEW and GLFW to create a game engine, and I almost immediately ran into a problem when starting to work with shaders:
They are not being used or have no effect whatsoever if they are being used.
I have been checking my code against plenty of other examples, and they all match up, nothing looks out of place, and I am starting to run out of ideas and patience with this (I have been trying to figure out why for nearly a month now).
My main core code is here:
#include "headers/Default.hpp"
//Window width and height variables
int windowWidth = 800;
int windowHeight = 600;
float Aspect = (float)windowWidth / (float)windowHeight;
//Buffer width and buffer height
int bufferWidth;
int bufferHeight;
double deltaTime;
double currentTime;
double newTime;
void CalculateDelta()
{
newTime = glfwGetTime();
deltaTime = newTime - currentTime;
currentTime = newTime;
}
//A call back function to get the window size
void UpdateWindowSize(GLFWwindow* window, int width, int height)
{
windowWidth = width;
windowHeight = height;
Aspect = (float)windowWidth / (float)windowHeight;
}
void UpdateFrameBufferSize(GLFWwindow* window, int width, int height)
{
bufferWidth = width;
bufferHeight = height;
}
//Starts on startup and creates an window context and starts the rendering loop
int main()
{
//Creates an engine startup log
CreateStartupLog();
if (!glewInit())
{
WriteStartupLog("ERROR: GLEW failed to start\n");
return 1;
}
else
{
WriteStartupLog("INFO: GLEW initiated!\n");
}
//If glfw is not initiated for whatever reason we return an error
if (!glfwInit())
{
WriteStartupLog("ERROR: GLFW failed to start\n");
return 1;
}
else
{
WriteStartupLog("INFO: GLFW initiated!\n");
}
////////////////////////////////////////////////////////////////
// Window Section //
////////////////////////////////////////////////////////////////
//glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
//glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
//Gets the primary monitor of the PC and tells OpenGL to use that monitor
GLFWmonitor* monitor = glfwGetPrimaryMonitor();
const GLFWvidmode* videoMode = glfwGetVideoMode(monitor);
//Creates a GLFW window context that we can work with
GLFWwindow* gameWindow = glfwCreateWindow(windowWidth/*videoMode->width*/, windowHeight/*videoMode->height*/, "FireTech Engine", NULL/*monitor*/, NULL);
//If the game window is not able to be created, prints an error and terminates the program
if (!gameWindow)
{
WriteStartupLog("ERROR: GLFW could not create a window\n");
glfwTerminate();
return 1;
}
else
{
WriteStartupLog("INFO: GLFW created a window!\n\n");
}
//Makes the current context
glfwMakeContextCurrent(gameWindow);
//Sets the window callback function for size
glfwSetWindowSizeCallback(gameWindow, UpdateWindowSize);
glfwSetFramebufferSizeCallback(gameWindow, UpdateFrameBufferSize);
//Initiate GLEW
glewExperimental = GL_TRUE;
glewInit();
////////////////////////////////////////////////////////////////
// Functions to set up various systems of the game engine //
////////////////////////////////////////////////////////////////
//Calls function to create a log file for the game engine
CreateEngineLog();
//Calls the function to compile the default shaders
CompileDefaultShader();
//Calls the function to get and print out hardware and OpenGL version
//PrintHardwareInfo();
////////////////////////////////////////////////////////////////
// Game Code //
////////////////////////////////////////////////////////////////
Sprite testSprite;
//Rendering loop
while (!glfwWindowShouldClose(gameWindow))
{
CalculateDelta();
glClearColor(0.3, 0.6, 1.0, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Viewport and ortho settings
glViewport(0, 0, windowWidth, windowHeight);
glOrtho(-1, 1, -1 / Aspect, 1 / Aspect, 0, 1);
//Draw a sprite
if (GLFW_PRESS == glfwGetKey(gameWindow, GLFW_KEY_F2))
{
testSprite.DebugDraw();
}
else
{
testSprite.Draw();
}
//Draws the stuff we just rendered
glfwSwapBuffers(gameWindow);
glLoadIdentity();
//Polls different events, like input for example
glfwPollEvents();
if (GLFW_PRESS == glfwGetKey(gameWindow, GLFW_KEY_F1))
{
int fps = GetFPS();
printf("FPS: ");
printf("%d\n", fps);
printf("Frequency: ");
printf("%f\n", 1/double(fps));
}
if (GLFW_PRESS == glfwGetKey(gameWindow, GLFW_KEY_ESCAPE))
{
glfwSetWindowShouldClose(gameWindow, 1);
}
}
glfwTerminate();
WriteEngineLog("PROGRAM EXITED: Window closed");
return 0;
}
Here is the shader.cpp code:
#include "../headers/Default.hpp"
string ReadShaderFile(char* path)
{
ifstream shaderFile;
shaderFile.open(path, std::ifstream::in);
string output;
if (shaderFile.is_open())
{
printf("Opened shader file located at: \"%s\"\n", path);
char c;
while (shaderFile.get(c)) //checking the stream directly avoids appending the EOF marker
{
output += c;
}
printf("Successfully read shader file located at: \"%s\"\n", path);
}
else
{
WriteEngineLog("ERROR: Could not read shader file!\n");
}
shaderFile.close();
return output;
}
Shader::Shader()
{
WriteEngineLog("WARNING: There was no path to any GLSL Shader files\n");
}
Shader::Shader(char* VertexShaderPathIn, char* FragmentShaderPathIn)
{
string vertexShaderString = ReadShaderFile(VertexShaderPathIn);
string fragmentShaderString = ReadShaderFile(FragmentShaderPathIn);
//Prints out the string to show the shader's code
printf("\n%s\n", vertexShaderString.c_str());
printf("\n%s\n", fragmentShaderString.c_str());
//Creates the GLchars needed to input the shader code
const GLchar* vertex_shader = vertexShaderString.c_str();
const GLchar* fragment_shader = fragmentShaderString.c_str();
//Creates a vertex shader and compiles it
GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
WriteEngineLog("Blank vertex shader created\n");
glShaderSource(vertexShader, 1, &vertex_shader, NULL);
WriteEngineLog("Vertex shader given source\n");
glCompileShader(vertexShader);
//Compilation error checking begins here
GLint isVertexCompiled = 0;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &isVertexCompiled);
if (isVertexCompiled == GL_FALSE)
{
//Gets the length of the log
GLint maxLength = 0;
glGetShaderiv(vertexShader, GL_INFO_LOG_LENGTH, &maxLength);
//Creates and writes the log to the errorLog
GLchar* errorLog = (GLchar*)malloc(maxLength);
glGetShaderInfoLog(vertexShader, maxLength, &maxLength, &errorLog[0]);
//Writes to the engine log with the shader error
WriteEngineLog("ERROR: Vertex shader failed to compile!\n");
printf("%s\n", (char*)errorLog);
//Frees the errorLog allocation
free(errorLog);
//Deletes the shader so it doesn't leak
glDeleteShader(vertexShader);
WriteEngineLog("ERROR: Aborting shader creation.\n");
return;
}
//Writes in the engine log to report successful compilation
WriteEngineLog("Vertex shader successfully compiled!\n");
//Creates a fragment shader
GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
WriteEngineLog("Blank fragment shader created\n");
glShaderSource(fragmentShader, 1, &fragment_shader, NULL);
WriteEngineLog("Fragment shader given source\n");
glCompileShader(fragmentShader);
//Compilation error checking begins here
GLint isFragmentCompiled = 0;
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &isFragmentCompiled);
if (isFragmentCompiled == GL_FALSE)
{
//Gets the length of the log
GLint maxLength = 0;
glGetShaderiv(fragmentShader, GL_INFO_LOG_LENGTH, &maxLength);
//Creates and writes the log to the errorLog
GLchar* errorLog = (GLchar*)malloc(maxLength);
glGetShaderInfoLog(fragmentShader, maxLength, &maxLength, &errorLog[0]);
WriteEngineLog("ERROR: Fragment shader failed to compile\n");
printf("%s\n", (char*)errorLog);
//Frees the errorLog allocation
free(errorLog);
//Deletes the shader so it doesn't leak
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
WriteEngineLog("ERROR: Aborting shader creation.\n");
return;
}
//Writes in the engine log to report successful compilation
WriteEngineLog("Fragment shader successfully compiled!\n");
//Creates the final shader product
this->Program = glCreateProgram();
WriteEngineLog("Blank shader created\n");
glAttachShader(this->Program, vertexShader);
WriteEngineLog("Attatched Vertex shader to the shader\n");
glAttachShader(this->Program, fragmentShader);
WriteEngineLog("Attatched Fragment shader to the shader\n");
glLinkProgram(this->Program);
/*GLint isLinked = 0;
glGetProgramiv(this->Program, GL_LINK_STATUS, (int*)&isLinked);
if (isLinked == GL_FALSE)
{
//Gets the lngth of the shader info log
GLint maxLength = 0;
glGetProgramInfolog(ShaderOut, GL_INFO_LOG_LENGTH, &maxLength);
//Gets and puts the actual log into a GLchar
std::vector<GLchar> infoLog(maxLength);
glGetProgramInfoLog(ShaderOut, maxLength, &maxLength, &infoLog[0]);
//Deletes programs and shaders so they don't leak
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
WriteEngineLog((string)infoLog);
return;
}*/
WriteEngineLog("Shader linked!\n\n");
WriteEngineLog("INFO: Shader created!\n");
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
}
void Shader::Use()
{
glUseProgram(this->Program);
}
Here is the quad.cpp code:
#include "../headers/Default.hpp"
Quad::Quad()
{
position.x = 0;
position.y = 0;
scale.x = 1;
scale.y = 1;
VertexArray = CreateVertexArray();
}
//Quad constructor with one arg
Quad::Quad(Vector2 Position)
{
position = Position;
VertexArray = CreateVertexArray();
}
//Quad constructor with two args
Quad::Quad(Vector2 Position, Vector2 Scale)
{
position = Position;
scale = Scale;
VertexArray = CreateVertexArray();
}
GLuint Quad::CreateVertexArray()
{
GLfloat Vertices[] =
{
//VERTICES //COLORS //TEXCOORDS
0.5f, 0.5f, 0.0f, 0.0f, 0.0f, 0.0f, //1.0f, 1.0f, //Top Right Vertice
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, //1.0f, 0.0f, //Top Left Vertice
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f//, 0.0f, 0.0f //Bottom Left Vertice
};
GLuint vbo, vao;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
//Copy vertices into the buffer
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
//Attribute Pointers
//Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
//Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
//Unbinds the VAO
glBindVertexArray(0);
return vao;
}
//Quad debug drawing function
void Quad::DebugDraw()
{
//Use the default shader
DefaultShader.Use();
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glBindVertexArray(VertexArray);
// draw points 0-3 from the currently bound VAO with current in-use shader
glDrawArrays(GL_TRIANGLES, 0, 3);
//glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); //CAUSING A CRASH AT THE MOMENT
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
//Unbinds the VAO
glBindVertexArray(0);
}
Here is the sprite.cpp code:
#include "../headers/Default.hpp"
Sprite::Sprite()
{
position.x = 0;
position.y = 0;
}
Sprite::Sprite(Texture tex)
{
defaultTexture = tex;
currentTexture = tex;
}
Sprite::Sprite(Texture tex, Vector2 pos)
{
defaultTexture = tex;
currentTexture = tex;
position = pos;
}
Sprite::Sprite(Texture tex, Vector2 pos, Vector2 Scale)
{
defaultTexture = tex;
currentTexture = tex;
position = pos;
scale = Scale;
}
void Sprite::Draw()
{
//Binds the default shader again
glBindVertexArray(VertexArray);
//Use the default shader
DefaultShader.Use();
// draw points 0-3 from the currently bound VAO with current in-use shader
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
}
Here is my vertex shader and fragment shader code (In order):
//Vertex Shader
#version 330 core
layout (location = 0) in vec3 position; // The position variable has attribute position 0
layout (location = 1) in vec3 color;
out vec3 ourColor;
void main()
{
gl_Position = vec4(position, 1.0f); // See how we directly give a vec3 to vec4's constructor
ourColor = color;
}
//Fragment shader
#version 330 core
in vec3 ourColor;
out vec4 color;
void main()
{
color = vec4(ourColor, 1.0);
}
And I'm getting a warning that my shader did not compile... the error is that there is a non-ASCII character at line ZERO of the vertex shader.
I had exactly the same error. This is almost certainly due to Unicode byte order marks, or similar non-printing characters generated by text editors.
These are common in the first characters of a Unicode file, but can occur anywhere.
You can programmatically strip these from your shader source strings before compiling, but this could be costly if you are compiling many shaders.
An alternative is simply to keep the files in ANSI/ASCII format. I am sure most text editors have the facility to set/convert formats, but I will give Notepad++ as an example since it's what I use to edit GLSL:
Open the GLSL file.
Encoding -> Convert to ANSI. (Note that merely hitting "Encode in ANSI" will not strip the characters)
Save the file.
The above should also strip other characters prone to confusing GLSL parsers (and C/C++ in general).
You could inform the user(/developer) the files are in an incorrect format on load in debug builds.
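If you do go the programmatic route, a minimal sketch for the UTF-8 case (the UTF-8 BOM is the byte sequence EF BB BF):
#include <string>

// Strips a UTF-8 byte order mark, if present, before the source reaches glShaderSource.
std::string stripUtf8Bom(std::string source)
{
    if (source.size() >= 3 &&
        (unsigned char)source[0] == 0xEF &&
        (unsigned char)source[1] == 0xBB &&
        (unsigned char)source[2] == 0xBF)
    {
        source.erase(0, 3);
    }
    return source;
}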

How to draw multiple objects with GLM/OpenGL?

I found a lot of examples that create multiple objects from one single vertex array and then get different ones through transforms, such as Draw 2 cubes in OpenGL using GLM and Cant draw multiple objects in opengl.
But what I want is to create completely different objects from completely different vertex arrays. A typical case is http://www.spacesimulator.net/wiki/index.php?title=Tutorials:Matrices_%28OpenGL3.3%29. How do I implement that with GLM? I tried to find some example code.
The following code is a complete example that draws 2 cubes using OpenGL on Linux Debian 9. It also has a class that reports the pressed button and position when you click the mouse. All sources and the Makefile are included.
To compile: copy all files, with the correct names, into the same directory and type make.
To execute: type ./start
All needed libraries are installed with the following commands:
apt-get install libglew-dev
apt-get install freeglut3-dev
apt-get install libglm-dev
The example was assembled from multiple examples on the Internet and restructured as object-oriented code for clarity.
I hope it's useful.
File: cube.hpp
#ifndef CUBE_H
#define CUBE_H
#include <stdio.h>
#include <stdlib.h>
#include <glm/gtc/type_ptr.hpp>
#include <GL/glew.h>
#include <GL/glut.h>
class Cube{
private:
GLuint vboCubeVertex, vboCubeColors, vboCubeFaces;
GLint attribute_coord3d, attribute_v_color;
public:
int init_resources(GLuint shaderProgram);
void draw(GLint uniform_mvp, glm::mat4 mvp);
void disable();
void deleteBuffers();
};
#endif
File: cube.cpp
#include "cube.hpp"
int Cube::init_resources(GLuint shaderProgram)
{
fprintf(stderr, "init_resources\n");
//**********************************************************
// VERTEX **************************************************
//**********************************************************
// Each vertex has the format x,y,z
GLfloat arrCubeVertex[] = {
// front
-1.0, -1.0, 1.0,
1.0, -1.0, 1.0,
1.0, 1.0, 1.0,
-1.0, 1.0, 1.0,
// back
-1.0, -1.0, -1.0,
1.0, -1.0, -1.0,
1.0, 1.0, -1.0,
-1.0, 1.0, -1.0,
};
//**********************************************************
// COLORS OF EACH VERTEX ***********************************
//**********************************************************
GLfloat arrCubeColors[] = {
// front colors
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
1.0, 1.0, 1.0,
// back colors
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
1.0, 1.0, 1.0,
};
// Faces are squares; each square is made with two triangles.
// See document OpenGL_Charly.pdf.
GLushort arrCubeFaces[] = {
// front
0, 1, 2,
2, 3, 0,
// right
1, 5, 6,
6, 2, 1,
// back
7, 6, 5,
5, 4, 7,
// left
4, 0, 3,
3, 7, 4,
// bottom
4, 5, 1,
1, 0, 4,
// up
3, 2, 6,
6, 7, 3,
};
glGenBuffers(1, &vboCubeVertex);
glBindBuffer(GL_ARRAY_BUFFER, vboCubeVertex);
glBufferData(GL_ARRAY_BUFFER, sizeof(arrCubeVertex), arrCubeVertex, GL_STATIC_DRAW);
// The same operations we did with the vertices, we now do with the colors.
glGenBuffers(1, &vboCubeColors);
glBindBuffer(GL_ARRAY_BUFFER, vboCubeColors);
glBufferData(GL_ARRAY_BUFFER, sizeof(arrCubeColors), arrCubeColors, GL_STATIC_DRAW);
// Now the same, but with the faces.
glGenBuffers(1, &vboCubeFaces);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboCubeFaces);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(arrCubeFaces), arrCubeFaces, GL_STATIC_DRAW);
//**********************************************************
attribute_coord3d = glGetAttribLocation(shaderProgram, "coord3d");
if (attribute_coord3d == -1)
{
fprintf(stderr, "Could not bind attribute %s\n", "coord3d");
return 0;
}
attribute_v_color = glGetAttribLocation(shaderProgram, "v_color");
if (attribute_v_color == -1)
{
fprintf(stderr, "Could not bind attribute %s\n", "v_color");
return 0;
}
return 1;
}
void Cube::draw(GLint uniform_mvp, glm::mat4 mvp){
glUniformMatrix4fv(uniform_mvp, 1, GL_FALSE, glm::value_ptr(mvp));
// Cube vertices
glEnableVertexAttribArray(attribute_coord3d);
glBindBuffer(GL_ARRAY_BUFFER, vboCubeVertex);
// Attribute, elements per vertex (x,y,z), the type of each element, take our values as-is, no extra data between each position, offset of first element
glVertexAttribPointer(attribute_coord3d, 3, GL_FLOAT, GL_FALSE, 0, 0);
// Cube colors
glEnableVertexAttribArray(attribute_v_color);
glBindBuffer(GL_ARRAY_BUFFER, vboCubeColors);
// Attribute, elements per vertex (R,G,B), the type of each element, take our values as-is, no extra data between each position, offset of first element
glVertexAttribPointer(attribute_v_color, 3, GL_FLOAT, GL_FALSE, 0, 0);
/* Push each element in buffer_vertices to the vertex shader */
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboCubeFaces);
int size;
glGetBufferParameteriv(GL_ELEMENT_ARRAY_BUFFER, GL_BUFFER_SIZE, &size);
glDrawElements(GL_TRIANGLES, size/sizeof(GLushort), GL_UNSIGNED_SHORT, 0);
}
void Cube::disable(){
glDisableVertexAttribArray(attribute_coord3d);
glDisableVertexAttribArray(attribute_v_color);
}
void Cube::deleteBuffers(){
glDeleteBuffers(1, &vboCubeVertex);
glDeleteBuffers(1, &vboCubeColors);
glDeleteBuffers(1, &vboCubeFaces);
}
File: mouse.hpp
#ifndef MOUSE_H
#define MOUSE_H
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
struct Point {
GLint x;
GLint y;
};
class Mouse{
private:
Point p1, p2;
public:
int show(int button, int state, int x, int y);
};
#endif
File: mouse.cpp
#include "mouse.hpp"
int Mouse::show(int button, int state, int x, int y)
{
if(button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) {
printf("%s\n", "LEFT DOWN");
}
else if(button == GLUT_LEFT_BUTTON && state == GLUT_UP) {
printf("%s\n", "LEFT UP");
}
printf("mouse X: %d, Y: %d\n", x, y);
}
File: shader_utils.hpp
#ifndef _CREATE_SHADER_H
#define _CREATE_SHADER_H
#include <GL/glew.h>
char* file_read(const char* filename);
void print_log(GLuint object);
GLuint create_shader(const char* filename, GLenum type);
GLuint create_program(const char* vertexfile, const char *fragmentfile);
GLuint create_gs_program(const char* vertexfile, const char *geometryfile, const char *fragmentfile, GLint input, GLint output, GLint vertices);
GLint get_attrib(GLuint program, const char *name);
GLint get_uniform(GLuint program, const char *name);
#endif
File: shader_utils.cpp
#include <stdio.h>
#include <stdlib.h>
#include "shader_utils.hpp"
char* file_read(const char* filename)
{
FILE* in = fopen(filename, "rb");
if (in == NULL) return NULL;
int res_size = BUFSIZ;
char* res = (char*)malloc(res_size);
int nb_read_total = 0;
while (!feof(in) && !ferror(in)) {
if (nb_read_total + BUFSIZ > res_size) {
if (res_size > 10*1024*1024) break;
res_size = res_size * 2;
res = (char*)realloc(res, res_size);
}
char* p_res = res + nb_read_total;
nb_read_total += fread(p_res, 1, BUFSIZ, in);
}
fclose(in);
res = (char*)realloc(res, nb_read_total + 1);
res[nb_read_total] = '\0';
return res;
}
void print_log(GLuint object)
{
GLint log_length = 0;
if (glIsShader(object))
glGetShaderiv(object, GL_INFO_LOG_LENGTH, &log_length);
else if (glIsProgram(object))
glGetProgramiv(object, GL_INFO_LOG_LENGTH, &log_length);
else {
fprintf(stderr, "printlog: Not a shader or a program\n");
return;
}
char* log = (char*)malloc(log_length);
if (glIsShader(object))
glGetShaderInfoLog(object, log_length, NULL, log);
else if (glIsProgram(object))
glGetProgramInfoLog(object, log_length, NULL, log);
fprintf(stderr, "%s", log);
free(log);
}
GLuint create_shader(const char* filename, GLenum type)
{
const GLchar* source = file_read(filename);
if (source == NULL) {
fprintf(stderr, "Error opening %s: ", filename); perror("");
return 0;
}
GLuint res = glCreateShader(type);
const GLchar* sources[] = {
// Define GLSL version
#ifdef GL_ES_VERSION_2_0
"#version 100\n"
#else
"#version 120\n"
#endif
,
// GLES2 precision specifiers
#ifdef GL_ES_VERSION_2_0
// Define default float precision for fragment shaders:
(type == GL_FRAGMENT_SHADER) ?
"#ifdef GL_FRAGMENT_PRECISION_HIGH\n"
"precision highp float; \n"
"#else \n"
"precision mediump float; \n"
"#endif \n"
: ""
// Note: OpenGL ES automatically defines this:
// #define GL_ES
#else
// Ignore GLES 2 precision specifiers:
"#define lowp \n"
"#define mediump\n"
"#define highp \n"
#endif
,
source };
glShaderSource(res, 3, sources, NULL);
free((void*)source);
glCompileShader(res);
GLint compile_ok = GL_FALSE;
glGetShaderiv(res, GL_COMPILE_STATUS, &compile_ok);
if (compile_ok == GL_FALSE) {
fprintf(stderr, "%s:", filename);
print_log(res);
glDeleteShader(res);
return 0;
}
return res;
}
GLuint create_program(const char *vertexfile, const char *fragmentfile) {
GLuint program = glCreateProgram();
GLuint shader;
if(vertexfile) {
shader = create_shader(vertexfile, GL_VERTEX_SHADER);
if(!shader)
return 0;
glAttachShader(program, shader);
}
if(fragmentfile) {
shader = create_shader(fragmentfile, GL_FRAGMENT_SHADER);
if(!shader)
return 0;
glAttachShader(program, shader);
}
glLinkProgram(program);
GLint link_ok = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &link_ok);
if (!link_ok) {
fprintf(stderr, "glLinkProgram:");
print_log(program);
glDeleteProgram(program);
return 0;
}
return program;
}
#ifdef GL_GEOMETRY_SHADER
GLuint create_gs_program(const char *vertexfile, const char *geometryfile, const char *fragmentfile, GLint input, GLint output, GLint vertices) {
GLuint program = glCreateProgram();
GLuint shader;
if(vertexfile) {
shader = create_shader(vertexfile, GL_VERTEX_SHADER);
if(!shader)
return 0;
glAttachShader(program, shader);
}
if(geometryfile) {
shader = create_shader(geometryfile, GL_GEOMETRY_SHADER);
if(!shader)
return 0;
glAttachShader(program, shader);
glProgramParameteriEXT(program, GL_GEOMETRY_INPUT_TYPE_EXT, input);
glProgramParameteriEXT(program, GL_GEOMETRY_OUTPUT_TYPE_EXT, output);
glProgramParameteriEXT(program, GL_GEOMETRY_VERTICES_OUT_EXT, vertices);
}
if(fragmentfile) {
shader = create_shader(fragmentfile, GL_FRAGMENT_SHADER);
if(!shader)
return 0;
glAttachShader(program, shader);
}
glLinkProgram(program);
GLint link_ok = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &link_ok);
if (!link_ok) {
fprintf(stderr, "glLinkProgram:");
print_log(program);
glDeleteProgram(program);
return 0;
}
return program;
}
#else
GLuint create_gs_program(const char *vertexfile, const char *geometryfile, const char *fragmentfile, GLint input, GLint output, GLint vertices) {
fprintf(stderr, "Missing support for geometry shaders.\n");
return 0;
}
#endif
GLint get_attrib(GLuint program, const char *name) {
GLint attribute = glGetAttribLocation(program, name);
if(attribute == -1)
fprintf(stderr, "Could not bind attribute %s\n", name);
return attribute;
}
GLint get_uniform(GLuint program, const char *name) {
GLint uniform = glGetUniformLocation(program, name);
if(uniform == -1)
fprintf(stderr, "Could not bind uniform %s\n", name);
return uniform;
}
File: start.cpp
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "shader_utils.hpp"
#include "mouse.hpp"
#include "cube.hpp"
int scrWidth = 600;
int scrHeight = 600;
GLuint shaderProgram;
GLint uniform_mvp;
Cube cube1;
Cube cube2;
glm::mat4 mvpCube1;
glm::mat4 mvpCube2;
Mouse theMouse;
void onIdle()
{
glm::vec3 rotationCube1(0, 1, 0); // Rotate cube 1 over Y axis
float angleCube1 = glutGet(GLUT_ELAPSED_TIME) / 1000.0 * 5; // 5° per second
glm::vec3 translationCube1 (0.0, 0.0, -2.0); // Translate cube 1 over Z axis
glm::vec3 rotationCube2(1, 0, 0); // Rotate cube 2 over X axis
float angleCube2 = glutGet(GLUT_ELAPSED_TIME) / 1000.0 * 3; // 3° per second
glm::vec3 translationCube2 (1.0, 0.0, 0.0); // Translate cube 2 over X axis
// Frustum parameters
float angleVision = 45.0f; // Also called Fovy
float aspect = 1.0f * scrWidth / scrHeight;
float zNear = 0.1f;
float zFar = 10.f;
// Camera parameters
glm::vec3 cameraEye(0.0, 2.0, -10.0);
glm::vec3 cameraCenter(0.0, 0.0, -5.0);
glm::vec3 cameraUp(0.0, 1.0, 0.0);
// Method lookAt has 3 parameters: eye, center, up
glm::mat4 view = glm::lookAt(cameraEye, cameraCenter, cameraUp);
// Create the projection matrix
glm::mat4 projection = glm::perspective(angleVision, aspect, zNear, zFar);
glm::mat4 modelCube1 = glm::mat4(); // Load model with the identity matrix
modelCube1 = glm::translate(modelCube1, translationCube1); // The model is now translated.
modelCube1 = glm::rotate(modelCube1, angleCube1, rotationCube1); // The model is now translated and rotated.
mvpCube1 = projection * view * modelCube1;
glm::mat4 modelCube2 = glm::mat4(); // Load model with the identity matrix
modelCube2 = glm::translate(modelCube2, translationCube2); // The model is now translated.
modelCube2 = glm::rotate(modelCube2, angleCube2, rotationCube2); // The model is now translated and rotated.
mvpCube2 = projection * view * modelCube2;
glUseProgram(shaderProgram);
glutPostRedisplay();
}
void onDisplay()
{
glClearColor(0.0, 0.0, 0.0, 1.0); // Set color (0, 0, 0 = Black)
// R,G,B,A
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glUseProgram(shaderProgram); // Program with loaded shaders
cube1.draw(uniform_mvp, mvpCube1);
cube1.disable();
cube2.draw(uniform_mvp, mvpCube2);
cube2.disable();
glutSwapBuffers();
}
void onReshape(int width, int height)
{
fprintf(stderr, "onReshape\n");
scrWidth = width;
scrHeight = height;
glViewport(0, 0, scrWidth, scrHeight);
}
void free_resources()
{
fprintf(stderr, "free_resources\n");
glDeleteProgram(shaderProgram);
cube1.deleteBuffers();
cube2.deleteBuffers();
}
void myMouseFunc(int button, int state, int x, int y)
{
theMouse.show(button, state, x, y);
}
int init(){
uniform_mvp = glGetUniformLocation(shaderProgram, "mvp");
if (uniform_mvp == -1)
{
fprintf(stderr, "Could not bind uniform %s\n", "mvp");
return 0;
}
return 1;
}
int main(int argc, char* argv[])
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA|GLUT_ALPHA|GLUT_DOUBLE|GLUT_DEPTH);
glutInitWindowSize(scrWidth, scrHeight);
glutCreateWindow("Two Cubes");
GLenum glew_status = glewInit();
if (glew_status != GLEW_OK)
{
fprintf(stderr, "Error: %s\n", glewGetErrorString(glew_status));
return 1;
}
if (!GLEW_VERSION_2_0)
{
fprintf(stderr, "Error: your graphic card does not support OpenGL 2.0\n");
return 1;
}
GLint link_ok = GL_FALSE;
GLuint vs, fs;
if ((vs = create_shader("cube_vertex.glsl", GL_VERTEX_SHADER)) == 0) return 0;
if ((fs = create_shader("cube_fragment.glsl", GL_FRAGMENT_SHADER)) == 0) return 0;
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vs);
glAttachShader(shaderProgram, fs);
glLinkProgram(shaderProgram);
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &link_ok);
if (!link_ok)
{
fprintf(stderr, "glLinkProgram:");
print_log(shaderProgram);
return 0;
}
cube1 = Cube();
cube2 = Cube();
if (init() && cube1.init_resources(shaderProgram) && cube2.init_resources(shaderProgram))
{
glutMouseFunc(myMouseFunc);
glutDisplayFunc(onDisplay); // Set the display function for the current window.
glutReshapeFunc(onReshape); // Set the reshape function for the current window.
glutIdleFunc(onIdle); // Set the global idle callback (perform background processing)
// Enable server side capabilities
glEnable(GL_BLEND); // GL_BLEND: If enabled, blend the computed fragment color values with the
// values in the color buffers. See glBlendFunc.
glEnable(GL_DEPTH_TEST); // GL_DEPTH_TEST: If enabled, do depth comparisons and update the depth buffer.
// Note that even if the depth buffer exists and the depth mask is non-zero,
// the depth buffer is not updated if the depth test is disabled. See glDepthFunc
// and glDepthRange.
//glDepthFunc(GL_LESS);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glutMainLoop();
}
free_resources();
return 0;
}
File: cube_fragment.glsl
varying vec3 f_color;
void main(void) {
gl_FragColor = vec4(f_color.x, f_color.y, f_color.z, 1.0);
}
File: cube_vertex.glsl
attribute vec3 coord3d;
attribute vec3 v_color;
uniform mat4 mvp;
varying vec3 f_color;
void main(void)
{
gl_Position = mvp * vec4(coord3d, 1.0);
f_color = v_color;
}
File: Makefile
LDLIBS=-lglut -lGLEW -lGL -lm
all: start
clean:
rm -f *.o start
start: start.o mouse.o shader_utils.o cube.o
.PHONY: all clean
The result is two rotating cubes.
Firstly, GLM is a maths library that provides vectors, matrices etc that behave and look like (as much as possible) vectors and matrices used in GLSL - that is, shader code. GLM is useful when writing applications that use OpenGL, but it actually isn't necessary, and it's got nothing to do with rendering objects.
If you want to draw different objects using different vertex arrays, read a tutorial such as the one mentioned or check out http://www.arcsynthesis.org/gltut/. See how it loads data into a vertex array and renders it, then do this twice for your different arrays. Bear in mind that switching vertex arrays is slow - you might want to load multiple objects into one vertex buffer and then use a render command with index offsets (e.g. glMultiDrawElementsBaseVertex).
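As a minimal sketch of that idea (the helper name and counts are hypothetical; the point is that each object owns completely independent vertex data):
// Init: one VAO/VBO pair per object, each uploaded from its own vertex array.
GLuint vaoCube = buildVertexArray(cubeVertices, sizeof(cubeVertices)); // hypothetical helper that
GLuint vaoShip = buildVertexArray(shipVertices, sizeof(shipVertices)); // creates a VAO+VBO and uploads data

// Draw: bind each object's VAO and issue its own draw call.
glBindVertexArray(vaoCube);
glDrawArrays(GL_TRIANGLES, 0, cubeVertexCount);
glBindVertexArray(vaoShip);
glDrawArrays(GL_TRIANGLES, 0, shipVertexCount);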

Implementing a fragment shader that uses a uniform sampler2D (LWJGL)

I am unable to successfully run a shader, and I seem to be missing some step to make it all work. I end up with this error:
Exception in thread "main" org.lwjgl.opengl.OpenGLException: Invalid operation (1282)
at org.lwjgl.opengl.Util.checkGLError(Util.java:59)
at org.lwjgl.opengl.GL20.glUniform1i(GL20.java:374)
at sprites.Sprite.draw(Sprite.java:256)
at gui.Game.drawFrame(Game.java:238)
at gui.Game.gameLoop(Game.java:205)
at gui.Game.startGame(Game.java:244)
at tests.simple.SimpleShader.main(SimpleShader.java:36)
My initialization begins with:
int frag = FilterLoader.createShader("/tests/resources/shaders/grayscale.frag", GL20.GL_FRAGMENT_SHADER);
and the createShader method looks like the following:
int shader = GL20.glCreateShader(type);
if(shader == 0)
return 0;
StringBuilder code = new StringBuilder("");
String line;
try
{
String path = FilterLoader.class.getResource(filename).getPath();
BufferedReader reader = new BufferedReader(new FileReader(path));
while((line = reader.readLine()) != null)
{
code.append(line + "\n");
}
}
catch(Exception e)
{
e.printStackTrace();
System.err.println("Error reading in " + type + " shader");
return 0;
}
GL20.glShaderSource(shader, code);
GL20.glCompileShader(shader);
return shader;
I then attach the shader to the specific Sprite with:
two.addFragmentShader(frag); //two is a Sprite
which is simply:
fragmentShader = fragment_shader;
GL20.glAttachShader(shader, fragment_shader);
GL20.glLinkProgram(shader);
The int shader has been previously initialized in the Sprite's constructor with:
shader = GL20.glCreateProgram();
That was a previous problem, but obviously no longer. Now we get to where the actual error occurs, in the Sprite's (two in this case) draw method, which looks like so:
if(true)
{
GL20.glUseProgram(shader);
}
glPushMatrix();
glActiveTexture(GL13.GL_TEXTURE0);
imageData.getTexture().bind();
//The line below is where the error occurs.
GL20.glUniform1i(fragmentShader, GL13.GL_TEXTURE0);
int tx = (int)location.x;
int ty = (int)location.y;
glTranslatef(tx, ty, location.layer);
float texture_X = ((float)which_column/(float)columns);
float texture_Y = ((float)which_row/(float)rows);
float texture_XplusWidth = ((float)(which_column+wide)/(float)columns);
float texture_YplusHeight = ((float)(which_row+tall)/(float)rows);
glBegin(GL_QUADS);
{
GL11.glTexCoord2f(texture_X, texture_Y);
glVertex2f(0, 0);
GL11.glTexCoord2f(texture_X, texture_YplusHeight);
glVertex2f(0, getHeight());
GL11.glTexCoord2f(texture_XplusWidth, texture_YplusHeight);
glVertex2f(getWidth(), getHeight());
GL11.glTexCoord2f(texture_XplusWidth, texture_Y);
glVertex2f(getWidth(), 0);
}
glEnd();
GL20.glUseProgram(0);
glPopMatrix();
And the error occurs at this line:
GL20.glUniform1i(fragmentShader, GL13.GL_TEXTURE0);
And for reference, my shader:
// simple fragment shader
uniform sampler2D texture;
void main()
{
vec4 color, texel;
color = gl_Color;
texel = texture2D(texture, gl_TexCoord[0].xy);
color *= texel;
float gray = dot(color.rgb, vec3(0.299, 0.587, 0.114));
gl_FragColor = vec4(gray, gray, gray, color.a);
}
I've gone through the tutorials, read about the error, and I can't figure out what step I have missed.
GL20.glUniform1i(fragmentShader, GL13.GL_TEXTURE0);
This is wrong. The first parameter of glUniform1i is the uniform location, which you can get with glGetUniformLocation.
The second parameter is an integer, but for a texture sampler you need to pass the texture unit number (0, 1, 2, etc.), and bind the texture to that texture unit, for example:
glUseProgram(program);
int loc = glGetUniformLocation(program, "texture");
glUniform1i(loc, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texId);
Then it should work.