I have drawn a box. I added a basic class that deals with the VAO and VBO.
class BasicObject_V2
{
public:
BasicObject_V2(GLuint typeOfPrimitives = 0){
this->typeOfPrimitives = typeOfPrimitives;
switch (this->typeOfPrimitives){
case GL_QUADS: numVertsPerPrimitive = 4; break;
}
}
void allocateMemory(){
glGenVertexArrays(1, &vaoID);
glBindVertexArray(vaoID);
glGenBuffers(1, &vboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertexAttributes), &vertices[0], GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 48, (const void*)0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 48, (const void*)12);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, 48, (const void*)24);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 48, (const void*)40);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
}
GLuint vboID;
GLuint vaoID;
GLuint typeOfPrimitives;
GLuint numVertsPerPrimitive;
int getNumberOfVertices(){
return vertices.size();
}
struct vertexAttributes{
glm::vec3 pos;
glm::vec3 normal;
glm::vec4 color;
glm::vec2 texCoord;
};
std::vector<vertexAttributes> vertices;
};
Here is my Box class.
class Box_V2 : public BasicObject_V2 {
public:
Box_V2(float width = 1, float height = 1, float length = 1) : BasicObject_V2(GL_QUADS) {
float wHalf = width / 2.0f;
float hHalf = height / 2.0f;
float lHalf = length / 2.0f;
vertexAttributes va;
glm::vec3 corners[8];
corners[0] = glm::vec3(-wHalf, -hHalf, lHalf);
corners[1] = (...)
glm::vec3 normals[6];
normals[0] = glm::vec3(0, 0, 1); // front
//(...)
// FRONT
va.pos = corners[0];
va.normal = normals[0];
va.texCoord = glm::vec2(0.0, 0.0);
vertices.push_back(va);
//...
allocateMemory();
}
};
Also I have set up the texture for it (according to the OpenGL SuperBible example), but it doesn't work at all.
static GLubyte checkImage[128][128][4];
void makeCheckImage(void)
{
int i, j, c;
for (i = 0; i<128; i++) {
for (j = 0; j<128; j++) {
c = ((((i & 0x8) == 0) ^ ((j & 0x8)) == 0)) * 255;
checkImage[i][j][0] = (GLubyte)c;
checkImage[i][j][1] = (GLubyte)c;
checkImage[i][j][2] = (GLubyte)c;
checkImage[i][j][3] = (GLubyte)255;
}
}
}
void init(){
glClearColor(0.7, 0.4, 0.6, 1);
//Create a Texture
makeCheckImage();
//Texure
glGenTextures(4, tex);
//glCreateTextures(GL_TEXTURE_2D, 4, tex);//4.5
glTextureStorage2D(tex[0], 0, GL_RGBA32F, 2, 2);
glBindTexture(GL_TEXTURE_2D, tex[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGBA, GL_FLOAT, checkImage);
//glTextureSubImage2D(tex[0], 0, 0, 0, 2, 2, GL_RGBA, GL_FLOAT, checkImage);//4.5
//load the shader
programID = loadShaders("vertex.vsh", "fragment.fsh");
glUseProgram(programID);
//create a box
box_v2 = new Box_V2();
glBindTexture(GL_TEXTURE_2D, tex[0]);
}
void drawing(BasicObject* object){
glBindVertexArray(object->vaoID);
glDrawArrays(object->typeOfPrimitives, 0, object->getNumberOfVertices() * object->numVertsPerPrimitive);
glBindVertexArray(0);
}
These are my shaders:
VS:
#version 450 core
layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
layout(location = 3) in vec2 texCoord;
uniform mat4 rotate;(...)
out vec4 v_normal;
out vec4 v_color;
out vec4 v_pos;
out vec2 v_texCoord;
void main(){
v_normal = rotate*vec4(normal,1);//*model;
v_color=color;
v_texCoord=texCoord;
gl_Position = rotate*scale*trans*vec4(pos.xyz, 1);
}
And FS:
#version 450 core
in vec4 v_color;
in vec2 v_texCoord;
uniform sampler2D ourTexture;
out vec4 outColor;
void main(){
//outColor=texelFetch(ourTexture, ivec2(gl_FragCoord.xy), 0); //4.5
outColor=texture(ourTexture,v_texCoord*vec2(3.0, 1.0));
}
Now it draws a black box without any texture on it. I'm new to OpenGL and I can't find where the problem is.
There are a lot of things wrong with your code. It looks like you tried to write it one way, then tried to write it another way, leading to some kind of Frankenstein's code where it's not really one way or the other.
glGenTextures(4, tex);
//glCreateTextures(GL_TEXTURE_2D, 4, tex);//4.5
glTextureStorage2D(tex[0], 0, GL_RGBA32F, 2, 2);
This is a perfect example. You use the non-DSA glGenTextures call, but you immediately turn around and use the DSA glTextureStorage2D call on it. You cannot do that. Why? Because tex[0] hasn't been created yet.
glGenTextures only allocates the name for the texture, not its state data. Only by binding the texture does it get contents. That's why DSA introduced the glCreate* functions; they allocate both the name and state for the object. This is also why glCreateTextures takes a target; that target is part of the texture object's state.
In short, the commented-out call was correct all along. Looking at your code as written, the glTextureStorage2D call fails with an OpenGL error (FYI: turn on KHR_debug to see when errors happen).
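For reference, enabling debug output could look like the sketch below (my addition; it assumes the context was created as a debug context, with GL 4.3+ or KHR_debug available):
// Sketch: synchronous debug output, so errors are reported on the offending call.
void GLAPIENTRY debugCallback(GLenum source, GLenum type, GLuint id,
GLenum severity, GLsizei length, const GLchar* message, const void* userParam)
{
fprintf(stderr, "GL debug: %s\n", message);
}
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
glDebugMessageCallback(debugCallback, nullptr);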
So let's look at what your code would look like if we take out the failing parts:
glGenTextures(4, tex);
glBindTexture(GL_TEXTURE_2D, tex[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGBA, GL_FLOAT, checkImage);
There are two problems here. First, as BDL mentioned, your data is described incorrectly. You're passing GLubytes, but you told OpenGL you were passing GLfloats. Don't lie to OpenGL.
Second, there's the internal format. Because you used an unsized internal format (and because you're not in OpenGL ES land), the format your implementation chooses will be unsigned normalized. It won't be a 32-bit floating point format. If you want one of those, you must explicitly ask for it, with GL_RGBA32F.
Furthermore, because you allocated mutable storage for your texture (ie: because glTextureStorage2D failed), you now have a problem. You only allocated one mipmap level, but the default filtering mode will try to fetch from multiple mipmaps. And you never set the texture's mipmap range to only sample from the first. So your texture is not complete. An immutable storage texture would have made the mipmap range match what you allocated, but mutable storage textures aren't so nice.
You should always set your mipmap range when creating mutable storage textures:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0); //It's a closed range.
Lastly, you bind the texture to unit 0 for rendering (mostly by accident, since you never call glActiveTexture). But you never bother to inform your shader of this, by setting the ourTexture uniform value to 0.
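Putting those fixes together, a consistent GL 4.5 DSA version might look like this sketch (sizes taken from the 128x128 GLubyte array the question actually builds; adapt names to your code):
glCreateTextures(GL_TEXTURE_2D, 4, tex); // allocates name *and* state
glTextureStorage2D(tex[0], 1, GL_RGBA8, 128, 128); // 1 level, explicit sized format
glTextureSubImage2D(tex[0], 0, 0, 0, 128, 128, GL_RGBA, GL_UNSIGNED_BYTE, checkImage); // matches the GLubyte data
glBindTextureUnit(0, tex[0]); // texture unit 0
glUniform1i(glGetUniformLocation(programID, "ourTexture"), 0); // with programID in use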
There are several problems with your code:
Data type doesn't match data
The data comes in an unsigned byte format (GLubyte checkImage[128][128][4]) with a size of 4 bytes per pixel (1 byte per channel), but you tell OpenGL that it is in GL_FLOAT format, which would require 16 bytes per pixel (4 bytes per channel).
Data storage
See answer from 246Nt.
Mipmaps/Filters
By default OpenGL has the minification filter set to GL_NEAREST_MIPMAP_LINEAR, which requires the user to supply valid mipmaps for the texture. One can either change this filter to GL_LINEAR or GL_NEAREST, which do not require mipmaps (see glTexParameteri), or generate mipmaps (glGenerateMipmap).
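For completeness, a corrected non-DSA upload addressing all three points could look like this sketch (again using the question's 128x128 GLubyte data):
glGenTextures(4, tex);
glBindTexture(GL_TEXTURE_2D, tex[0]);
// Sized internal format, and a data type that matches the GLubyte array:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 128, 128, 0, GL_RGBA, GL_UNSIGNED_BYTE, checkImage);
// Either restrict sampling to the one level that exists ...
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
// ... or pick a minification filter that needs no mipmaps:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);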
I generate a PointCloud in my program, and now, I want to be able to click on a point in this point cloud rendered to my screen using OpenGL.
In order to do so, I used the trick of giving each point in an offscreen render a colour based on its index in the VBO. I use the same camera for my offscreen render and my onscreen render so they move together, and when I click, I read the values of my offscreen render to retrieve the position in the VBO and get the point I clicked on. This is the theory, since when I click, I only get (0,0,0). I believe that means my FBO is not rendered correctly, but I'm not sure whether it is that or if the problem comes from somewhere else...
So here are the steps. clicFBO is the FBO I'm using for offscreen rendering, and clicTextureColorBuf is the texture the FBO writes into
glGenFramebuffers(1, &clicFBO);
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glGenTextures(1, &clicTextureColorBuf);
glBindTexture(GL_TEXTURE_2D, clicTextureColorBuf);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, clicTextureColorBuf, 0);
GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, DrawBuffers);
After that, I wrote a shader that gives each point a color based on its index in the VBO...
std::vector<cv::Point3f> reconstruction3D; //Will contain the position of my points
std::vector<float> indicesPointsVBO; //Will contain the indexes of each point
for (int i = 0; i < pts3d.size(); ++i) {
reconstruction3D.push_back(pts3d[i].pt3d);
colors3D.push_back(pt_tmp);
indicesPointsVBO.push_back(((float)i / (float)pts3d.size() ));
}
GLuint clicVAO, clicVBO[2];
glGenVertexArrays(1, &clicVAO);
glGenBuffers(2, &clicVBO[0]);
glBindVertexArray(clicVAO);
glBindBuffer(GL_ARRAY_BUFFER, clicVBO[0]);
glBufferData(GL_ARRAY_BUFFER, reconstruction3D.size() * sizeof(cv::Point3f), &reconstruction3D[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glEnable(GL_PROGRAM_POINT_SIZE);
glBindBuffer(GL_ARRAY_BUFFER, clicVBO[1]);
glBufferData(GL_ARRAY_BUFFER, indicesPointsVBO.size() * sizeof(float), &indicesPointsVBO[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
and the vertex shader:
layout (location = 0) in vec3 pos;
layout (location = 1) in float col;
out float Col;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform int pointSize;
void main()
{
gl_PointSize = pointSize;
gl_Position = projection * view * model * vec4(pos, 1.0);
Col = col;
}
And the Fragment:
#version 330 core
layout(location = 0) out vec4 FragColor;
in float Col;
void main()
{
FragColor = vec4(Col, Col, Col ,1.0);
}
And this is how I render this texture:
glm::mat4 view = camera.GetViewMatrix();
glm::mat4 projection = glm::perspective(glm::radians(camera.Zoom), (float)SCR_WIDTH / (float)SCR_HEIGHT, 1.0f, 100.0f);
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
clicShader.use();
glDisable(GL_DEPTH_TEST);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
clicShader.setMat4("projection", projection);
clicShader.setMat4("view", view);
model = glm::mat4();
clicShader.setMat4("model", model);
clicShader.setInt("pointSize", pointSize);
glBindVertexArray(clicVAO);
glDrawArrays(GL_POINTS, 0, (GLsizei)reconstruction3D.size());
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And then, when I click, I use this piece of code:
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glReadBuffer(GL_COLOR_ATTACHMENT0);
int width = 11, height = 11;
std::array<GLfloat, 363> arry{ 1 };
glReadPixels(Xpos - 5, Ypos - 5, width, height, GL_RGB, GL_UNSIGNED_BYTE, &arry);
for (int i = 0; i < 363; i+=3) { // It's 3 times the same number anyway for each pixel
std::cout << arry[i] << " "; // It gives me only 0's
}
std::cout << std::endl << std::endl;
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
I know the error might be really stupid, but I still have some problems understanding how OpenGL works.
I put what I thought was necessary to understand the problem (without extending this too much), but if you need more code, I can add it too.
I know this is not a question you can answer with a simple yes or no, and it's more about debugging my program, but since I really can't find where the problem comes from, I'm looking for someone who can explain to me what I did wrong. I do not necessarily seek the solution itself, but clues that could help me understand where my error is...
Using a framebuffer object (FBO) to store an "object identifier" is a cool method. But you also want to see the objects, right? Then you must also render to the default framebuffer (let me call it "defFB", which is not an FBO).
Because you need to render to two different targets, you need one of these techniques:
Draw the objects twice (e.g. with two glDrawArrays calls), once to the FBO and a second time to the defFB.
Draw to two of the FBO's images at once and later blit one of them (the one with colors) to the defFB.
For the first technique you may use a texture attached to an FBO (as you currently do). Or you can use a "Renderbuffer" and draw to it.
The second approach needs a second "out" in the fragment shader:
layout(location = 0) out vec3 color; //GL_COLOR_ATTACHMENT0
layout(location = 1) out vec3 objID; //GL_COLOR_ATTACHMENT1
and setting the two attachments with glDrawBuffers.
For the blit part, read this answer.
Note that both "out" have the same format, vec3 in this example.
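On the C++ side, the two-attachment setup could be sketched like this (colorTex and idTex are hypothetical texture ids, created the same way as clicTextureColorBuf):
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
// location = 0 in the FS writes to attachment 0, location = 1 to attachment 1
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, idTex, 0);
GLenum bufs[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
glDrawBuffers(2, bufs);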
One mistake in your code is that you set an RGB texture format and also use this format in glReadPixels, but your "out" in the FS is a vec4 instead of a vec3.
More concerns are:
Check the completeness with glCheckFramebufferStatus (see the sketch after this list).
Using a "depth attachment" with the FBO may be needed, even if it will not be used for reading (also in the sketch below).
Disabling the depth test will put all elements in the frame. Your point-picking will select the last one drawn, not the nearest.
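A sketch for the first two points (assuming the FBO should match the screen size used in the question):
// With clicFBO still bound: attach a depth renderbuffer, then verify completeness.
GLuint depthRbo;
glGenRenderbuffers(1, &depthRbo);
glBindRenderbuffer(GL_RENDERBUFFER, depthRbo);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, SCR_WIDTH, SCR_HEIGHT);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRbo);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
std::cout << "clicFBO is not complete" << std::endl;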
I found the problem.
There were 2 failures in my code :
The first one is that in OpenGL there is a Y inversion between the image and the framebuffer. So in order to pick the right point, you have to flip Y using the size of the viewport. I did it like this:
GLint m_viewport[4];
glGetIntegerv(GL_VIEWPORT, m_viewport);
int YposTMP = m_viewport[3] - Ypos - 1;
The second one is the use of
glReadPixels(Xpos - 2, Ypos - 2, width, height, GL_RGB, GL_UNSIGNED_BYTE, &pixels[0]); the 6th parameter (the type) must be GL_FLOAT, since the data I'm reading back are floats.
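Putting both fixes together, the read-back might look like this sketch (Xpos and Ypos are the mouse coordinates from the question):
GLint vp[4];
glGetIntegerv(GL_VIEWPORT, vp);
int YposTMP = vp[3] - Ypos - 1; // flip Y
std::array<GLfloat, 363> arry{}; // 11 x 11 x RGB floats
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glReadBuffer(GL_COLOR_ATTACHMENT0);
glReadPixels(Xpos - 5, YposTMP - 5, 11, 11, GL_RGB, GL_FLOAT, arry.data());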
I am trying to wrap my head around the various types of GLSL shaders in OpenGL.
At the moment I am struggling with a 2d layered-tile implementation. For some reason the int values that get passed into my shader are always 0 (or more likely, null).
I currently have a 2048x2048px 2d texture composed of 20x20 tiles. I am trying to texture one quad with it and change the index of the tile based upon the block of ints I pass into the vertex shader.
I am passing in a vec2 of floats for the position of the quad (really a TRIANGLE_STRIP). I am also attempting to pass in 6 ints that will represent the 6 layers of tiles.
My input:
// Build and compile our shader program
Shader ourShader("b_vertex.vertexShader", "b_fragment.fragmentShader");
const int floatsPerPosition = 2;
const int intsPerTriangle = 6;
const int numVertices = 4;
const int sizeOfPositions = sizeof(float) * numVertices * floatsPerPosition;
const int sizeOfColors = sizeof(int) * numVertices * intsPerTriangle;
const int numIndices = 4;
const int sizeOfIndices = sizeof(int) * numIndices;
float positions[numVertices][floatsPerPosition] =
{
{ -1, 1 },
{ -1, -1 },
{ 1, 1 },
{ 1, -1 },
};
// ints indicating Tile Index
int colors[numVertices][intsPerTriangle] =
{
{ 1, 2, 3, 4, 5, 6 },
{ 1, 2, 3, 4, 5, 6 },
{ 1, 2, 3, 4, 5, 6 },
{ 1, 2, 3, 4, 5, 6 },
};
// Indexes on CPU
int indices[numVertices] =
{
0, 1, 2, 3,
};
My setup:
GLuint vao, vbo1, vbo2, ebo; // Identifiers of OpenGL objects
glGenVertexArrays(1, &vao); // Create new VAO
// Binded VAO will store connections between VBOs and attributes
glBindVertexArray(vao);
glGenBuffers(1, &vbo1); // Create new VBO
glBindBuffer(GL_ARRAY_BUFFER, vbo1); // Bind vbo1 as current vertex buffer
// initialize vertex buffer, allocate memory, fill it with data
glBufferData(GL_ARRAY_BUFFER, sizeOfPositions, positions, GL_STATIC_DRAW);
// indicate that current VBO should be used with vertex attribute with index 0
glEnableVertexAttribArray(0);
// indicate how vertex attribute 0 should interpret data in connected VBO
glVertexAttribPointer(0, floatsPerPosition, GL_FLOAT, GL_FALSE, 0, 0);
glGenBuffers(1, &vbo2); // Create new VBO
glBindBuffer(GL_ARRAY_BUFFER, vbo2); // Bind vbo2 as current vertex buffer
// initialize vertex buffer, allocate memory, fill it with data
glBufferData(GL_ARRAY_BUFFER, sizeOfColors, colors, GL_STATIC_DRAW);
// indicate that current VBO should be used with vertex attribute with index 1
glEnableVertexAttribArray(1);
// indicate how vertex attribute 1 should interpret data in connected VBO
glVertexAttribPointer(1, intsPerTriangle, GL_INT, GL_FALSE, 0, 0);
// Create new buffer that will be used to store indices
glGenBuffers(1, &ebo);
// Bind index buffer to corresponding target
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
// ititialize index buffer, allocate memory, fill it with data
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeOfIndices, indices, GL_STATIC_DRAW);
// reset bindings for VAO, VBO and EBO
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// Load and create a texture
GLuint texture1 = loadBMP_custom("uvtemplate3.bmp");
GLuint texture2 = loadBMP_custom("texture1.bmp");
My draw:
// Game loop
while (!glfwWindowShouldClose(window))
{
// Check if any events have been activated (key pressed, mouse moved etc.) and call the corresponding response functions
glfwPollEvents();
// Render
// Clear the colorbuffer
glClearColor(1.f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Activate shader
ourShader.Use();
// Bind Textures using texture units
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
//add some cool params
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
float borderColor[] = { 0.45f, 0.25f, 0.25f, 0.25f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture1"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture2);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture2"), 1);
// Draw container
//glBindVertexArray(VAO);
//glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(vao);
//glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glDrawElements(GL_TRIANGLE_STRIP, numIndices, GL_UNSIGNED_INT, NULL);
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers(window);
}
My shader most definitely works, as I can adjust the output by hard-coding the values from within the vertex shader. My suspicion is that I am not passing the values correctly, or in the correct format, or that I am not indicating somewhere that the int[6] needs to be included per vertex.
I cannot read anything from my layout (location = 1) in int Base[6]; I've tried just about everything I can think of: declaring each int individually, trying to read two ivec3s, uint and whatever else I could think of, but everything comes back as 0.
The following are my vertex and fragment shader for completeness:
#version 330 core
layout (location = 0) in vec2 position;
layout (location = 1) in int Base[6];
out vec2 TexCoord;
out vec2 TexCoord2;
out vec2 TexCoord3;
out vec2 TexCoord4;
out vec2 TexCoord5;
out vec2 TexCoord6;
// 0.5f, 0.5f,// 0.0f, 118.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Top Right
// 0.5f, -0.5f,// 0.0f, 118.0f, 1.0f, 0.0f, 0.0f,0.009765625f, // Bottom Right
// -0.5f, -0.5f,// 0.0f, 118.0f, 0.0f, 1.0f, 0.009765625f, 0.009765625f, // Bottom Left
// -0.5f, 0.5f//, 0.0f, 118.0f, 1.0f, 0.0f, 0.009765625f, 0.0f // Top Left
void main()
{
int curBase = Base[5];
int curVertex = gl_VertexID % 4;
vec2 texCoord = (curVertex == 0?
vec2(0.0,0.0):(
curVertex == 1?
vec2(0.0,0.009765625):(
curVertex == 2?
vec2(0.009765625,0.0):(
curVertex == 3?
vec2(0.009765625,0.009765625):(
vec2(0.0,0.0)))))
);
gl_Position = vec4(position, 0.0f, 1.0f);
TexCoord = vec2(texCoord.x + ((int(curBase)%102)*0.009765625f)
, (1.0 - texCoord.y) - ((int(curBase)/102)*0.009765625f));
//curBase = Base+1;
TexCoord2 = vec2(texCoord.x + ((int(curBase)%102)*0.009765625f)
, (1.0 - texCoord.y) - ((int(curBase)/102)*0.009765625f));
//curBase = Base+2;
TexCoord3 = vec2(texCoord.x + ((int(curBase)%102)*0.009765625f)
, (1.0 - texCoord.y) - ((int(curBase)/102)*0.009765625f));
}
Fragment:
#version 330 core
//in vec3 ourColor;
in vec2 TexCoord;
in vec2 TexCoord2;
in vec2 TexCoord3;
in vec2 TexCoord4;
in vec2 TexCoord5;
in vec2 TexCoord6;
out vec4 color;
// Texture samplers
uniform sampler2D ourTexture1;
uniform sampler2D ourTexture2;
void main()
{
color = (texture(ourTexture2, TexCoord )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord2 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord3 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord4 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord5 )== vec4(1.0,0.0,1.0,1.0)?
(texture(ourTexture2, TexCoord6 )== vec4(1.0,0.0,1.0,1.0)?
vec4(0.0f,0.0f,0.0f,0.0f)
:texture(ourTexture2, TexCoord6 ))
:texture(ourTexture2, TexCoord5 ))
:texture(ourTexture2, TexCoord4 ))
:texture(ourTexture2, TexCoord3 ))
:texture(ourTexture2, TexCoord2 ))
:texture(ourTexture2, TexCoord ));
}
This is wrong in two different ways:
glVertexAttribPointer(1, intsPerTriangle, GL_INT, GL_FALSE, 0, 0);
Vertex attributes in the GL can be scalars or vectors of 2 to 4 components. Hence, the size parameter of glVertexAttribPointer can take the values 1, 2, 3 or 4. Using a different value (intsPerTriangle == 6) means that the call will just generate a GL_INVALID_VALUE error and have no other effect, so you don't even set a pointer.
If you want to pass 6 values per vertex, you can either use 6 different scalar attributes (consuming 6 attribute slots), or pack them into some vectors, like two 3d vectors (consuming only 2 slots). No matter which packing you choose, you'll need a proper attrib pointer setup for each attribute slot in use.
However, glVertexAttribPointer is also the wrong function for your use case. It defines floating-point attributes, which must have matching declarations as float/vec* in the shader. The fact that you can input GL_INT just means that the GPU can do the conversion to floating-point on the fly for you.
If you want to use an int or ivec (or their unsigned counterparts) attribute, you have to use glVertexAttribIPointer (note the I in that function name) when setting up the attribute.
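Applied to the setup in the question, packing the six ints into two ivec3 slots might look like this sketch (the shader would then declare layout (location = 1) in ivec3 BaseA; and layout (location = 2) in ivec3 BaseB;, hypothetical names):
glBindBuffer(GL_ARRAY_BUFFER, vbo2);
glBufferData(GL_ARRAY_BUFFER, sizeOfColors, colors, GL_STATIC_DRAW);
// Slot 1: the first three ints of each 6-int vertex record.
glEnableVertexAttribArray(1);
glVertexAttribIPointer(1, 3, GL_INT, 6 * sizeof(int), (GLvoid*)0);
// Slot 2: the remaining three ints; note the byte offset into the record.
glEnableVertexAttribArray(2);
glVertexAttribIPointer(2, 3, GL_INT, 6 * sizeof(int), (GLvoid*)(3 * sizeof(int)));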
How can I get the z-coordinate of an object in 3D space when I click on it?
(It's not really an object, more a graph; I need to know what the user selected.) I use JOGL.
I just finished porting a picking sample from g-truc's ogl-samples.
I will try to give you a quick explanation about the code.
We start by enabling the depth test
private boolean initTest(GL4 gl4) {
gl4.glEnable(GL_DEPTH_TEST);
return true;
}
In the initBuffer we:
generate all the buffer we need with glGenBuffers
bind the element buffer and transfer the content of our indices. Each index refers to the vertex to use. We need to bind it first because glBufferData will use whatever is bound at the target specified by the first argument, GL_ELEMENT_ARRAY_BUFFER in this case
do the same for the vertices themselves.
get the GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT (it's a global parameter) to determine the minimum uniform block size to store our transform variable. This is necessary if we want to bind it via glBindBufferRange, a function that we will not use here; for binding our picking buffer we instead pass just the size of a float, Float.BYTES
the last argument of glBufferData is just a hint (it's up to OpenGL and the driver to do what they want); as you see, it is static for the indices and vertices, because we are not gonna change them anymore, but dynamic for the uniform buffers, since we will update them every frame.
Code:
private boolean initBuffer(GL4 gl4) {
gl4.glGenBuffers(Buffer.MAX.ordinal(), bufferName, 0);
gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName[Buffer.ELEMENT.ordinal()]);
ShortBuffer elementBuffer = GLBuffers.newDirectShortBuffer(elementData);
gl4.glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementSize, elementBuffer, GL_STATIC_DRAW);
gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.VERTEX.ordinal()]);
FloatBuffer vertexBuffer = GLBuffers.newDirectFloatBuffer(vertexData);
gl4.glBufferData(GL_ARRAY_BUFFER, vertexSize, vertexBuffer, GL_STATIC_DRAW);
gl4.glBindBuffer(GL_ARRAY_BUFFER, 0);
int[] uniformBufferOffset = {0};
gl4.glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, uniformBufferOffset, 0);
int uniformBlockSize = Math.max(projection.length * Float.BYTES, uniformBufferOffset[0]);
gl4.glBindBuffer(GL_UNIFORM_BUFFER, bufferName[Buffer.TRANSFORM.ordinal()]);
gl4.glBufferData(GL_UNIFORM_BUFFER, uniformBlockSize, null, GL_DYNAMIC_DRAW);
gl4.glBindBuffer(GL_UNIFORM_BUFFER, 0);
gl4.glBindBuffer(GL_TEXTURE_BUFFER, bufferName[Buffer.PICKING.ordinal()]);
gl4.glBufferData(GL_TEXTURE_BUFFER, Float.BYTES, null, GL_DYNAMIC_READ);
gl4.glBindBuffer(GL_TEXTURE_BUFFER, 0);
return true;
}
In the initTexture we initialize our textures, we:
generate both the textures with glGenTextures
set the GL_UNPACK_ALIGNMENT to 1 (the default is usually 4 bytes) in order to avoid any problems at all (because each row of your texture data must match the alignment).
set the active texture to GL_TEXTURE0; there is a specific number of texture slots, and you need to specify one before working on any texture.
bind the diffuse texture
set the swizzle, that is what each channel will receive
set the levels (mipmap), where 0 is the base (original/biggest)
set the filters
allocate the space, levels included with glTexStorage2D
transfer for each level the corresponding data
reset back the GL_UNPACK_ALIGNMENT
bind to GL_TEXTURE0 our other texture PICKING
allocate storage for a single 32-bit float and associate the PICKING texture with the PICKING buffer via glTexBuffer
Code:
private boolean initTexture(GL4 gl4) {
try {
jgli.Texture2D texture = new Texture2D(jgli.Load.load(TEXTURE_ROOT + "/" + TEXTURE_DIFFUSE));
jgli.Gl.Format format = jgli.Gl.instance.translate(texture.format());
gl4.glGenTextures(Texture.MAX.ordinal(), textureName, 0);
// Diffuse
{
gl4.glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
gl4.glActiveTexture(GL_TEXTURE0);
gl4.glBindTexture(GL_TEXTURE_2D, textureName[Texture.DIFFUSE.ordinal()]);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_RED);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_GREEN);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_BLUE);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_ALPHA);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, texture.levels() - 1);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
gl4.glTexStorage2D(GL_TEXTURE_2D, texture.levels(), format.internal.value,
texture.dimensions(0)[0], texture.dimensions(0)[1]);
for (int level = 0; level < texture.levels(); ++level) {
gl4.glTexSubImage2D(GL_TEXTURE_2D, level,
0, 0,
texture.dimensions(level)[0], texture.dimensions(level)[1],
format.external.value, format.type.value,
texture.data(0, 0, level));
}
gl4.glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
}
// Picking
{
gl4.glBindTexture(GL_TEXTURE_BUFFER, textureName[Texture.PICKING.ordinal()]);
gl4.glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, bufferName[Buffer.PICKING.ordinal()]);
gl4.glBindTexture(GL_TEXTURE_BUFFER, 0);
}
} catch (IOException ex) {
Logger.getLogger(Gl_420_picking.class.getName()).log(Level.SEVERE, null, ex);
}
return true;
}
In the initProgram we initialize our program, by:
generating a pipeline (composition of different shaders), glGenProgramPipelines
creating a vertex shader code vertShaderCode, where GL_VERTEX_SHADER is the shader type, SHADERS_ROOT is the place where the shader source is located, SHADERS_SOURCE_UPDATE is the name and "vert" is the extension.
initializing it, similarly for the fragment shader
grabbing the generated index and saving in programName
setting the program separable, nothing useful here, just pure sport, glProgramParameteri
adding both shader to our shaderProgram and linking and compiling it, link
specifying which program stages our pipelineName uses, glUseProgramStages
Code:
private boolean initProgram(GL4 gl4) {
boolean validated = true;
gl4.glGenProgramPipelines(1, pipelineName, 0);
// Create program
if (validated) {
ShaderProgram shaderProgram = new ShaderProgram();
ShaderCode vertShaderCode = ShaderCode.create(gl4, GL_VERTEX_SHADER,
this.getClass(), SHADERS_ROOT, null, SHADERS_SOURCE_UPDATE, "vert", null, true);
ShaderCode fragShaderCode = ShaderCode.create(gl4, GL_FRAGMENT_SHADER,
this.getClass(), SHADERS_ROOT, null, SHADERS_SOURCE_UPDATE, "frag", null, true);
shaderProgram.init(gl4);
programName = shaderProgram.program();
gl4.glProgramParameteri(programName, GL_PROGRAM_SEPARABLE, GL_TRUE);
shaderProgram.add(vertShaderCode);
shaderProgram.add(fragShaderCode);
shaderProgram.link(gl4, System.out);
}
if (validated) {
gl4.glUseProgramStages(pipelineName[0], GL_VERTEX_SHADER_BIT | GL_FRAGMENT_SHADER_BIT, programName);
}
return validated & checkError(gl4, "initProgram");
}
In the initVertexArray we:
generate a single vertex array, glGenVertexArrays, and bind it, glBindVertexArray
bind the vertices buffer and set the attributes for the position and the texture coordinates, here interleaved. The position is identified by the attribute index Semantic.Attr.POSITION (this will match the one in the vertex shader), component size 2, type GL_FLOAT, normalized false, stride (the total size of each vertex) 2 * 2 * Float.BYTES and offset 0 within the vertex. Similarly for the texture coordinates.
unbind the vertices buffer, since it is not part of the vertex array state. It must be bound only for the glVertexAttribPointer call so that OpenGL can know which buffer those parameters refer to.
enable the corresponding vertex attribute array, glEnableVertexAttribArray
bind the element (indices) array, part of the vertex array
Code:
private boolean initVertexArray(GL4 gl4) {
gl4.glGenVertexArrays(1, vertexArrayName, 0);
gl4.glBindVertexArray(vertexArrayName[0]);
{
gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.VERTEX.ordinal()]);
gl4.glVertexAttribPointer(Semantic.Attr.POSITION, 2, GL_FLOAT, false, 2 * 2 * Float.BYTES, 0);
gl4.glVertexAttribPointer(Semantic.Attr.TEXCOORD, 2, GL_FLOAT, false, 2 * 2 * Float.BYTES, 2 * Float.BYTES);
gl4.glBindBuffer(GL_ARRAY_BUFFER, 0);
gl4.glEnableVertexAttribArray(Semantic.Attr.POSITION);
gl4.glEnableVertexAttribArray(Semantic.Attr.TEXCOORD);
gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName[Buffer.ELEMENT.ordinal()]);
}
gl4.glBindVertexArray(0);
return true;
}
In the render we:
bind the TRANSFORM buffer that will contain our transformation matrix.
get a byteBuffer pointer out of that.
calculate the projection, view and model matrices and multiply them in the order p * v * m, also called the mvp matrix.
save our mvp matrix in our pointer and rewind the buffer (position set to 0 again).
unmap it to make sure it gets uploaded to the gpu
set the viewport to match our window size
set the clear depthValue to 1 (superfluous, since it is the default value), then clear the depth buffer with the depthValue and the color buffer with the color {1.0f, 0.5f, 0.0f, 1.0f}
bind the pipeline
set active texture 0
bind the diffuse texture and the picking image texture
bind the vertex array
bind the transform uniform buffer
render; glDrawElementsInstancedBaseVertexBaseInstance is overkill here, but what is important is the primitive type GL_TRIANGLES, the number of indices elementCount and their type GL_UNSIGNED_SHORT
bind the picking texture buffer and retrieve its value
Code:
@Override
protected boolean render(GL gl) {
GL4 gl4 = (GL4) gl;
{
gl4.glBindBuffer(GL_UNIFORM_BUFFER, bufferName[Buffer.TRANSFORM.ordinal()]);
ByteBuffer pointer = gl4.glMapBufferRange(
GL_UNIFORM_BUFFER, 0, projection.length * Float.BYTES,
GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
FloatUtil.makePerspective(projection, 0, true, (float) Math.PI * 0.25f,
(float) windowSize.x / windowSize.y, 0.1f, 100.0f);
FloatUtil.makeIdentity(model);
FloatUtil.multMatrix(projection, view());
FloatUtil.multMatrix(projection, model);
for (float f : projection) {
pointer.putFloat(f);
}
pointer.rewind();
// Make sure the uniform buffer is uploaded
gl4.glUnmapBuffer(GL_UNIFORM_BUFFER);
}
gl4.glViewportIndexedf(0, 0, 0, windowSize.x, windowSize.y);
float[] depthValue = {1.0f};
gl4.glClearBufferfv(GL_DEPTH, 0, depthValue, 0);
gl4.glClearBufferfv(GL_COLOR, 0, new float[]{1.0f, 0.5f, 0.0f, 1.0f}, 0);
gl4.glBindProgramPipeline(pipelineName[0]);
gl4.glActiveTexture(GL_TEXTURE0);
gl4.glBindTexture(GL_TEXTURE_2D, textureName[Texture.DIFFUSE.ordinal()]);
gl4.glBindImageTexture(Semantic.Image.PICKING, textureName[Texture.PICKING.ordinal()],
0, false, 0, GL_WRITE_ONLY, GL_R32F);
gl4.glBindVertexArray(vertexArrayName[0]);
gl4.glBindBufferBase(GL_UNIFORM_BUFFER, Semantic.Uniform.TRANSFORM0, bufferName[Buffer.TRANSFORM.ordinal()]);
gl4.glDrawElementsInstancedBaseVertexBaseInstance(GL_TRIANGLES, elementCount, GL_UNSIGNED_SHORT, 0, 5, 0, 0);
gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.PICKING.ordinal()]);
ByteBuffer pointer = gl4.glMapBufferRange(GL_ARRAY_BUFFER, 0, Float.BYTES, GL_MAP_READ_BIT);
float depth = pointer.getFloat();
gl4.glUnmapBuffer(GL_ARRAY_BUFFER);
System.out.printf("Depth: %2.3f\n", depth);
return true;
}
In our vertex shader, executed for each vertex, we:
define the glsl version and profile
define all the attribute indices, which must coincide with the ones coming from the Semantic class we used previously
set some memory layout parameters, such as std140 and column_major (useless, it's the default for matrices)
declare the Transform uniform buffer
declare a vec3 position and vec2 texCoord inputs
declare a (built in, incomplete and useless) gl_PerVertex output
declare a Block block output
save inside our block the incoming texCoord and inside gl_Position our vertex in clip space position. The incoming position vertex is in Model space -> * model matrix = vertex in World space, * view/camera matrix = vertex in Camera/View space, * projection matrix = vertex in Clip space.
Code:
#version 420 core
#define POSITION 0
#define COLOR 3
#define TEXCOORD 4
#define TRANSFORM0 1
precision highp float;
precision highp int;
layout(std140, column_major) uniform;
layout(binding = TRANSFORM0) uniform Transform
{
mat4 mvp;
} transform;
layout(location = POSITION) in vec3 position;
layout(location = TEXCOORD) in vec2 texCoord;
out gl_PerVertex
{
vec4 gl_Position;
};
out Block
{
vec2 texCoord;
} outBlock;
void main()
{
outBlock.texCoord = texCoord;
gl_Position = transform.mvp * vec4(position, 1.0);
}
There may be other stages after the vertex shader, such as tessellation control/evaluation and geometry, but they are not mandatory.
The last stage is the fragment shader, executed once per fragment/pixel, that starts similarly, then we:
declare the diffuse texture on binding 0, which matches our glActiveTexture(GL_TEXTURE0) inside the render, and the picking imageBuffer, where we will save our depth, identified by binding 1, which matches our Semantic.Image.PICKING in the render's glBindImageTexture call
declare the picking coordinates, here hardcoded, but nothing stops you from turning them into a uniform variable and setting it at runtime
declare the incoming Block block holding the texture coordinates
declare the default output color
if the current fragment coordinates gl_FragCoord (built in) correspond to the picking coordinates pickingCoord, save the current z value gl_FragCoord.z inside the imageBuffer depth and set the output color to vec4(1, 0, 1, 1); otherwise we set it equal to the diffuse texture via texture(diffuse, inBlock.texCoord.st). st is part of the stpq selection, synonymous with xyzw or rgba.
Code:
#version 420 core
#define FRAG_COLOR 0
precision highp float;
precision highp int;
layout(std140, column_major) uniform;
in vec4 gl_FragCoord;
layout(binding = 0) uniform sampler2D diffuse;
layout(binding = 1, r32f) writeonly uniform imageBuffer depth;
uvec2 pickingCoord = uvec2(320, 240);
in Block
{
vec2 texCoord;
} inBlock;
layout(location = FRAG_COLOR, index = 0) out vec4 color;
void main()
{
if(all(equal(pickingCoord, uvec2(gl_FragCoord.xy))))
{
imageStore(depth, 0, vec4(gl_FragCoord.z, 0, 0, 0));
color = vec4(1, 0, 1, 1);
}
else
color = texture(diffuse, inBlock.texCoord.st);
}
Finally in the end we clean up all our OpenGL resources:
@Override
protected boolean end(GL gl) {
GL4 gl4 = (GL4) gl;
gl4.glDeleteProgramPipelines(1, pipelineName, 0);
gl4.glDeleteProgram(programName);
gl4.glDeleteBuffers(Buffer.MAX.ordinal(), bufferName, 0);
gl4.glDeleteTextures(Texture.MAX.ordinal(), textureName, 0);
gl4.glDeleteVertexArrays(1, vertexArrayName, 0);
return true;
}
I'm trying to do a compute pass where I render to a texture that will be used in a draw pass later on. My initial implementation was based on shader storage buffer objects and was working nicely. But I want to apply a computation method that is going to take advantage of the blend hardware of the GPU, so I started porting the SSBO implementation to an RTT one. Unfortunately the code has stopped working. Now when I read back the texture I am getting wrong values.
Here is my texture and frame buffer setup code:
glGenFramebuffers(1, &m_fbo);
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// Create render textures
glGenTextures(NUM_TEX_OUTPUTS, m_renderTexs);
m_texSize = square_approximation(m_numVertices);
cout << "Textures size: " << glm::to_string(m_texSize) << endl;
GLenum drawBuffers[NUM_TEX_OUTPUTS];
for (int i = 0 ; i < NUM_TEX_OUTPUTS; ++i)
{
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
// 1st 0: level, 2nd 0: no border, 3rd 0: no initial data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
// XXX: do we need this?
// Poor filtering. Needed !
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
// 0: level
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, m_renderTexs[i], 0);
drawBuffers[i] = GL_COLOR_ATTACHMENT0 + i;
}
glDrawBuffers(NUM_TEX_OUTPUTS, drawBuffers);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
cout << "Error when setting frame buffer" << endl;
// throw exception?
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And this is the code to start the compute pass:
m_shaderProgram.use();
// setup openGL
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glViewport(0, 0, m_texSize.x, m_texSize.y); // setup viewport (equal to textures size)
// make a single patch have the vertex, the bases and the neighbours
glPatchParameteri(GL_PATCH_VERTICES, m_maxNeighbours + 5);
// Wait all writes to shader storage to finish
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
glUniform1i(m_shaderProgram.getUniformLocation("curvTex"), m_renderTexs[2]);
glUniform2i(m_shaderProgram.getUniformLocation("size"), m_texSize.x, m_texSize.y);
glUniform2f(m_shaderProgram.getUniformLocation("vertexStep"), (umax - umin)/divisoes,
(vmax-vmin)/divisoes);
// Bind buffers
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBindBufferBase(GL_UNIFORM_BUFFER, m_mvp_location, m_mvp_ubo);
// Make textures active
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
}
// no need to pass index array 'cause ibo is bound already
glDrawElements(GL_PATCHES, m_numElements, GL_UNSIGNED_INT, 0);
I then read back the textures using the following:
bool readTex(GLuint tex, void *dest)
{
glBindTexture(GL_TEXTURE_2D, tex);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, dest);
glBindTexture(GL_TEXTURE_2D, 0);
// TODO: check glGetTexImage return values for error
return true;
}
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
if (m_tensors[i] == NULL) {
m_tensors[i] = new glm::vec4[m_texSize.x*m_texSize.y];
}
memset(m_tensors[i], 0, m_texSize.x*m_texSize.y*sizeof(glm::vec4));
readTex(m_renderTexs[i], m_tensors[i]);
}
Finally, the fragment shader code is:
#version 430
#extension GL_ARB_shader_storage_buffer_object: require
layout(pixel_center_integer) in vec4 gl_FragCoord;
layout(std140, binding=6) buffer EvalBuffer {
vec4 evalDebug[];
};
uniform ivec2 size;
in TEData {
vec4 _a;
vec4 _b;
vec4 _c;
vec4 _d;
vec4 _e;
};
layout(location = 0) out vec4 a;
layout(location = 1) out vec4 b;
layout(location = 2) out vec4 c;
layout(location = 3) out vec4 d;
layout(location = 4) out vec4 e;
void main()
{
a= _a;
b= _b;
c= _c;
d= _d;
e= _e;
evalDebug[gl_PrimitiveID] = gl_FragCoord;
}
The fragment coordinates are correct (each fragment is pointing to a x,y coordinate in the texture), so are all the input values (_a to _e), but I do not see them outputted correctly to the textures when reading back. I also tried accessing the texture in the shader to see if it was only a read-back error, but my debug SSBO returned all zeroes.
Am I missing some setup step?
I've tested both on linux and windows (titan and 540M geforces) and I'm using openGL 4.3.
As derhass pointed out in the comments above, the problem was with the texture format. I assumed that by passing GL_FLOAT as the data type it would use 32-bit floats for each of the RGBA channels. It was not so.
As derhass said, the data type parameter here does not change the texture format. I had to change the internalFormat parameter to what I wanted (GL_RGBA32F) so that it would work as expected.
So, after changing glTexImage2D call to:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
I was able to correctly render the results to the texture and read it back. :)
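As a side note (my addition, not part of the original fix): with GL 4.2+ the same allocation can be made immutable, which also pins the format down explicitly:
// One mip level, explicit sized format; the storage cannot be redefined afterwards.
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, m_texSize.x, m_texSize.y);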
When using cubemaps I'm getting inconsistent results in my shaders as opposed to my program.
For testing purposes I wrote a test-program that simply creates a depth cubemap texture and writes '1' to all sides of it:
unsigned int frameBuffer;
glGenFramebuffers(1,&frameBuffer);
unsigned int texture;
glGenTextures(1,&texture);
glBindFramebuffer(GL_FRAMEBUFFER,frameBuffer);
glBindTexture(GL_TEXTURE_CUBE_MAP,texture);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_DEPTH_TEXTURE_MODE,GL_LUMINANCE);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_COMPARE_FUNC,GL_LEQUAL);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_COMPARE_MODE,GL_COMPARE_R_TO_TEXTURE);
unsigned int width = 512;
unsigned int height = 512;
for(unsigned int i=0;i<6;i++)
{
glTexImage2D(
GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,
0,
GL_DEPTH_COMPONENT16,
width,height,
0,GL_DEPTH_COMPONENT,
GL_FLOAT,
0
);
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_DEPTH_ATTACHMENT,GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,texture,0);
}
unsigned int status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
return;
float *data = new float[width *height];
for(unsigned long long i=0;i<(width *height);i++)
data[i] = 1.f;
for(unsigned int i=0;i<6;i++)
{
glTexSubImage2D(
GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,
0,
0,
0,
width,height,
GL_DEPTH_COMPONENT,
GL_FLOAT,
&data[0]
);
}
delete[] data;
// Check to see if data has been written correctly
data = new float[width *height];
for(unsigned int i=0;i<6;i++)
{
glFramebufferTexture2D(GL_READ_FRAMEBUFFER,GL_DEPTH_ATTACHMENT,GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,texture,0);
glReadPixels(0,0,width,height,GL_DEPTH_COMPONENT,GL_FLOAT,&data[0]);
for(unsigned long long j=0;j<(width *height);j++)
{
if(data[j] != 1.f)
return;
}
}
delete[] data;
// Check end
Rendering:
float screenVerts[18] = {
-1.f,-1.f,0.f,
1.f,-1.f,0.f,
-1.f,1.f,0.f,
-1.f,1.f,0.f,
1.f,-1.f,0.f,
1.f,1.f,0.f
};
unsigned int vertexBuffer;
glGenBuffers(1,&vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER,vertexBuffer);
glBufferData(GL_ARRAY_BUFFER,sizeof(float) *18,&screenVerts[0],GL_STATIC_DRAW);
glUseProgram(shader);
glBindTexture(GL_TEXTURE_CUBE_MAP,texture);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER,vertexBuffer);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,0,(void*)0);
glDrawArrays(GL_TRIANGLES,0,6);
glDisableVertexAttribArray(0);
Vertex Shader:
#version 330 core
layout(location = 0) in vec3 vertPos;
out vec2 UV;
void main()
{
gl_Position = vec4(vertPos,1);
UV = (vertPos.xy +vec2(1,1)) /2.0;
}
Fragment Shader:
#version 330 core
in vec2 UV;
out vec3 color;
uniform samplerCubeShadow testShadow;
void main()
{
color.r = texture(testShadow,vec4(0,0,1,1));
// Just grab the value from a random direction and put it out as red color
}
(I've ported the C++ code from another language, so if you find some syntax errors in there, don't mind those, they're not in the actual code)
glGetError() does not return any errors.
glReadPixels proves that the writing process worked; however, the result is a black screen. That means that the texture call inside the shader returns 0, which should be impossible regardless of what I use as the direction vector.
What am I missing?
You consistently have the arguments for the glBind*() calls reversed. They all take a target as the first argument, and the object id (aka name) as the second argument. Instead of this:
glBindFramebuffer(frameBuffer,GL_FRAMEBUFFER);
glBindTexture(texture,GL_TEXTURE_CUBE_MAP);
glBindBuffer(vertexBuffer,GL_ARRAY_BUFFER);
It should be this:
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
glBindTexture(GL_TEXTURE_CUBE_MAP, texture);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
There are multiple instances of some of these in the code, so make sure that you catch them all.
Other than that, if this is the complete code, you're not rendering to the screen. You have an FBO without a color attachment bound, so the output goes nowhere. If you want to render to the screen, you'll need to unbind the FBO:
glBindFramebuffer(GL_FRAMEBUFFER, 0);
You're also leaving the g and b components of the fragment output undefined, since you only write to r.