Use sampler2D and samplerCube in one shader in WebGL - glsl

I am struggling with when and how to use the activeTexture, bindTexture and gl.uniform1i(samplerLocation, textureUnit) functions.
Everything has worked so far, but it seems my rudimentary understanding is starting to become a problem now.
I am trying to use a cube map and a texture map in one shader. To test this I am currently drawing two objects with this shader and try to color each with a different texture. If I use only the texture map or only the cube map, the program runs nicely, so I assume both are loaded correctly. Now here is my boiled-down fragment shader:
precision mediump float;
uniform int Switch; // switch between sphere and background
// normal
varying vec3 vNormal;
// textures
uniform mat4 TexMatrix;
uniform samplerCube texMap;
uniform sampler2D bumpMap;
void main() {
    vec4 FragColor;
    // Switch == 0: sample the cube map
    if( Switch == 0 ) {
        vec3 fTexCoord = (TexMatrix * vec4(vNormal, 1.0)).xyz;
        FragColor = textureCube(texMap, fTexCoord);
    }
    // Switch == 1: sample the 2D map (spherical coord -> Cartesian coord)
    else if( Switch == 1 ) {
        float PI = 3.141592653;
        vec2 bTexCoord = vec2(2.0*atan((length(vec2(vNormal.x,vNormal.y)) - vNormal.x)/vNormal.y)/(2.0*PI),
                              acos(vNormal.z)/PI);
        FragColor = texture2D(bumpMap, bTexCoord);
    }
    gl_FragColor = FragColor;
}
This is where I initialize the two textures; the images are loaded correctly.
function initBumpMap(img){
    gl.Texture = gl.createTexture();
    gl.activeTexture(gl.TEXTURE0);
    gl.bindTexture(gl.TEXTURE_2D, gl.Texture);
    gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, img);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    //gl.generateMipmap(gl.TEXTURE_2D);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT); // set the texture to repeat for values of (s,t) outside of [0,1]
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT);
    gl.uniform1i(gl.getUniformLocation(program, "bumpMap"), 0); // link texture unit 0 to the sampler
}
function initCubeMap(images){
    gl.CubeMap = gl.createTexture();
    gl.activeTexture(gl.TEXTURE1);
    gl.bindTexture(gl.TEXTURE_CUBE_MAP, gl.CubeMap);
    gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
    // put the images on the cube map faces
    gl.texImage2D(gl.TEXTURE_CUBE_MAP_POSITIVE_Z, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, images[0]);
    gl.texImage2D(gl.TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, images[1]);
    gl.texImage2D(gl.TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, images[2]);
    gl.texImage2D(gl.TEXTURE_CUBE_MAP_POSITIVE_X, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, images[3]);
    gl.texImage2D(gl.TEXTURE_CUBE_MAP_NEGATIVE_X, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, images[4]);
    gl.texImage2D(gl.TEXTURE_CUBE_MAP_POSITIVE_Y, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, images[5]);
    gl.texParameteri(gl.TEXTURE_CUBE_MAP, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.uniform1i(gl.getUniformLocation(program, "texMap"), 1); // link texture unit 1 to the sampler
}
And here is the main JavaScript/WebGL snippet:
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, gl.Texture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_CUBE_MAP, gl.CubeMap);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// draw square
...bind point and enable stuff for the square...
gl.uniform1i(gl.getUniformLocation(program,'Switch'), 1); // switch to the bump map
gl.bindTexture(gl.TEXTURE_2D, gl.Texture);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
// draw sphere
...bind point and enable stuff for the sphere...
gl.uniform1i(gl.getUniformLocation(program,'Switch'), 0); // switch to the cube map
gl.bindTexture(gl.TEXTURE_CUBE_MAP, gl.CubeMap);
for( var i = 0; i < vertices.length; i += 3){
    gl.drawArrays(gl.TRIANGLES, i, 3);
}
If I use only one of the if-statements in the shader everything works fine; the moment I try to use both textures nothing happens! What's going on?

Apparently I always need to set
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, gl.Texture);
gl.uniform1i(gl.getUniformLocation(program, "bumpMap"), 0);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_CUBE_MAP, gl.CubeMap);
gl.uniform1i(gl.getUniformLocation(program, "texMap"), 1);
before drawing, then it works! :D
EDIT: But why? What do I really need to set? Why do I need to use all three commands?
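The three calls do different jobs: gl.uniform1i stores a texture-unit index in the sampler uniform, gl.activeTexture selects which unit subsequent calls affect, and gl.bindTexture attaches a texture to the selected unit's target. Sampler uniforms that are never set default to unit 0, and a sampler2D and a samplerCube referencing the same unit make the draw call record INVALID_OPERATION and draw nothing, which matches the "nothing happens" symptom. A minimal sketch of the plumbing, written in C-style GL (the WebGL calls behave identically; prog, tex2d and cube are hypothetical handles):
glUseProgram(prog);
glActiveTexture(GL_TEXTURE0);                          // select unit 0
glBindTexture(GL_TEXTURE_2D, tex2d);                   // attach the 2D texture to unit 0
glUniform1i(glGetUniformLocation(prog, "bumpMap"), 0); // sampler2D bumpMap reads unit 0
glActiveTexture(GL_TEXTURE1);                          // select unit 1
glBindTexture(GL_TEXTURE_CUBE_MAP, cube);              // attach the cube map to unit 1
glUniform1i(glGetUniformLocation(prog, "texMap"), 1);  // samplerCube texMap reads unit 1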

Related

Rendering to texture with OpenGL C++

I am trying to render some polygons to a texture, and then render the texture to the screen.
I'm not sure how to debug my code, since that would require probing the internal state of OpenGL, so I would appreciate tips on how to debug it myself even more than having my error pointed out.
Anyway, I commented the code I wrote explaining what I expect each line to do.
Here is a description of what the code is supposed to do.
Basically, I made a vertex shader that provides the position, UV and color to the fragment shader. The fragment shader has a uniform to activate texture sampling; otherwise it just outputs the input color. In both cases, the color is multiplied by a uniform color. First I create a texture and fill it with red and green raw pixel data for testing. This texture is correctly rendered to the screen (I see the red and green parts exactly as I initialized them). Then I try to do the actual rendering onto the texture: I render a small blue square in the middle of it (sampler disabled in the fragment shader, color uniform set to blue), but I can't get this blue square to appear on the rendered texture.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "utils.h"
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <iostream>
using namespace std;
#define numVAOs 1
#define numVBOs 1
GLuint shaderProgram;
GLuint unifUseTexture, unifInTexture, unifTMat, unifDrawColor;
GLuint texture;
GLuint textureFrameBuffer;
GLuint vao[numVAOs];
GLuint vbo[numVBOs];
void drawRectangle() {
}
void init() {
// Compile the shaderProgram
shaderProgram = createShaderProgram("vertex.glsl","fragment.glsl");
// Retrieve the uniform location
unifUseTexture = glGetUniformLocation(shaderProgram,"useTexture");
unifInTexture = glGetUniformLocation(shaderProgram,"inTexture");
unifTMat = glGetUniformLocation(shaderProgram,"tMat");
unifDrawColor = glGetUniformLocation(shaderProgram,"drawColor");
// Create vertex array object and vertex buffer object
glGenVertexArrays(numVAOs,vao);
glBindVertexArray(vao[0]);
float xyzuvrgbaSquare[54] = {
/* C */ 1.0,-1.0,0.0, 1.0,0.0, 1.0,1.0,1.0,1.0,
/* A */ -1.0,1.0,0.0, 0.0,1.0, 1.0,1.0,1.0,1.0,
/* B */ 1.0,1.0,0.0, 1.0,1.0, 1.0,1.0,1.0,1.0,
/* A */ -1.0,1.0,0.0, 0.0,1.0, 1.0,1.0,1.0,1.0,
/* C */ 1.0,-1.0,0.0, 1.0,0.0, 1.0,1.0,1.0,1.0,
/* D */-1.0,-1.0,0.0, 0.0,0.0, 1.0,1.0,1.0,1.0
};
glGenBuffers(numVBOs,vbo);
glBindBuffer(GL_ARRAY_BUFFER,vbo[0]);
glBufferData(GL_ARRAY_BUFFER, 4*54,xyzuvrgbaSquare,GL_STATIC_DRAW);
// Associate vbo with the correct vertex attribute to display the rectangle
glBindBuffer(GL_ARRAY_BUFFER,vbo[0]);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,36,0); // inPosition
glVertexAttribPointer(1,4,GL_FLOAT,GL_FALSE,36,(void*)20); // inColor
glVertexAttribPointer(2,2,GL_FLOAT,GL_FALSE,36,(void*)12); // inUV
glEnableVertexAttribArray(0); // location=0 in the shader
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
// Generate a small 128x128 texture. I followed the tutorial
// over http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture/
// generate a frameBuffer to contain the texture
glGenFramebuffers(1,&textureFrameBuffer);
// Bind it, so when I will generate the texture it will be associated with it
glBindFramebuffer(GL_FRAMEBUFFER, textureFrameBuffer);
glGenTextures(1,&texture);
glBindTexture(GL_TEXTURE_2D,texture);
// Put some raw data inside of it for testing purposes. I will fill it
// half with green, half with red
unsigned char* imageRaw = new unsigned char[4*128*128];
for(int i = 0; i < 4*128*64; i += 4) {
    imageRaw[i] = 255;
    imageRaw[i+1] = 0;
    imageRaw[i+2] = 0;
    imageRaw[i+3] = 255;
    imageRaw[4*128*64+i] = 0;
    imageRaw[4*128*64+i+1] = 255;
    imageRaw[4*128*64+i+2] = 0;
    imageRaw[4*128*64+i+3] = 255;
}
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,128,128,0,GL_RGBA,GL_UNSIGNED_BYTE,imageRaw);
// Setup some required parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Draw a small blue square on the texture
// So, activate the previously compiled shader program and setup the uniforms
glUseProgram(shaderProgram);
// First, create a transform matrix to make the square smaller (20% of texture)
glm::mat4 tMat = glm::scale(glm::mat4(1.0f),glm::vec3(0.2,0.2,0));
glUniformMatrix4fv(unifTMat,1,GL_FALSE,glm::value_ptr(tMat));
// do not use a texture (ignore sampler2D in fragment shader)
glUniform1i(unifUseTexture,0);
// use the color BLUE for the rectangle
glUniform4f(unifDrawColor,0.0,0.0,1.0,1.0);
// Bind the textureFrameBuffer to render on the texture instead of the screen
glBindFramebuffer(GL_FRAMEBUFFER,textureFrameBuffer);
glFramebufferTexture(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,texture,0);
GLenum drawBuffers[1] = {GL_COLOR_ATTACHMENT0};
glDrawBuffers(1, drawBuffers);
GLenum status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
if( status != GL_FRAMEBUFFER_COMPLETE ) {
    cout << "framebuffer status: " << status << endl;
}
// the vertex framebuffer and vertex attribute pointer have already been
// described, so I'll just do the draw call here
glDrawArrays(GL_TRIANGLES,0,6);
// Display the texture on screen
// Bind the screen framebuffer (0) so the following rendering will occur on screen
glBindFramebuffer(GL_FRAMEBUFFER,0);
// Put a white background color
glClearColor(1.0,1.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
// Change properly the shader uniforms
glUniform4f(unifDrawColor,1.0,1.0,1.0,1.0); // multiply by white, no changes
glUniform1i(unifUseTexture,1); // set useTexture to True
// Create a transform matrix to scale the rectangle so that it uses up only half screen
tMat = glm::scale(glm::mat4(1.0f),glm::vec3(.5,.5,.0));
glUniformMatrix4fv(unifTMat,1,GL_FALSE,glm::value_ptr(tMat));
// Put the sampler2D
glActiveTexture(GL_TEXTURE0); // Work on texture0
// 0 because of (binding = 0) on the fragment shader
glBindTexture(GL_TEXTURE_2D,texture);
glDrawArrays(GL_TRIANGLES,0,6); // 6 vertices
}
int main(int argc, char** argv) {
// Build the window
if (!glfwInit()) exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR,4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR,3);
GLFWwindow* window = glfwCreateWindow(600,600,"Dashboard",NULL,NULL);
glfwMakeContextCurrent(window);
if(glewInit() != GLEW_OK) exit(EXIT_FAILURE);
glfwSwapInterval(1);
init();
while(!glfwWindowShouldClose(window)) {
    //display(window,glfwGetTime());
    glfwSwapBuffers(window);
    glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
EDIT: I forgot to put the shader code here, though the problem is not in the shader, because it does work when it is used to render the texture to the screen.
vertex.glsl:
#version 430
layout (location=0) in vec3 inPosition;
layout (location=1) in vec4 inColor;
layout (location=2) in vec2 inUV;
uniform mat4 tMat;
uniform vec4 drawColor;
out vec4 varyingColor;
out vec2 varyingUV;
void main(void) {
    gl_Position = tMat * vec4(inPosition,1.0);
    varyingColor = inColor*drawColor;
    varyingUV = inUV;
}
fragment.glsl:
#version 430
in vec4 varyingColor;
in vec2 varyingUV;
layout(location = 0) out vec4 color;
layout (binding=0) uniform sampler2D inTexture;
uniform bool useTexture;
void main(void) {
    if( useTexture )
        color = vec4(texture(inTexture,varyingUV).rgb,1.0) * varyingColor;
    else
        color = varyingColor;
}
The texture which is attached to the framebuffer has a different size than the window. Hence you have to adjust the viewport rectangle (glViewport) to the size of the currently bound framebuffer before drawing the geometry:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 128, 128, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageRaw);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, textureFrameBuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture,0);
glViewport(0, 0, 128, 128);
// [...]
glDrawArrays(GL_TRIANGLES, 0, 6);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, WIDTH, HEIGHT);
// [...]
glDrawArrays(GL_TRIANGLES, 0, 6);

OpenGL access DepthComponent Texture in GLSL 400

I'm trying to access a DepthComponent texture in my GLSL shader, version 400.
The program does two-pass rendering. In the first pass I render all the geometry and colors to a framebuffer on which I have a ColorAttachment and a DepthAttachment. The DepthAttachment is bound like this:
(Note: I'm using C# with OpenTK, which is strongly typed, in my code examples.)
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.DepthAttachment, TextureTarget.Texture2D, depthTexture.ID, 0);
The depth texture has an internal format of DepthComponent32f, a pixel format of DepthComponent and Float as the pixel type. All the other properties have default values.
The second pass renders the framebuffer's color image onto the screen using the following shader:
#version 400
uniform sampler2D finalImage;
in vec2 texCoords;
out vec4 fragColor;
void main(){
    fragColor = vec4(texture2D(finalImage, texCoords.xy).rgb, 1.0);
}
But now I want to read the depth texture (DepthComponent) instead of the color texture (RGBA).
I tried a lot of things, like disabling TextureCompareMode, using a shadow2DSampler with shadow2DProj(sampler, vec4(texCoords.xy, 0.0, 1.0)) or just textureProj(sampler, vec3(texCoords.xy, 0.0)). But it returns only 1 or 0, depending on which configuration I use.
To be sure that my depth texture is OK, I've read the pixels back into a float array like this:
GL.ReadPixels(0, 0, depthTexture.Width, depthTexture.Height, PixelFormat.DepthComponent, PixelType.Float, float_array);
Everything seems to be correct: it shows me 1.0 for empty space and values between 0.99 and 1.0 for visible objects.
Edit
Here is a code example of how my process looks:
Init code
depthTexture= new GLEXTexture2D(width, height);
depthTexture.TextureCompareMode = TextureCompareMode.None;
depthTexture.CreateMutable(PixelInternalFormat.DepthComponent32f, PixelFormat.DepthComponent, PixelType.Float);
***CreateMutable Function***
ReserveTextureID();
GLEX.glBeginTexture2D(ID);
GL.TexImage2D(TextureTarget.Texture2D, 0, pInternalFormat, width, height, 0, pFormat, pType, IntPtr.Zero);
ApplyOptions();
MarkReserved(true);
GLEX.glEndTexture2D();
(Framebuffer attachment mentioned above)
Render pass 1
GL.BindFramebuffer(FramebufferTarget.Framebuffer, drawBuffer.ID);
GL.Viewport(0, 0, depthTexture.Width, depthTexture.Height);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit | ClearBufferMask.StencilBufferBit);
GL.Enable(EnableCap.DepthTest);
GL.ClearColor(Color.Gray);
GL.UseProgram(geometryPassShader.ID);
geometry_shaderUniformMVPM.SetValueMat4(false, geometryImageMVMatrix * geometryImageProjMatrix);
testRectangle.Render(PrimitiveType.QuadStrip);
GL.UseProgram(0);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
Render pass 2
GL.Viewport(0, 0, depthTexture.Width, depthTexture.Height);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit | ClearBufferMask.StencilBufferBit);
GL.ClearColor(Color.White);
GL.UseProgram(finalImageShader.ID);
GL.ActiveTexture(TextureUnit.Texture0);
depthTexture.Bind();
final_shaderUniformMVPM.SetValueMat4(false, finalImageMatrix);
screenQuad.Render(PrimitiveType.Quads);
GL.UseProgram(0);
GL.BindTexture(TextureTarget.Texture2D, 0);
A few hours later I found the solution.
The problem was the MinFilter. As the Khronos Group says about glTexParameter:
The initial value of GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR.
I changed the MinFilter of my depth texture to GL_NEAREST (GL_LINEAR is also legal) and now the depth values in the GLSL shader are right (after linearization, of course).
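The underlying rule is texture completeness: with a min filter that needs mipmaps but no mipmaps supplied, the texture is incomplete and sampling it returns a constant value instead of your data. In C-style GL the fix is a single parameter change on the bound depth texture (the OpenTK wrapper sets the same parameter); a minimal sketch, where depthTextureID stands for the texture handle:
glBindTexture(GL_TEXTURE_2D, depthTextureID);                      // hypothetical handle
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // GL_LINEAR is also legal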
Additional Info:
There are some extensions for the MagFilter like LINEAR_DETAIL_ALPHA_SGIS. I've tried some of these; the depth value correctness was not affected.

OpenGL Texture3D FragmentShader Write Into Layers

I want to filter a texture (no mipmapping) and write the filtered levels into a 3D texture. I have already created a 3D texture with the following code:
glGenTextures(1, &outTexture);
glBindTexture(GL_TEXTURE_3D, outTexture);
glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA8, width, height, layer, 0, GL_RGBA, GL_INT, 0);
glBindTexture(GL_TEXTURE_3D, 0);
and I render my models like so:
glFramebufferTexture3D(GL_FRAMEBUFFER, attachment, GL_TEXTURE_3D, outTexture, 0, layer);
entity.render();
Is there a way for a fragment shader to write into multiple layers at the same time, knowing which layer it is in? Like so?
out_color[layer] = vec4(r,g,b,a);
PS: I need to know the layer in the fragment shader (some filtering things) and please no gl_Layer ... it can only be used with OpenGL 4 and I can only use OpenGL 3 ... I could write it into a uniform and render multiple times, but if there is a way to go with one pass ... it would be nice :)
An instance of a fragment shader cannot choose which layer it is writing to. That is chosen for it by the geometry shader that constructed the primitive which generated the fragment in question. So if you want to render to multiple layers, you will need to use layered rendering and geometry shaders.
If your Fragment Shader needs to know what layer it is writing to, then have the Geometry Shader pass that as a per-vertex parameter:
//Geometry shader
// (assuming triangle input; max_vertices must be a compile-time
// constant of at least numLayers * 3)
layout(triangles) in;
layout(triangle_strip, max_vertices = 96) out;
flat out int layerIndex;
void main()
{
    for(int currLayer = 0; currLayer < numLayers; ++currLayer)
    {
        for(int currVertex = 0; currVertex < gl_in.length(); ++currVertex)
        {
            layerIndex = currLayer;
            gl_Layer = currLayer;
            //Compute other per-vertex outputs
            EmitVertex();
        }
        EndPrimitive();
    }
}

//Fragment shader
flat in int layerIndex;
void main()
{
    //Do stuff with layerIndex;
}
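Note that for gl_Layer to select the destination, the 3D texture has to be attached as a layered attachment, i.e. the whole texture via glFramebufferTexture rather than a single layer via glFramebufferTexture3D. A minimal sketch under that assumption, reusing outTexture from the question and a hypothetical framebuffer handle fbo:
// attach ALL layers of the 3D texture at once; the geometry shader's
// gl_Layer output then picks the layer each primitive is rasterized into
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, outTexture, 0);
entity.render();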

Is there a simple way to get the depth of an object in OpenGL (JOGL)

How can I get the z-coordinate of an object in 3D space when I click on it?
(It's not really an object, more a graph; I need to know what the user selected.) I use JOGL.
I just finished porting a picking sample from g-truc's ogl-samples.
I will try to give you a quick explanation of the code.
We start by enabling the depth test:
private boolean initTest(GL4 gl4) {
    gl4.glEnable(GL_DEPTH_TEST);
    return true;
}
In the initBuffer we:
generate all the buffers we need with glGenBuffers
bind the element buffer and transfer the content of our indices. Each index refers to the vertex to use. We need to bind it first, because glBufferData will use whatever is bound to the target specified by its first argument, GL_ELEMENT_ARRAY_BUFFER in this case
do the same for the vertices themselves
get the GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT (a global parameter) to determine the minimum uniform block size for storing our transform variable. This is necessary if we want to bind it via glBindBufferRange, a function we will not use for the picking buffer; that is why there we pass just the size of a float, Float.BYTES
note that the last argument of glBufferData is just a hint (it's up to OpenGL and the driver what they do with it); as you can see it is static for the indices and vertices, because we are not going to change them anymore, but dynamic for the uniform buffers, since we will update them every frame
Code:
private boolean initBuffer(GL4 gl4) {
    gl4.glGenBuffers(Buffer.MAX.ordinal(), bufferName, 0);
    gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName[Buffer.ELEMENT.ordinal()]);
    ShortBuffer elementBuffer = GLBuffers.newDirectShortBuffer(elementData);
    gl4.glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementSize, elementBuffer, GL_STATIC_DRAW);
    gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.VERTEX.ordinal()]);
    FloatBuffer vertexBuffer = GLBuffers.newDirectFloatBuffer(vertexData);
    gl4.glBufferData(GL_ARRAY_BUFFER, vertexSize, vertexBuffer, GL_STATIC_DRAW);
    gl4.glBindBuffer(GL_ARRAY_BUFFER, 0);
    int[] uniformBufferOffset = {0};
    gl4.glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, uniformBufferOffset, 0);
    int uniformBlockSize = Math.max(projection.length * Float.BYTES, uniformBufferOffset[0]);
    gl4.glBindBuffer(GL_UNIFORM_BUFFER, bufferName[Buffer.TRANSFORM.ordinal()]);
    gl4.glBufferData(GL_UNIFORM_BUFFER, uniformBlockSize, null, GL_DYNAMIC_DRAW);
    gl4.glBindBuffer(GL_UNIFORM_BUFFER, 0);
    gl4.glBindBuffer(GL_TEXTURE_BUFFER, bufferName[Buffer.PICKING.ordinal()]);
    gl4.glBufferData(GL_TEXTURE_BUFFER, Float.BYTES, null, GL_DYNAMIC_READ);
    gl4.glBindBuffer(GL_TEXTURE_BUFFER, 0);
    return true;
}
In the initTexture we initialize our textures; we:
generate both textures with glGenTextures
set GL_UNPACK_ALIGNMENT to 1 (the default is 4 bytes) in order to avoid any problems at all (because your horizontal texture size must match the alignment)
set the active texture to GL_TEXTURE0; there is a specific number of texture slots, and you need to select the one you want before working on any texture
bind the diffuse texture
set the swizzle, that is, what each channel will receive
set the levels (mipmaps), where 0 is the base (original/biggest)
set the filters
allocate the space, levels included, with glTexStorage2D
transfer the corresponding data for each level
reset GL_UNPACK_ALIGNMENT back to 4
still on GL_TEXTURE0, bind our other texture, PICKING
allocate storage for a single 32-bit float and associate the PICKING texture with the PICKING buffer via glTexBuffer
Code:
private boolean initTexture(GL4 gl4) {
    try {
        jgli.Texture2D texture = new Texture2D(jgli.Load.load(TEXTURE_ROOT + "/" + TEXTURE_DIFFUSE));
        jgli.Gl.Format format = jgli.Gl.instance.translate(texture.format());
        gl4.glGenTextures(Texture.MAX.ordinal(), textureName, 0);
        // Diffuse
        {
            gl4.glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            gl4.glActiveTexture(GL_TEXTURE0);
            gl4.glBindTexture(GL_TEXTURE_2D, textureName[Texture.DIFFUSE.ordinal()]);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_RED);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_GREEN);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_BLUE);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_ALPHA);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, texture.levels() - 1);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            gl4.glTexStorage2D(GL_TEXTURE_2D, texture.levels(), format.internal.value,
                    texture.dimensions(0)[0], texture.dimensions(0)[1]);
            for (int level = 0; level < texture.levels(); ++level) {
                gl4.glTexSubImage2D(GL_TEXTURE_2D, level,
                        0, 0,
                        texture.dimensions(level)[0], texture.dimensions(level)[1],
                        format.external.value, format.type.value,
                        texture.data(0, 0, level));
            }
            gl4.glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
        }
        // Picking
        {
            gl4.glBindTexture(GL_TEXTURE_BUFFER, textureName[Texture.PICKING.ordinal()]);
            gl4.glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, bufferName[Buffer.PICKING.ordinal()]);
            gl4.glBindTexture(GL_TEXTURE_BUFFER, 0);
        }
    } catch (IOException ex) {
        Logger.getLogger(Gl_420_picking.class.getName()).log(Level.SEVERE, null, ex);
    }
    return true;
}
In the initProgram we initialize our program by:
generating a pipeline (a composition of different shaders), glGenProgramPipelines
creating the vertex shader code vertShaderCode, where GL_VERTEX_SHADER is the shader type, SHADERS_ROOT is the place where the shader source is located, SHADERS_SOURCE_UPDATE is the name and "vert" is the extension
initializing it, and similarly the fragment shader
grabbing the generated index and saving it in programName
setting the program separable; nothing useful here, just pure sport, glProgramParameteri
adding both shaders to our shaderProgram and linking and compiling it, link
specifying which program stages our pipelineName has, glUseProgramStages
Code:
private boolean initProgram(GL4 gl4) {
    boolean validated = true;
    gl4.glGenProgramPipelines(1, pipelineName, 0);
    // Create program
    if (validated) {
        ShaderProgram shaderProgram = new ShaderProgram();
        ShaderCode vertShaderCode = ShaderCode.create(gl4, GL_VERTEX_SHADER,
                this.getClass(), SHADERS_ROOT, null, SHADERS_SOURCE_UPDATE, "vert", null, true);
        ShaderCode fragShaderCode = ShaderCode.create(gl4, GL_FRAGMENT_SHADER,
                this.getClass(), SHADERS_ROOT, null, SHADERS_SOURCE_UPDATE, "frag", null, true);
        shaderProgram.init(gl4);
        programName = shaderProgram.program();
        gl4.glProgramParameteri(programName, GL_PROGRAM_SEPARABLE, GL_TRUE);
        shaderProgram.add(vertShaderCode);
        shaderProgram.add(fragShaderCode);
        shaderProgram.link(gl4, System.out);
    }
    if (validated) {
        gl4.glUseProgramStages(pipelineName[0], GL_VERTEX_SHADER_BIT | GL_FRAGMENT_SHADER_BIT, programName);
    }
    return validated & checkError(gl4, "initProgram");
}
In the initVertexArray we:
generate a single vertex array, glGenVertexArrays, and bind it, glBindVertexArray
bind the vertices buffer and set the attributes for the position and the texture coordinates, here interleaved. The position is identified by the attribute index Semantic.Attr.POSITION (this will match the one in the vertex shader), component size 2, type GL_FLOAT, normalized false, a stride (the total size of each vertex) of 2 * 2 * Float.BYTES and an offset of 0 for this attribute. Similarly for the texture coordinates.
unbind the vertices buffer, since it is not part of the vertex array state. It must be bound only for the glVertexAttribPointer call, so that OpenGL knows which buffer those parameters refer to.
enable the corresponding vertex attribute arrays, glEnableVertexAttribArray
bind the element (indices) buffer, which is part of the vertex array
Code:
private boolean initVertexArray(GL4 gl4) {
    gl4.glGenVertexArrays(1, vertexArrayName, 0);
    gl4.glBindVertexArray(vertexArrayName[0]);
    {
        gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.VERTEX.ordinal()]);
        gl4.glVertexAttribPointer(Semantic.Attr.POSITION, 2, GL_FLOAT, false, 2 * 2 * Float.BYTES, 0);
        gl4.glVertexAttribPointer(Semantic.Attr.TEXCOORD, 2, GL_FLOAT, false, 2 * 2 * Float.BYTES, 2 * Float.BYTES);
        gl4.glBindBuffer(GL_ARRAY_BUFFER, 0);
        gl4.glEnableVertexAttribArray(Semantic.Attr.POSITION);
        gl4.glEnableVertexAttribArray(Semantic.Attr.TEXCOORD);
        gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName[Buffer.ELEMENT.ordinal()]);
    }
    gl4.glBindVertexArray(0);
    return true;
}
In the render we:
bind the TRANSFORM buffer that will contain our transformation matrix
get a ByteBuffer pointer out of it
calculate the projection, view and model matrices and multiply them in the order p * v * m, also called the mvp matrix
save our mvp matrix through our pointer and rewind the buffer (position set to 0 again)
unmap it to make sure it gets uploaded to the GPU
set the viewport to match our window size
set the clear depthValue to 1 (superfluous, since it is the default value), clear the depth buffer with the depthValue, and the color buffer with the color {1.0f, 0.5f, 0.0f, 1.0f}
bind the pipeline
set the active texture to 0
bind the diffuse texture and the picking image texture
bind the vertex array
bind the transform uniform buffer
render; glDrawElementsInstancedBaseVertexBaseInstance is overkill here, but what matters is the primitive type GL_TRIANGLES, the number of indices elementCount and their type GL_UNSIGNED_SHORT
bind the picking texture buffer and retrieve its value
Code:
@Override
protected boolean render(GL gl) {
    GL4 gl4 = (GL4) gl;
    {
        gl4.glBindBuffer(GL_UNIFORM_BUFFER, bufferName[Buffer.TRANSFORM.ordinal()]);
        ByteBuffer pointer = gl4.glMapBufferRange(
                GL_UNIFORM_BUFFER, 0, projection.length * Float.BYTES,
                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
        FloatUtil.makePerspective(projection, 0, true, (float) Math.PI * 0.25f,
                (float) windowSize.x / windowSize.y, 0.1f, 100.0f);
        FloatUtil.makeIdentity(model);
        FloatUtil.multMatrix(projection, view());
        FloatUtil.multMatrix(projection, model);
        for (float f : projection) {
            pointer.putFloat(f);
        }
        pointer.rewind();
        // Make sure the uniform buffer is uploaded
        gl4.glUnmapBuffer(GL_UNIFORM_BUFFER);
    }
    gl4.glViewportIndexedf(0, 0, 0, windowSize.x, windowSize.y);
    float[] depthValue = {1.0f};
    gl4.glClearBufferfv(GL_DEPTH, 0, depthValue, 0);
    gl4.glClearBufferfv(GL_COLOR, 0, new float[]{1.0f, 0.5f, 0.0f, 1.0f}, 0);
    gl4.glBindProgramPipeline(pipelineName[0]);
    gl4.glActiveTexture(GL_TEXTURE0);
    gl4.glBindTexture(GL_TEXTURE_2D, textureName[Texture.DIFFUSE.ordinal()]);
    gl4.glBindImageTexture(Semantic.Image.PICKING, textureName[Texture.PICKING.ordinal()],
            0, false, 0, GL_WRITE_ONLY, GL_R32F);
    gl4.glBindVertexArray(vertexArrayName[0]);
    gl4.glBindBufferBase(GL_UNIFORM_BUFFER, Semantic.Uniform.TRANSFORM0, bufferName[Buffer.TRANSFORM.ordinal()]);
    gl4.glDrawElementsInstancedBaseVertexBaseInstance(GL_TRIANGLES, elementCount, GL_UNSIGNED_SHORT, 0, 5, 0, 0);
    gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.PICKING.ordinal()]);
    ByteBuffer pointer = gl4.glMapBufferRange(GL_ARRAY_BUFFER, 0, Float.BYTES, GL_MAP_READ_BIT);
    float depth = pointer.getFloat();
    gl4.glUnmapBuffer(GL_ARRAY_BUFFER);
    System.out.printf("Depth: %2.3f\n", depth);
    return true;
}
In our vertex shader, executed for each vertex, we:
define the GLSL version and profile
define all the attribute indices, which must coincide with the ones from the Semantic we used previously
set some memory layout parameters, such as std140 and column_major (useless, it's the default value for matrices)
declare the Transform uniform buffer
declare a vec3 position and a vec2 texCoord input
declare a (built in, incomplete and useless) gl_PerVertex output
declare a Block block output
save the incoming texCoord inside our block, and our vertex position in clip space inside gl_Position. The incoming position vertex is in model space -> * model matrix = vertex in world space, * view/camera matrix = vertex in camera/view space, * projection matrix = vertex in clip space.
Code:
#version 420 core
#define POSITION 0
#define COLOR 3
#define TEXCOORD 4
#define TRANSFORM0 1
precision highp float;
precision highp int;
layout(std140, column_major) uniform;
layout(binding = TRANSFORM0) uniform Transform
{
    mat4 mvp;
} transform;
layout(location = POSITION) in vec3 position;
layout(location = TEXCOORD) in vec2 texCoord;
out gl_PerVertex
{
    vec4 gl_Position;
};
out Block
{
    vec2 texCoord;
} outBlock;
void main()
{
    outBlock.texCoord = texCoord;
    gl_Position = transform.mvp * vec4(position, 1.0);
}
There may be other stages after the vertex shader, such as tessellation control/evaluation and geometry, but they are not mandatory.
The last stage is the fragment shader, executed once per fragment/pixel, which starts similarly; then we:
declare the diffuse texture on binding 0, which matches our glActiveTexture(GL_TEXTURE0) inside the render, and the picking imageBuffer, where we will save our depth, on binding 1, which matches Semantic.Image.PICKING in our render's glBindImageTexture call
declare the picking coordinates, here hardcoded, but nothing stops you from turning them into a uniform variable and setting them at runtime
declare the incoming Block block holding the texture coordinates
declare the default output color
if the current fragment coordinates gl_FragCoord (a built-in variable) correspond to the picking coordinates pickingCoord, save the current z value gl_FragCoord.z inside the imageBuffer depth and set the output color to vec4(1, 0, 1, 1); otherwise we set it equal to the diffuse texture by texture(diffuse, inBlock.texCoord.st). st is part of the stpq selection, synonymous with xyzw or rgba.
Code:
#version 420 core
#define FRAG_COLOR 0
precision highp float;
precision highp int;
layout(std140, column_major) uniform;
in vec4 gl_FragCoord;
layout(binding = 0) uniform sampler2D diffuse;
layout(binding = 1, r32f) writeonly uniform imageBuffer depth;
uvec2 pickingCoord = uvec2(320, 240);
in Block
{
    vec2 texCoord;
} inBlock;
layout(location = FRAG_COLOR, index = 0) out vec4 color;
void main()
{
    if(all(equal(pickingCoord, uvec2(gl_FragCoord.xy))))
    {
        imageStore(depth, 0, vec4(gl_FragCoord.z, 0, 0, 0));
        color = vec4(1, 0, 1, 1);
    }
    else
        color = texture(diffuse, inBlock.texCoord.st);
}
Finally, at the end, we clean up all our OpenGL resources:
@Override
protected boolean end(GL gl) {
    GL4 gl4 = (GL4) gl;
    gl4.glDeleteProgramPipelines(1, pipelineName, 0);
    gl4.glDeleteProgram(programName);
    gl4.glDeleteBuffers(Buffer.MAX.ordinal(), bufferName, 0);
    gl4.glDeleteTextures(Texture.MAX.ordinal(), textureName, 0);
    gl4.glDeleteVertexArrays(1, vertexArrayName, 0);
    return true;
}

OpenGL - Provide a set of values in a 1D texture

I want to provide a set of values in a 1D texture. Please consider the following simple example:
gl.glBindTexture(GL4.GL_TEXTURE_1D, myTextureHandle);
FloatBuffer values = Buffers.newDirectFloatBuffer(N);
for (int x = 0; x < N; ++x)
    values.put(x);
values.rewind();
gl.glTexImage1D(GL4.GL_TEXTURE_1D, 0, GL4.GL_R32F, N, 0, GL4.GL_RED, GL4.GL_FLOAT, values);
Here, N is the number of values I want to store in the texture. However, calling textureSize(myTexture, 0) in my fragment shader yields 1 (no matter what I set N to). So, what's going wrong here?
EDIT: The code above is executed at initialization. My rendering loop looks like:
gl.glClear(GL4.GL_COLOR_BUFFER_BIT | GL4.GL_DEPTH_BUFFER_BIT);
gl.glUseProgram(myProgram);
gl.glActiveTexture(MY_TEXTURE_INDEX);
gl.glBindTexture(GL4.GL_TEXTURE_1D, myTextureHandle);
gl.glUniform1i(uMyTexture, MY_TEXTURE_INDEX);
gl.glDrawArrays(GL4.GL_POINTS, 0, 1);
My vertex shader consists of a main function which does nothing. I'm using the geometry shader to create a fullscreen quad. The fragment shader code looks like:
uniform sampler1D myTexture;
out vec4 color;
void main()
{
    if (textureSize(myTexture, 0) == 1)
    {
        color = vec4(1, 0, 0, 1);
        return;
    }
    color = vec4(1, 1, 0, 1);
}
The result is a red-colored window.
Make sure your texture is complete. Since GL_TEXTURE_MIN_FILTER defaults to GL_NEAREST_MIPMAP_LINEAR, you'll have to supply a full set of mipmaps.
Or set GL_TEXTURE_MIN_FILTER to GL_NEAREST/GL_LINEAR.
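For example, right after the glTexImage1D call (sketched in C-style GL; the JOGL binding takes the same arguments):
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // texture is now complete without mipmaps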
You also need to pass GL_TEXTURE0 + MY_TEXTURE_INDEX (instead of only MY_TEXTURE_INDEX) to glActiveTexture():
gl.glActiveTexture( GL_TEXTURE0 + MY_TEXTURE_INDEX );
...
gl.glUniform1i( uMyTexture, MY_TEXTURE_INDEX );