How can I fix this texture rendering issue in this basic OpenGL demo? [closed]

I tried making a basic square + texture demo in Rust OpenGL, and the texture I rendered looks like this:
...while it's supposed to look like this:
Below is my texture creation and upload code, a Rust-ized version of learnopengl.com's code for creating, binding and uploading a basic texture:
pub fn from_image(file: String) -> Result<Texture, String> {
    let mut texture: GLuint = 0;
    match image::open(file.clone()) {
        Err(err) => panic!("Could not load image {:?}: {}", file, err),
        Ok(img) => {
            let (width, height) = img.dimensions();
            let img = match img {
                DynamicImage::ImageRgb8(img) => img,
                img => img.to_rgb8(),
            };
            unsafe {
                glGenTextures(1, &mut texture);
                if texture == 0 {
                    return Err(String::from("Error creating texture"));
                }
                glBindTexture(GL_TEXTURE_2D, texture);
                // set texture wrapping to GL_REPEAT (default wrapping method)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT as GLint);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT as GLint);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR as GLint);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR as GLint);
                glTexImage2D(
                    GL_TEXTURE_2D,
                    0,
                    GL_RGB8 as GLint,
                    width as GLsizei,
                    height as GLsizei,
                    0,
                    GL_RGB,
                    GL_UNSIGNED_BYTE,
                    img.as_raw().as_ptr().cast(),
                );
                glGenerateMipmap(GL_TEXTURE_2D);
            }
            Ok(Texture(texture))
        }
    }
}
Here are my shaders:
const VERT_SHADER: &str = r#"#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec2 aTexCoords;
out vec2 texCoords;
void main() {
    gl_Position = vec4(pos.x, pos.y, pos.z, 1.0);
    texCoords = aTexCoords;
}"#;

const FRAG_SHADER: &str = r#"#version 330 core
out vec4 final_color;
in vec2 texCoords;
uniform sampler2D Texture;
void main() {
    final_color = texture(Texture, texCoords);
}
"#;
Here are my VAO, VBO and EBO data and attribute setup, which I took from learnopengl.com:
const VAO: [Vertex; 4] = [
    [0.5, 0.5, 0.0, 1.0, 1.0],   // top right:    x, y, z, u, v
    [0.5, -0.5, 0.0, 1.0, 0.0],  // bottom right
    [-0.5, -0.5, 0.0, 0.0, 0.0], // bottom left
    [-0.5, 0.5, 0.0, 0.0, 1.0],  // top left
];
const EBO: [TriIndexes; 2] = [[0, 1, 3], [1, 2, 3]];
unsafe {
    glVertexAttribPointer(
        0,
        3,
        GL_FLOAT,
        GL_FALSE,
        (5 * size_of::<f32>()) as GLsizei,
        0 as *const _,
    );
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(
        1,
        2,
        GL_FLOAT,
        GL_FALSE,
        (5 * size_of::<f32>()) as GLsizei,
        (2 * size_of::<f32>()) as *const _,
    );
    glEnableVertexAttribArray(1);
}

The issue is that the offset of the second vertex attribute (the texture coordinates) is incorrect. It is currently 2 floats when it should be 3, because the texture coordinates start after the three position components of each vertex. With the wrong offset, the four vertices get the texture coordinates [0.0, 1.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0]: the u coordinate is always 0.0, so only the column of texels at u = 0 is ever sampled and stretched across the quad.
This is the corrected version of the call:
glVertexAttribPointer(
    1,
    2,
    GL_FLOAT,
    GL_FALSE,
    (5 * size_of::<f32>()) as GLsizei,
    (3 * size_of::<f32>()) as *const _, // <- this was the line with the issue
);
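Putting both calls together, the attribute layout for the interleaved [x, y, z, u, v] vertices then looks like this (a sketch against the same bindings the question uses, not the poster's exact code):
// Interleaved layout: 5 floats per vertex, positions at offset 0, texcoords at offset 3
let stride = (5 * size_of::<f32>()) as GLsizei;
unsafe {
    // attribute 0: position, 3 floats starting at byte offset 0
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0 as *const _);
    glEnableVertexAttribArray(0);
    // attribute 1: texture coordinates, 2 floats starting after the 3 position floats
    glVertexAttribPointer(
        1,
        2,
        GL_FLOAT,
        GL_FALSE,
        stride,
        (3 * size_of::<f32>()) as *const _,
    );
    glEnableVertexAttribArray(1);
}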

Related

imageStore() doesn't work with integer types

I bind a texture to both a texture unit and a image unit, write to it via imageStore() in the compute shader, and sample it with a sampler2D in the fragment shader.
This works when the pixel format is floating point, but stops working with integers. glGetError() yields nothing.
glew and glm are used; should be irrelevant to the problem though.
main.cpp:
constexpr glm::vec2 TEX_DIM = { 2048.0f, 2048.0f / ASPECT_RATIO };
constexpr int LOCAL_WORKGROUP_SIZE = 32;

// After setting up vbo, etc //////////

// Texture
const unsigned int texSlot = 0;
unsigned int texId;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0 + texSlot);
glBindTexture(GL_TEXTURE_2D, texId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, int(TEX_DIM.x), int(TEX_DIM.y), 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);

// Binding to image unit
const unsigned int imageSlot = 0;
glBindImageTexture(imageSlot, texId, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_R8);

// Shaders
unsigned int computeShader;
unsigned int graphicsShader;
// After creating shaders //////////

// Graphics shader
auto mvp = glm::ortho(0.0f, (float)WINDOW_WIDTH, 0.0f, (float)WINDOW_HEIGHT, -1.0f, 1.0f);
glUseProgram(graphicsShader);
glUniform1i(glGetUniformLocation(graphicsShader, "uTexture"), texSlot);
glUniformMatrix4fv(glGetUniformLocation(graphicsShader, "uMVP"), 1, GL_FALSE, &mvp[0][0]);

// Compute shader
glUseProgram(computeShader);
glUniform1i(glGetUniformLocation(computeShader, "uImage"), imageSlot);
// After validating shaders //////////

while (true)
{
    // Compute
    glUseProgram(computeShader);
    glDispatchCompute(TEX_DIM.x / LOCAL_WORKGROUP_SIZE, TEX_DIM.y / LOCAL_WORKGROUP_SIZE, 1);
    glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);

    // Draw
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(graphicsShader);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr);
    glfwSwapBuffers(window);
}
Compute Shader:
# version 430 core
layout(local_size_x = 32, local_size_y = 32) in;
layout(r8) uniform image2D uImage;
void main()
{
    // Writing all to red, for testing purpose
    imageStore(uImage, ivec2(gl_GlobalInvocationID.xy), vec4(1.0, 0.0, 0.0, 0.0));
}
Vertex Shader:
# version 430 core
layout(location = 0) in vec2 position;
layout(location = 1) in vec2 texCoord;
out vec2 vTexCoord;
uniform mat4 uMVP;
void main()
{
    gl_Position = uMVP * vec4(position, 0.0, 1.0);
    vTexCoord = texCoord;
}
Fragment Shader:
# version 430 core
in vec2 vTexCoord;
out vec4 color;
uniform sampler2D uTexture;
void main()
{
    color = vec4(
        texture(uTexture, vTexCoord).x,
        0.0, 0.0, 1.0
    );
}
Below is my attempt to convert the minimal program to use integers instead; it gives me a black screen but no errors otherwise.
main.cpp:
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8UI, int(TEX_DIM.x), int(TEX_DIM.y), 0, GL_RED_INTEGER, GL_UNSIGNED_BYTE, nullptr);
glBindImageTexture(imageSlot, texId, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_R8UI);
Compute Shader:
layout(r8ui) uniform uimage2D uImage;
void main()
{
    // Writing all to red, for testing purpose
    imageStore(uImage, ivec2(gl_GlobalInvocationID.xy), uvec4(255, 0, 0, 0));
}
Fragment Shader:
uniform usampler2D uTexture;
void main()
{
    color = vec4(
        float(texture(uTexture, vTexCoord).x) / 256.0,
        0.0, 0.0, 1.0
    );
}
I've thought about GL_R8UI being incompatible, but the wiki says both GL_R8 and GL_R8UI are fine to use.

Working with GL_TEXTURE_3D

I'm trying to read a file representing values to be mapped to colors when rendering a volume, i.e., each voxel has its color represented in the input file. Here is a dummy test file:
4 4 4
1 1 1 1
1 1 1 1
1 1 1 1
1 1 1 1
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
1 1 1 1
1 1 1 1
1 1 1 1
1 1 1 1
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
It should define a volume with its top layer black, followed by a white layer, a black layer, and white again.
That's how I read it:
int width, height, depth;
file >> width;
file >> height;
file >> depth;
printf("Texture data: %d, %d, %d\n", width, height, depth);

// Creating buffer
unsigned char buffer[height][width][depth];

// Reading content
int v;
for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
        for (int k = 0; k < depth; k++) {
            file >> v;
            buffer[i][j][k] = v * 255;
        }
    }
}
And that's how I create my texture:
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_3D, textureID);
// Give the image to OpenGL
glTexImage3D(GL_TEXTURE_3D, 0, GL_RED, height, width, depth, 0, GL_RED, GL_UNSIGNED_BYTE, buffer);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
These are the values that are going to be used to construct my VAO:
_modelCoordinates = { -1.0, -1.0, 1.0,
1.0, -1.0, 1.0,
1.0, 1.0, 1.0,
-1.0, 1.0, 1.0,
-1.0, -1.0, -1.0,
1.0, -1.0, -1.0,
1.0, 1.0, -1.0,
-1.0, 1.0, -1.0 };
_modelIndices = { 0, 1, 2,
2, 3, 0,
1, 5, 6,
6, 2, 1,
7, 6, 5,
5, 4, 7,
4, 0, 3,
3, 7, 4,
4, 5, 1,
1, 0, 4,
3, 2, 6,
6, 7, 3 };
_textureCoordinates = { 0.0, 0.0, 1.0,
1.0, 0.0, 1.0,
1.0, 1.0, 1.0,
0.0, 1.0, 1.0,
0.0, 0.0, 0.0,
1.0, 0.0, 0.0,
1.0, 1.0, 0.0,
0.0, 1.0, 0.0 };
Finally, these are my shaders:
Vertex Shader:
#version 410 core
uniform mat4 mvMatrix;
uniform mat4 pMatrix;
in vec4 vPosition;
in vec3 texCoord;
// Input for the fragment shader
smooth out vec3 uv;
void main() {
    gl_Position = pMatrix * mvMatrix * vPosition;
    uv = texCoord;
}
Frag Shader:
#version 410 core
uniform sampler3D tex;
in vec3 uv;
out vec4 fragColor;
void main()
{
    vec3 color = texture(tex, uv).rrr;
    fragColor.rgb = color;
}
After loading the uniforms and attribute values, I draw the volume:
_modelView = modelView;
_projection = projection;
_succeed = Bind();
if (_succeed)
{
    glBindVertexArray(_vao);
    LoadUniformVariables();
    glDrawElements(GL_TRIANGLES, _modelIndices.size(), GL_UNSIGNED_INT, 0);
    glBindVertexArray(0);
}
UnBind();
However, all that I get is a blank volume:
I don't know how to properly set the texture coordinates, or how to correctly implement the fragment shader for this case.
It looks like your texture coordinates are not properly assigned to the face vertices, so that should be fixed. However, you seem to expect that OpenGL will "magically" perform some kind of volume rasterization for you, and no amount of texture-coordinate fixing will do that.
With the fragment shader as you wrote it, only a slice through the volume data is sampled. What you have to do instead is write a fragment shader that actually sends a ray through the texture and samples it at every step along the ray. There's a whole chapter on volume rendering in GPU Gems:
http://http.developer.nvidia.com/GPUGems/gpugems_ch39.html
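As a rough sketch of what such a ray-marching fragment shader can look like (illustrative only, not from the original question or answer; the rayDir uniform and the fixed step count are assumptions):
#version 410 core
uniform sampler3D tex;
uniform vec3 rayDir;      // view direction in texture space; an assumed uniform, not in the original code
in vec3 uv;               // entry point into the volume, straight from the vertex shader
out vec4 fragColor;
void main()
{
    const int STEPS = 128;                              // arbitrary fixed step count
    vec3 stepVec = normalize(rayDir) / float(STEPS);
    vec3 pos = uv;
    float accum = 0.0;
    for (int i = 0; i < STEPS; ++i) {
        accum += texture(tex, pos).r / float(STEPS);    // simple additive accumulation
        pos += stepVec;
        if (any(lessThan(pos, vec3(0.0))) || any(greaterThan(pos, vec3(1.0))))
            break;                                      // ray has left the [0,1]^3 volume
    }
    fragColor = vec4(vec3(accum), 1.0);
}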
After adding these lines:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
Everything worked fine.

Applying GLM-like "perspective" and "lookat" calculations results in my shape disappearing

I'm new to OpenGL and am working my way through translating the C++ OpenGL tutorials at http://open.gl/ to Python and Pyglet (https://github.com/01AutoMonkey/open.gl-tutorials-to-pyglet) and am currently at the second example in the "Transformation" section (http://open.gl/transformations).
It's done, except I just get a black screen instead of an image moving in a particular way (two triangles making up a rectangle are supposed to rotate and the camera is supposed to tilt a little). The problem seems to stem from the matrix calculations, more specifically the applying of "perspective" and "lookat" to gain a "projection". If I remove those two from my vertex shader calculations it works: I get a rotating image, but without the tilting of the camera which the two matrices were supposed to deliver.
I've compared my code with the C++ code and I don't see any discrepancies and I fail to see where the problem could lie.
How I'm translating the code:
C++ to Python and ctypes
SFML to Pyglet
GLEW to pyglet.gl
SOIL to pyglet.image
GLM to pyeuclid <-- note that pyeuclid is what I'm using to calculate the matrices
Could it be that I'm supposed to send the matrices in some other form than a flat list? Or maybe the code is working but I'm doing something wrong, or pyglet works differently and the image is being displayed outside the screen?
The C++ code:
// Set up projection
glm::mat4 view = glm::lookAt(
glm::vec3(1.2f, 1.2f, 1.2f),
glm::vec3(0.0f, 0.0f, 0.0f),
glm::vec3(0.0f, 0.0f, 1.0f)
);
GLint uniView = glGetUniformLocation(shaderProgram, "view");
glUniformMatrix4fv(uniView, 1, GL_FALSE, glm::value_ptr(view));
glm::mat4 proj = glm::perspective(45.0f, 800.0f / 600.0f, 1.0f, 10.0f);
GLint uniProj = glGetUniformLocation(shaderProgram, "proj");
glUniformMatrix4fv(uniProj, 1, GL_FALSE, glm::value_ptr(proj));
My Python/Pyglet equivalent:
# Set up projection
eye = Vector3(1.2, 1.2, 1.2)
at = Vector3(0.0, 0.0, 0.0)
up = Vector3(0.0, 0.0, 1.0)
view = Matrix4.new_look_at(eye, at, up)
view = view[:]
view_ctype = (GLfloat * len(view))(*view)
uniView = glGetUniformLocation(shader.handle, "view")
glUniformMatrix4fv(uniView, 1, GL_FALSE, view_ctype)
proj = Matrix4.new_perspective(45.0, 800.0 / 600.0, 1.0, 10.0)
proj = proj[:]
proj_ctype = (GLfloat * len(proj))(*proj)
uniProj = glGetUniformLocation(shader.handle, "proj")
glUniformMatrix4fv(uniProj, 1, GL_FALSE, proj_ctype)
The full script:
import pyglet
from pyglet.gl import *
from shader import Shader
from ctypes import pointer, sizeof
import math
import time
from euclid import *
window = pyglet.window.Window(800, 600, "OpenGL")
window.set_location(100, 100)
# Shaders (Vertex and Fragment shaders)
vertex = """
#version 150
in vec2 position;
in vec2 texcoord;
out vec2 Texcoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 proj;
void main() {
Texcoord = texcoord;
gl_Position = proj * view * model * vec4(position, 0.0, 1.0);
}
"""
fragment = """
#version 150 core
in vec2 Texcoord;
out vec4 outColor;
uniform sampler2D texKitten;
uniform sampler2D texPuppy;
void main() {
outColor = mix(texture(texKitten, Texcoord), texture(texPuppy, Texcoord), 0.5);
}
"""
## Compiling shaders and combining them into a program
shader = Shader(vertex, fragment)
shader.bind() #glUseProgram
# Vertex Input
## Vertex Array Objects
vao = GLuint()
glGenVertexArrays(1, pointer(vao))
glBindVertexArray(vao)
## Vertex Buffer Object
vbo = GLuint()
glGenBuffers(1, pointer(vbo)) # Generate 1 buffer
# Position Texcoords
vertices = [-0.5, 0.5, 0.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, -0.5, 1.0, 0.0,
-0.5, -0.5, 0.0, 0.0]
## Convert the verteces array to a GLfloat array, usable by glBufferData
vertices_gl = (GLfloat * len(vertices))(*vertices)
## Upload data to GPU
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices_gl), vertices_gl, GL_STATIC_DRAW)
# Element array
ebo = GLuint()
glGenBuffers(1, pointer(ebo))
elements = [0, 1, 2,
2, 3, 0]
elements_gl = (GLuint * len(elements))(*elements)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(elements_gl), elements_gl, GL_STATIC_DRAW);
# Making the link between vertex data and attributes
## shader.handle holds the value of glCreateProgram()
posAttrib = glGetAttribLocation(shader.handle, "position");
glEnableVertexAttribArray(posAttrib);
glVertexAttribPointer(posAttrib, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), 0);
texAttrib = glGetAttribLocation(shader.handle, "texcoord");
glEnableVertexAttribArray(texAttrib);
glVertexAttribPointer(texAttrib, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), 2 * sizeof(GLfloat));
# Load textures
textures = [0] * 2
textures_ctype = (GLuint * len(textures))(*textures)
glGenTextures(2, textures_ctype);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures_ctype[0]);
image = pyglet.image.load("sample.png")
width, height = image.width, image.height
image = image.get_data('RGB', width * 3)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
glUniform1i(glGetUniformLocation(shader.handle, "texKitten"), 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textures_ctype[1]);
image = pyglet.image.load("sample2.png")
width, height = image.width, image.height
image = image.get_data('RGB', width * 3)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
glUniform1i(glGetUniformLocation(shader.handle, "texPuppy"), 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
uniModel = glGetUniformLocation(shader.handle, "model");
# Set up projection
eye = Vector3(1.2, 1.2, 1.2)
at = Vector3(0.0, 0.0, 0.0)
up = Vector3(0.0, 0.0, 1.0)
view = Matrix4.new_look_at(eye, at, up)
view = view[:]
view_ctype = (GLfloat * len(view))(*view)
uniView = glGetUniformLocation(shader.handle, "view")
glUniformMatrix4fv(uniView, 1, GL_FALSE, view_ctype)
proj = Matrix4.new_perspective(45.0, 800.0 / 600.0, 1.0, 10.0)
proj = proj[:]
proj_ctype = (GLfloat * len(proj))(*proj)
uniProj = glGetUniformLocation(shader.handle, "proj")
glUniformMatrix4fv(uniProj, 1, GL_FALSE, proj_ctype)
# Set clear color
glClearColor(0.0, 0.0, 0.0, 1.0)
@window.event
def on_draw():
    # Clear the screen to black
    glClear(GL_COLOR_BUFFER_BIT)
    # Calculate transformation
    model = Quaternion.new_rotate_axis(time.clock() * math.pi, Vector3(0, 0, 1))
    model = model.get_matrix()[:]
    model_ctype = (GLfloat * len(model))(*model)
    glUniformMatrix4fv(uniModel, 1, GL_FALSE, model_ctype)
    # Draw a rectangle from the 2 triangles using 6 indices
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0)

@window.event
def on_key_press(symbol, modifiers):
    pass

@window.event
def on_key_release(symbol, modifiers):
    pass

def update(dt):
    pass

pyglet.clock.schedule(update)
pyglet.app.run()
OpenGL expects matrices to be provided in column-major order. GLM uses the same convention, however many matrix math libraries not designed for use with OpenGL use row-major order.
My Python is rusty, but a cursory inspection of the pyeuclid source seems to confirm that it uses row-major order.
Therefore, you need to convert from row-major to column major order before passing any matrices to functions like glUniformMatrix4fv. Luckily, this is a simple matter; it's just the transpose of the matrix!
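As an illustrative sketch (not part of the original post), and assuming pyeuclid really does flatten its matrices in row-major order, the flat 16-element list can be reordered before the upload, or the transpose argument of glUniformMatrix4fv can simply be set to GL_TRUE:
def to_column_major(m):
    # m is the flat 16-float list pyeuclid gives back (assumed row-major);
    # return the same matrix flattened in column-major order, as OpenGL expects.
    return [m[row * 4 + col] for col in range(4) for row in range(4)]

view = Matrix4.new_look_at(eye, at, up)
view = to_column_major(view[:])
view_ctype = (GLfloat * len(view))(*view)
glUniformMatrix4fv(uniView, 1, GL_FALSE, view_ctype)

# Alternatively, keep the row-major list as-is and let OpenGL do the transpose:
#   glUniformMatrix4fv(uniView, 1, GL_TRUE, view_ctype_row_major)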

OpenGL GLSL texture(...) function always returning v4(0,0,0,1)

I have the following setup for basic texture mapping
void init(void) {
    glGenVertexArrays(NumVAOs, VAOs);
    glBindVertexArray(VAOs[Triangles]);

    GLfloat vertices[6][2] = {
        { -0.90, -0.90 }, // Triangle 1
        {  0.85, -0.90 },
        { -0.90,  0.85 },
        {  0.1,   0.1 },  // UVs
        {  0.9,   0.1 },
        {  0.1,   0.9 }
    };

    glGenBuffers(NumBuffers, Buffers);
    glBindBuffer(GL_ARRAY_BUFFER, Buffers[ArrayBuffer]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);

    GLuint program = LoadShaders("triangles.vert", "triangles.frag");
    glUseProgram(program);

    glVertexAttribPointer(vPosition, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
    glEnableVertexAttribArray(vPosition);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(2 * 3 * sizeof(GLfloat)));
    glEnableVertexAttribArray(1);

    GLuint sloc = glGetUniformLocation(program, "mySampler");
    glUniform1i(sloc, 0);

    int x, y, n;
    unsigned char *data = stbi_load("bricks.jpg", &x, &y, &n, 4);
    glGenTextures(1, &m_textureId);
    glBindTexture(GL_TEXTURE_2D, m_textureId);
    glTexParameterf(m_textureId, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(m_textureId, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, x, y, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
    stbi_image_free(data);
}
void display(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, m_textureId);
    glBindVertexArray(VAOs[Triangles]);
    glDrawArrays(GL_TRIANGLES, 0, NumVertices);
    glFlush();
}
Vertex Shader:
#version 330 core
layout(location = 0) in vec4 vPosition;
layout(location = 1) in vec2 uv;
out vec2 texCoord;
void main() {
    gl_Position = vPosition;
    texCoord = uv;
}
Fragment Shader:
#version 330 core
in vec2 texCoord;
out vec4 fColor;
uniform sampler2D mySampler;
void main() {
    fColor = texture(mySampler, texCoord);
}
However, nothing is displayed on the screen. If I swap out my frag shader with one that just sets a static color, I see the triangle. I have confirmed that I am getting data from the image and printed out a sample set of RGBA data that looks correct (rgb vary, but alpha is constant 0xFF). I've read at least 5 tutorials but can't seem to get it right.
I've also confirmed that if I do:
fColor = vec4(texCoord.x, texCoord.y, 0.0, 1.0);
I get a gradient of colors which I believe tells me I'm getting correctly interpolated values of my UVs coming through.
These texture parameters are required: without them the default minification filter (GL_NEAREST_MIPMAP_LINEAR) expects mipmap levels that were never uploaded, so the texture is incomplete and samples as black.
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );

Sampling from a texture in OpenGL is black

I'm trying out my hand at graphics, following a tutorial at http://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_06
Problem: When I try to texture my cube, my sample is black.
Screenshot: http://puu.sh/2JP1H.jpg (note: I set blue = uv.x to test my UVs)
I looked at the threads OpenGL textures appear just black, and
Texture is all black, but it seemed they had different problems.
First I load my texture using SOIL image loading library:
int w, h;
unsigned char* img = SOIL_load_image("resources/images/4x4window.jpg",&w,&h,0,0);
ErrorIf(!img, "%s", SOIL_last_result());
glGenTextures(1, &texture_id);
glBindTexture(GL_TEXTURE, texture_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGB,w,h,0,GL_RGB,GL_UNSIGNED_BYTE,img);
My render function where I pass my texture to the shader program:
void onDisplay()
{
    glClearColor(1.0, 1.0, 1.0, 1.0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glUseProgram(program);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture_id);
    glUniform1i(uniform_myTexture, /*GL_TEXTURE*/0);

    glEnableVertexAttribArray(attribute_coord3d); // open shader var
    glBindBuffer(GL_ARRAY_BUFFER, vbo_cube_verts); // put stuff in buffer
    glVertexAttribPointer(attribute_coord3d, 3, GL_FLOAT, GL_FALSE, 0, 0); // send

    glEnableVertexAttribArray(attribute_texcoord);
    glBindBuffer(GL_ARRAY_BUFFER, vbo_cube_texcoords);
    glVertexAttribPointer(attribute_texcoord, 2, GL_FLOAT, GL_FALSE, 0, 0);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_cube_elements);
    int size;
    glGetBufferParameteriv(GL_ELEMENT_ARRAY_BUFFER, GL_BUFFER_SIZE, &size);
    glDrawElements(GL_TRIANGLES, size / sizeof(GLushort), GL_UNSIGNED_SHORT, 0);

    glDisableVertexAttribArray(attribute_coord3d);
    glDisableVertexAttribArray(attribute_texcoord);
    glutSwapBuffers();
}
Vertex Shader:
attribute vec3 coord3d;
attribute vec2 texcoord;
varying vec2 f_texcoord;
uniform mat4 mvp;
void main(void)
{
    gl_Position = mvp * vec4(coord3d, 1.0);
    f_texcoord = texcoord;
}
Fragment Shader:
varying vec2 f_texcoord;
uniform sampler2D mytexture;
void main(void)
{
    vec4 result = texture2D(mytexture, f_texcoord);
    result.z = f_texcoord.x;
    result.w = 1;
    gl_FragColor = result;
}
The line where the texture is bound is:
glBindTexture(GL_TEXTURE, texture_id);
that should be:
glBindTexture(GL_TEXTURE_2D, texture_id);