I am currently building a height map terrain generator using OpenGL. It's a simple program that loads a height map image, iterates over the image data and generates vertices, indices and normals. In its current state it can render the height map with a single colour based on the normals.
My problem is generating correct UV coordinates for the diffuse map. It just comes out wrong:
This is the diffuse map I am trying to load:
Here is what I currently have:
Generate Vertices, Normals and Indices
// Generate Vertices and texture coordinates
for (int row = 0; row <= this->imageHeight; row++)
{
    for (int column = 0; column <= this->imageWidth; column++)
    {
        float x = (float)column / (float)this->imageWidth;
        float y = (float)row / (float)this->imageHeight;
        float pixel = this->imageData[this->imageWidth * row + column];
        float z;
        if (row == this->imageHeight || column == this->imageWidth || row == 0 || column == 0)
        {
            z = 0.0f;
        }
        else
        {
            z = float(pixel / 256.0) * this->scale;
        }
        MeshV3 mesh;
        mesh.position = glm::vec3(x, y, z);
        mesh.normal = glm::vec3(0.0, 0.0, 0.0);
        mesh.texture = glm::vec2(x, y);
        this->mesh.push_back(mesh);
    }
}
// Generate indices
for (int row = 0; row < this->imageHeight; row++)
{
    for (int column = 0; column < this->imageWidth; column++)
    {
        int row1 = row * (this->imageWidth + 1);
        int row2 = (row + 1) * (this->imageWidth + 1);
        // triangle 1
        this->indices.push_back(glm::uvec3(row1 + column, row1 + column + 1, row2 + column + 1));
        // triangle 2
        this->indices.push_back(glm::uvec3(row1 + column, row2 + column + 1, row2 + column));
    }
}
// Generate normals
for (int i = 0; i < this->indices.size(); i++)
{
    glm::vec3 v1 = this->mesh[this->indices[i].x].position;
    glm::vec3 v2 = this->mesh[this->indices[i].y].position;
    glm::vec3 v3 = this->mesh[this->indices[i].z].position;
    glm::vec3 edge1 = v1 - v2;
    glm::vec3 edge2 = v1 - v3;
    glm::vec3 normal = glm::normalize(glm::cross(edge1, edge2));
    this->mesh[this->indices[i].x].normal += normal;
    this->mesh[this->indices[i].y].normal += normal;
    this->mesh[this->indices[i].z].normal += normal;
}
I load the diffuse map with the following method
void Terrein::getDIffuseMap()
{
    glGenTextures(1, &this->texture);
    glBindTexture(GL_TEXTURE_2D, this->texture); // all upcoming GL_TEXTURE_2D operations now have effect on this texture object
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    int width, height, nrChannels;
    std::string path = "assets/diffuse.jpg";
    this->diffuseData = stbi_load(path.c_str(), &width, &height, &nrChannels, 0);
    if (this->diffuseData)
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, this->diffuseData);
        glGenerateMipmap(GL_TEXTURE_2D);
    }
    else
    {
        std::cout << "Failed to load diffuse texture" << std::endl;
    }
}
I can't seem to figure out what might be wrong here. Is there an issue with how I am loading the image, or am I not calculating the texture coordinates correctly? Please let me know if there is anything else I should provide; I have been stuck on this for a few days now. Thanks!
By default OpenGL assumes that the start of each row of an image is aligned to 4 bytes.
This is because the GL_UNPACK_ALIGNMENT parameter by default is 4.
Since the image has 3 color channels (GL_RGB) and is tightly packed, the size of a row of the image may not be aligned to 4 bytes.
When an RGB image with 3 color channels is loaded to a texture object, GL_UNPACK_ALIGNMENT has to be set to 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
GL_RGB, GL_UNSIGNED_BYTE, this->diffuseData);
The diffuse image in the question has a size of 390x390, so each row of the image is 390 * 3 = 1170 bytes.
Since 1170 is not divisible by 4 (1170 / 4 = 292.5), the start of every row after the first is not aligned to 4 bytes.
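As a concrete sketch, the fix slots into the question's getDIffuseMap just before the upload; choosing the format from nrChannels is an addition here, since stb_image may return 3 or 4 channels depending on the file:

    this->diffuseData = stbi_load(path.c_str(), &width, &height, &nrChannels, 0);
    if (this->diffuseData)
    {
        GLenum format = (nrChannels == 4) ? GL_RGBA : GL_RGB;   // assumption: handle both 3- and 4-channel images
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);                  // rows are tightly packed, so don't assume 4-byte alignment
        glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, this->diffuseData);
        glGenerateMipmap(GL_TEXTURE_2D);
    }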
Related question: Failing to map a simple unsigned byte rgb texture to a quad
I'm trying to create a sprite animation with a texture array. Right now I have the following code:
int width, height, depth;
stbi_set_flip_vertically_on_load(true);
byte_t* buffer = stbi_load(R"(.\fire.jpg)",
                           &width, &height, &depth, STBI_rgb_alpha);
if (buffer == nullptr) {
    std::cerr << "Could not read texture" << std::endl;
    return EXIT_FAILURE;
}

GLuint texture_id;
const GLenum target = GL_TEXTURE_2D_ARRAY;
SAFE_CALL(glGenTextures(1, &texture_id));
SAFE_CALL(glBindTexture(target, texture_id));
SAFE_CALL(glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, 0));
SAFE_CALL(glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, 1));
SAFE_CALL(glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
SAFE_CALL(glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
SAFE_CALL(glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
SAFE_CALL(glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));

const GLsizei tile_w_count = 6, tile_h_count = 6;
const GLsizei total_tiles = tile_w_count * tile_h_count;
const GLsizei tile_w = width / tile_w_count,
              tile_h = height / tile_h_count;
std::cout << "Texture WxH: " << width << "x" << height;
std::cout << ", Tile WxH: " << tile_w << "x" << tile_h << std::endl;

SAFE_CALL(glTexStorage3D(target, 1, GL_RGBA8, tile_w, tile_h,
                         total_tiles));
SAFE_CALL(glPixelStorei(GL_UNPACK_ROW_LENGTH, width));
SAFE_CALL(glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, height));
for (GLsizei i = 0; i < total_tiles; ++i) {
    SAFE_CALL(glTexSubImage3D(
        GL_TEXTURE_2D_ARRAY,
        0,
        0, 0, i,
        tile_w, tile_h, 1,
        GL_RGBA,
        GL_UNSIGNED_BYTE,
        buffer + (i * tile_w * tile_h * depth)
    ));
}
Fragment shader:
#version 460 core

in vec2 tex_coords;
out vec4 frag_color;

uniform sampler2DArray texture_0;
uniform int current_frame;
uniform int total_frames;

float actual_layer() {
    return max(0, min(total_frames - 1,
                      floor(current_frame + 0.5)));
}

void main() {
    frag_color = texture(texture_0, vec3(tex_coords, actual_layer()));
}
And it seems like I crop the source texture incorrectly, because when I run my program in the Nsight debugger I see the following:
Texture array:
Original image:
Is it an issue with cropping the source image, or an issue with the fragment shader? How do I make the sprite animation work correctly?
You calculate the data offset incorrectly. The correct way would be:
Convert i to a 2-d index of the tile:
int ix = i % tile_w_count;
int iy = i / tile_w_count;
The x and y coordinates of its top-left pixel would be at
int x = ix*tile_w;
int y = iy*tile_h;
The offset can then be calculated as:
buffer + 4*(y*width + x)
Note that you should use 4 instead of depth: since STBI_rgb_alpha was requested, the returned buffer always has 4 channels per pixel, while depth holds the number of channels found in the file, not the number in the returned data.
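Put together, a sketch of the corrected upload loop (names taken from the question; GL_UNPACK_ROW_LENGTH and GL_UNPACK_IMAGE_HEIGHT stay set as before, so each tile row is read with a stride of width pixels):

    for (GLsizei i = 0; i < total_tiles; ++i) {
        const GLsizei ix = i % tile_w_count;   // tile column in the atlas
        const GLsizei iy = i / tile_w_count;   // tile row in the atlas
        const GLsizei x = ix * tile_w;         // top-left pixel of the tile
        const GLsizei y = iy * tile_h;
        SAFE_CALL(glTexSubImage3D(
            GL_TEXTURE_2D_ARRAY, 0,
            0, 0, i,
            tile_w, tile_h, 1,
            GL_RGBA, GL_UNSIGNED_BYTE,
            buffer + 4 * (y * width + x)       // 4 channels, because STBI_rgb_alpha was requested
        ));
    }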
I'm now building a voxel game. In the beginning I used a texture atlas that stores all the voxel textures, and it worked fine. After that I decided to use greedy meshing in my game, so a texture atlas is no longer practical. I read some articles which said I should use a texture array instead, so I tried to read up on and use the texture array technique for texturing. However, the result I got was all black in my game. So what am I missing?
This is my texture atlas (600 x 600)
Here is my Texture2DArray class, which I use to create and store a texture array:
Texture2DArray::Texture2DArray()
    : Internal_Format(GL_RGBA8), Image_Format(GL_RGBA),
      Wrap_S(GL_REPEAT), Wrap_T(GL_REPEAT), Wrap_R(GL_REPEAT),
      Filter_Min(GL_NEAREST), Filter_Max(GL_NEAREST), Width(0), Height(0)
{
    glGenTextures(1, &this->ID);
}

void Texture2DArray::Generate(GLuint width, GLuint height, unsigned char* data)
{
    this->Width = width;
    this->Height = height;
    glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
    // I cannot decide what the texture array layer (depth) should be (I put 1 here for the layer number)
    // Can anyone explain to me how to decide the texture layer here?
    glTexImage3D(GL_TEXTURE_2D_ARRAY, 1, this->Internal_Format, this->Width, this->Height, 0, 1, this->Image_Format, GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, this->Wrap_S);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, this->Wrap_T);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_R, this->Wrap_R);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Min);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Max);
    // unbind this texture before creating another texture
    glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
}

void Texture2DArray::Bind() const
{
    glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
}
Here is my Fragment Shader
#version 330 core

uniform sampler2DArray ourTexture;

in vec2 texCoord;
out vec4 FragColor;

void main(){
    // 1 (the layer number) just for testing
    FragColor = texture(ourTexture, vec3(texCoord, 1));
}
Here is my Vertex Shader
#version 330 core

layout (location = 0) in vec3 inPos;
layout (location = 1) in vec2 inTexCoord;

out vec2 texCoord;

uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main(){
    gl_Position = projection * view * vec4(inPos, 1.0f);
    texCoord = inTexCoord;
}
This is my rendering result:
EDIT 1:
I figured out that a texture atlas doesn't work directly as a texture array because it is a grid, so OpenGL cannot decide where each layer should begin. So I created a vertical texture (18 x 72) and tried again, but it is still all black everywhere.
I have checked that the texture is bound before it is used.
When the 3-dimensional texture image is specified, the depth parameter has to be the number of images that are stored in the array (e.g. imageCount). The width and height parameters are the width and height of 1 tile (e.g. tileW, tileH). The level parameter should be 0 and the border parameter has to be 0. See glTexImage3D. glTexImage3D creates the data store for the texture image; the memory required for the texture is reserved on the GPU. It is possible to pass a pointer to the image data, but it is not necessary.
If all the tiles are stored in a vertical atlas, then the image data can be set directly:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, data);
If the tiles are stored in a 16x16 atlas, then the tiles have to be extracted from the texture atlas and each one set as a subimage of the texture array. First create the texture image without passing any data:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
After that, use glTexSubImage3D to put the texture data into the data store of the texture object. glTexSubImage3D uses the existing data store and copies the data, e.g.:
for (int i = 0; i < imageCount; ++i)
{
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
                    0, 0, i,
                    tileW, tileH, 1,
                    this->Image_Format, GL_UNSIGNED_BYTE, data[i]);
}
To get data[i], you have to extract each tile from the texture atlas yourself (data[i] is the image data of one tile).
An algorithm to extract the tiles and specify the texture image may look as follows:
#include <algorithm> // std::copy
#include <vector>    // std::vector

unsigned char* data = ...; // 16x16 texture atlas image data
int tileW = ...;           // number of pixels in a row of 1 tile
int tileH = ...;           // number of pixels in a column of 1 tile
int channels = 4;          // 4 for RGBA

int tilesX = 16;
int tilesY = 16;
int imageCount = tilesX * tilesY;

glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
             tileW, tileH, imageCount, 0,
             this->Image_Format, GL_UNSIGNED_BYTE, nullptr);

std::vector<unsigned char> tile(tileW * tileH * channels);
int tileSizeX = tileW * channels;   // bytes in one row of one tile
int rowLen    = tilesX * tileSizeX; // bytes in one row of the whole atlas

for (int iy = 0; iy < tilesY; ++iy)
{
    for (int ix = 0; ix < tilesX; ++ix)
    {
        unsigned char *ptr = data + iy*tileH*rowLen + ix*tileSizeX; // top-left byte of tile (ix, iy)
        for (int row = 0; row < tileH; ++row)
            std::copy(ptr + row*rowLen, ptr + row*rowLen + tileSizeX,
                      tile.begin() + row*tileSizeX);

        int i = iy * tilesX + ix;
        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
                        0, 0, i,
                        tileW, tileH, 1,
                        this->Image_Format, GL_UNSIGNED_BYTE, tile.data());
    }
}
I'm writing a terrain renderer with C++ and OpenGL using nested grids and heightmaps, and am having trouble with the higher-detail (closer) grids looking blocky/terraced.
Initially I thought the problem was with the 8-bit heightmaps I was using, but 16-bit ones produce the same result (I'm using l3dt, World Machine and Photoshop to generate different maps).
My code needs to be abstracted from an engine pipeline so the heightmap is applied to the grids using transform feedback in a vertex shader:
void main()
{
    float texOffset = 1.0 / mapWidthTexels, mapOffset = scale / mapWidthWorld; // Size of a texel in [0, 1] coordinates and size of a quad in world space
    vec2 texCoord = (vertPos.xz * scale + offset) / mapWidthWorld + 0.5;       // Texture coordinate to sample heightmap at. vertPos is the input vertex, scale is pow(2, i) where i is the nested grid number, offset is eye position

    position = vertPos * scale;
    if(vertPos.y == 0.0) // Y coordinate of the input vertex is used as a flag to tell if the vertex is bordering between nested grids
        position.y = texture(heightmap, texCoord).r; // If it's not, just sample the heightmap
    else
    {
        // Otherwise get the two adjacent heights and average them
        vec2 side = vec2(0.0);
        if(abs(vertPos.x) < abs(vertPos.z))
            side.x = mapOffset;
        else
            side.y = mapOffset;
        float a = texture(heightmap, texCoord + side).r, b = texture(heightmap, texCoord - side).r;
        position.y = (a + b) * 0.5;
    }

    float mapF = mapWidthWorld * 0.5;
    position.xz = clamp(position.xz + offset, -mapF, mapF) - offset; // Vertices outside of the heightmap are clamped, creating degenerate triangles
    position.y *= heightMultiplier; // Y component so far is in the [0, 1] range, now multiply it to create the desired height

    // Calculate normal
    float leftHeight = texture(heightmap, texCoord + vec2(-texOffset, 0.0)).r * heightMultiplier, rightHeight = texture(heightmap, texCoord + vec2(texOffset, 0.0)).r * heightMultiplier;
    float downHeight = texture(heightmap, texCoord + vec2(0.0, -texOffset)).r * heightMultiplier, upHeight = texture(heightmap, texCoord + vec2(0.0, texOffset)).r * heightMultiplier;
    normal = normalize(vec3(leftHeight - rightHeight, 2.0, upHeight - downHeight));

    tex = vertTex; // Pass through texture coordinates
}
RAW 16-bit heightmaps are loaded as such:
std::ifstream file(_path, std::ios::ate | std::ios::binary);
int size = file.tellg();
file.seekg(0, std::ios::beg);
m_heightmapWidth = sqrt(size / 2); //Assume 16-bit greyscale
unsigned short *data = new unsigned short[size / 2];
file.read(reinterpret_cast<char*>(data), size);
if (m_flip16bit) // Dirty endianness fix
{
    for (int i = 0; i < size / 2; i++)
        data[i] = (data[i] << 8) | ((data[i] >> 8) & 0xFF);
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, m_heightmapWidth, m_heightmapWidth, 0, GL_RED, GL_UNSIGNED_SHORT, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
delete[] data;
Other formats are loaded similarly with stb_image.
The resulting terrain looks like this:
https://imgur.com/a/d8tDPGO
As you can see, areas with little to no slope have this terraced appearance. What am I doing wrong?
RAW 16-bit heightmaps are loaded as such:
[...]
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, m_heightmapWidth, m_heightmapWidth, 0, GL_RED, GL_UNSIGNED_SHORT, data);
                               ^^^^^^
Nope. The internalFormat parameter controls the format in which the texture is stored on the GPU, and GL_RED is just 8 bits per texel in any realistic scenario. You most likely want GL_R16 for a normalized 16-bit unsigned integer format.
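A minimal sketch of the corrected upload, with everything else unchanged from the question's loader:

    // GL_R16 stores a normalized 16-bit value per texel, preserving the heightmap precision
    glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, m_heightmapWidth, m_heightmapWidth, 0,
                 GL_RED, GL_UNSIGNED_SHORT, data);

If m_heightmapWidth happens to be odd, also set glPixelStorei(GL_UNPACK_ALIGNMENT, 2) (or 1) so the 2-byte rows are unpacked correctly.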
It turns out l3dt's textures were the problem: parts that were meant to be underwater came out terraced. Also, if the height range used in l3dt doesn't match heightMultiplier in the shader, artefacts can arise from that.
For the gui for my game, I have a custom texture object that stores the rgba data for a texture. Each GUI element registered by my game adds to the final GUI texture, and then that texture is overlayed onto the framebuffer after post-processing.
I'm having trouble converting my Texture object to an openGL texture.
First I create a 1D int array that goes rgbargbargba... etc.
public int[] toIntArray(){
    int[] colors = new int[(width*height)*4];
    int i = 0;
    for(int y = 0; y < height; ++y){
        for(int x = 0; x < width; ++x){
            colors[i] = r[x][y];
            colors[i+1] = g[x][y];
            colors[i+2] = b[x][y];
            colors[i+3] = a[x][y];
            i += 4;
        }
    }
    return colors;
}
Where r, g, b, and a are jagged int arrays from 0 to 255. Next I create the int buffer and the texture.
id = glGenTextures();
glBindTexture(GL_TEXTURE_2D, id);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
IntBuffer iBuffer = BufferUtils.createIntBuffer(((width * height)*4));
int[] data = toIntArray();
iBuffer.put(data);
iBuffer.rewind();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_INT, iBuffer);
glBindTexture(GL_TEXTURE_2D, 0);
After that I add a 50x50 red square to the upper left of the texture, bind the texture to the framebuffer shader, and render the fullscreen rect that displays my framebuffer.
frameBuffer.unbind(window.getWidth(), window.getHeight());
postShaderProgram.bind();
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, guiManager.texture()); // this gets the texture id that was created
postShaderProgram.setUniform("gui_texture", 1);
mesh.render();
postShaderProgram.unbind();
And then in my fragment shader, I try displaying the GUI:
#version 330

in vec2 Texcoord;
out vec4 outColor;

uniform sampler2D texFramebuffer;
uniform sampler2D gui_texture;

void main()
{
    outColor = texture(gui_texture, Texcoord);
}
But all it outputs is a black window!
I added a red 50x50 rectangle into the upper left corner and verified that it exists, but for some reason it isn't showing in the final output.
That gives me reason to believe that I'm not converting my texture into an opengl texture with glTexImage2D correctly.
Can you see anything I'm doing wrong?
Update 1:
Here I saw a similar thing being done with a float array, so I tried converting my 0-255 values to a 0-1 float array and passing that as the image data like so:
public float[] toFloatArray(){
    float[] colors = new float[(width*height)*4];
    int i = 0;
    for(int y = 0; y < height; ++y){
        for(int x = 0; x < width; ++x){
            colors[i] = ((r[x][y] * 1.0f) / 255);
            colors[i+1] = ((g[x][y] * 1.0f) / 255);
            colors[i+2] = ((b[x][y] * 1.0f) / 255);
            colors[i+3] = ((a[x][y] * 1.0f) / 255);
            i += 4;
        }
    }
    return colors;
}
...
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_FLOAT, toFloatArray());
And it works!
I'm going to leave the question open however as I want to learn why the int buffer wasn't working :)
When you specified GL_UNSIGNED_INT as the type of the "host" data, OpenGL expected 32 bits allocated for each color. Unsigned integer pixel data is normalized to the range [0.0, 1.0] by dividing by the maximum value of the type (about 4.2 billion for a 32-bit unsigned int), so each of your 0-255 values ends up at roughly 255 / 4294967295 ≈ 0.00000006, which is effectively black. As an exercise, using your original code, set the "clear" color of the screen to white, and see that a black rectangle is getting drawn on screen.
You have two options. The first is to scale the color values up to the range of GL_UNSIGNED_INT, which means multiplying each 0-255 value by 2^24 (e.g. Math.pow(2, 24)) and trusting that the resulting integer overflow behaves correctly (since Java doesn't have unsigned integer types).
The other, far safer option, is to store each 0-255 value in a byte[] (do not use char: char is 1 byte in C/C++/OpenGL, but 2 bytes in Java) and specify the type of the elements as GL_UNSIGNED_BYTE.
In the main function:
img = cvLoadImage("test.JPG");

// openCV functions to load and create matrix
CvMat *mat = cvCreateMat(img->height, img->width, CV_8SC3);
cvGetMat(img, mat, 0, 1);

// creating the 3-dimensional array during runtime
data = new float**[img->height];
for(int i = 0; i < img->height; i++){
    data[i] = new float*[img->width];
}
for(int i = 0; i < img->height; i++){
    for(int j = 0; j < img->width; j++)
        data[i][j] = new float[3];
}

// setting RGB values
for(int i = 0; i < img->height; i++)
{
    for(int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);
        data[i][j][0] = scal.val[0];
        data[i][j][1] = scal.val[1];
        data[i][j][2] = scal.val[2];
    }
}
I am using openCV to get the image pixel data, storing it in the dynamically created matrix "data". Now I generate the texture and bind it:
glGenTextures(1, &texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0, GL_RGB, GL_FLOAT, data);
glBindTexture(GL_TEXTURE_2D, texName);

glBegin(GL_QUADS);
    glTexCoord2f(0, 0);
    glVertex3f(-1, -1, 0);
    glTexCoord2f(0, 1);
    glVertex3f(-1, 1, 0);
    glTexCoord2f(1, 1);
    glVertex3f(1, 1, 0);
    glTexCoord2f(1, 0);
    glVertex3f(1, -1, 0);
glEnd();
There are no compilation errors, but at runtime the window displays the square I made, not the image that I tried to convert into a texture.
How do I load an arbitrary image into a texture using the pixel data I extract with openCV? I have printed the RGB values and they seem legitimate, and the number of triplets printed is as expected.
glBindTexture should be called before the glTexParameteri...glTexImage2D calls, so openGL knows which texture you're setting up.
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0 ,GL_RGB, img->width,img->height,0,GL_RGB,GL_FLOAT,data);
More importantly, you are not setting up your data variable correctly:
float* data = new float[img->height * img->width * 3];
for (int i = 0; i < img->height; i++)
{
    for (int j = 0; j < img->width; j++)
    {
        CvScalar scal = cvGet2D(mat, i, j);
        data[(i * img->width + j) * 3 + 0] = scal.val[0];
        data[(i * img->width + j) * 3 + 1] = scal.val[1];
        data[(i * img->width + j) * 3 + 2] = scal.val[2];
    }
}
Also, you will likely need to swap the order of the color components and convert them to the 0..1 range: OpenCV's cvLoadImage returns pixels in BGR order with channel values in 0..255, while GL_FLOAT pixel data for a GL_RGB texture is expected in 0..1.
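A sketch of both adjustments applied inside the loop above (still using the question's img, mat and scal; cvGet2D on an 8-bit image returns the channels in B, G, R order):

    int idx = (i * img->width + j) * 3;
    data[idx + 0] = (float)scal.val[2] / 255.0f; // R
    data[idx + 1] = (float)scal.val[1] / 255.0f; // G
    data[idx + 2] = (float)scal.val[0] / 255.0f; // B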