I am trying to render an OBJ model with a texture. Here is what I do:
Get the 3D model (model) and the corresponding view matrix view_mat and projection matrix proj_mat from the image.
Project the 3D model into the image using proj_mat * view_mat * model; this way I can get UV coordinates in the image for every vertex of the 3D model.
Use the UV coordinates to render the 3D model.
Here is what I get (on the left is the render result). I think I have the main steps right, as the overall texture looks to be in the right position, but it looks like the texture within each triangle is rotated.
Here is the part of the code that I consider related to the texture mapping.
int main() {
Tracker tracker;
tracker.set_image("clooney.jpg");
Viewer viewer(tracker.get_width(), tracker.get_height());
while (tracker.track()) {
Model* model = tracker.get_model();
glm::mat4x4 proj_mat = tracker.get_proj_mat();
proj_mat = glm::transpose(proj_mat);
glm::mat4x4 view_mat = tracker.get_view_mat();
view_mat = glm::transpose(view_mat);
// render 3d shape
viewer.set_model(model);
viewer.draw(view_mat, proj_mat);
waitKey(0);
}
return 0;
}
// initialization of the render part
Viewer::Viewer(int width, int height) {
glfwInit();
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
m_window = glfwCreateWindow(width, height, "demo", NULL, NULL);
if (!m_window)
{
fprintf(stderr, "Failed to open GLFW window\n");
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(m_window);
glfwGetWindowSize(m_window, &m_width, &m_height);
glfwSetFramebufferSizeCallback(m_window, reshape_callback);
gladLoadGLLoader((GLADloadproc)glfwGetProcAddress);
glfwSwapInterval(1);
config();
}
void Viewer::config() {
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDisable(GL_CULL_FACE);
glShadeModel(GL_FLAT);
}
// entry of the drawing function
void Viewer::draw(glm::mat4x4 view_mat, glm::mat4x4 proj_mat) {
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
proj_mat = glm::transpose(proj_mat);
glLoadMatrixf(&proj_mat[0][0]);
glMatrixMode(GL_MODELVIEW);
view_mat = glm::transpose(view_mat);
glLoadMatrixf(&view_mat[0][0]);
// m_pmodel is an instance of Model Class
// set texture
m_pmodel->set_texture(m_image);
// set model uvs
m_pmodel->set_uvs(view_mat, proj_mat);
m_pmodel->draw();
glfwSwapBuffers(m_window);
glfwPollEvents();
}
// set the texture for the model from the image
void Model::set_texture(cv::Mat img) {
glGenTextures(1, &m_texture);
glBindTexture(GL_TEXTURE_2D, m_texture);
glTexImage2D(GL_TEXTURE_2D, 0, 3, img.cols, img.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, img.data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glEnable(GL_TEXTURE_2D);
}
// specify correspondence between image and the model
void Model::set_uvs(glm::mat4x4 view_mat, glm::mat4x4 proj_mat) {
for (int i = 0; i < m_uvs.size(); i++) {
glm::vec4 clip_coord = proj_mat * view_mat * glm::vec4(m_vertices[i], 1);
float w = clip_coord.w;
glm::vec3 normal_coord = glm::vec3(clip_coord.x, clip_coord.y, clip_coord.z) / w;
m_uvs[i] = glm::vec2(normal_coord.x * 0.5f + 0.5f, normal_coord.y * 0.5f + 0.5f);
}
}
// render the 3d model
void Model::draw() const {
glBindTexture(GL_TEXTURE_2D, m_texture);
for (unsigned long i = 0; i < m_faces.size(); ++i) {
glm::ivec3 face = this->m_faces[i];
glBegin(GL_TRIANGLES);
for (int j = 0; j < 3; j++) {
glm::vec3 v = this->m_vertices[face[j]];
glm::vec2 uv = this->m_uvs[face[j]];
glVertex3f(v.x, v.y, v.z);
glTexCoord2f(1 - uv.x,1 - uv.y);
}
glEnd();
}
}
You have to set the current texture coordinate (glTexCoord) before you specify a vertex (glVertex), because the current color, normal, texture coordinates, and fog coordinate are associated with the vertex when glVertex is called.
This means you have to swap glVertex3f and glTexCoord2f:
glTexCoord2f(1 - uv.x,1 - uv.y);
glVertex3f(v.x, v.y, v.z);
Otherwise you would set the texture coordinate that is associated with the next vertex position.
See OpenGL 2.0 API Specification, 2.6 Begin/End Paradigm, page 13:
Each vertex is specified with two, three, or four coordinates. In addition, a current normal, multiple current texture coordinate sets, multiple current generic vertex attributes, current color, current secondary color, and current fog coordinate may be used in processing each vertex.
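For clarity, here is the complete loop with the two calls swapped (same member names as in the question, nothing else changed):
void Model::draw() const {
    glBindTexture(GL_TEXTURE_2D, m_texture);
    for (unsigned long i = 0; i < m_faces.size(); ++i) {
        glm::ivec3 face = m_faces[i];
        glBegin(GL_TRIANGLES);
        for (int j = 0; j < 3; j++) {
            glm::vec3 v = m_vertices[face[j]];
            glm::vec2 uv = m_uvs[face[j]];
            glTexCoord2f(1 - uv.x, 1 - uv.y); // set the current texture coordinate first
            glVertex3f(v.x, v.y, v.z);        // then emit the vertex that uses it
        }
        glEnd();
    }
}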
I'm trying to implement a selection-outline feature. This is what I have so far.
As you can see, the objects are selected correctly when the mouse hovers and a contour is drawn around the selected object.
What I would like to do now is to outline the visible edges of the object in this way
In the image on the left is what I have now, and in the right image is what I want to achieve.
This is the procedure I use now.
void paintGL()
{
/* ... */
int w = geometry().width();
int h = geometry().height();
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_STENCIL_TEST);
glStencilFunc(GL_NOTEQUAL, 1, 0xFF);
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
glStencilMask(0xFF);
setClearColor(Qt::GlobalColor::darkGray);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilMask(0x00);
DrawGeometry();
if (HoveredSphere != RgbFromColorToString(Qt::GlobalColor::black))
{
glBindFramebuffer(GL_FRAMEBUFFER, addFBO(FBOIndex::OUTLINE));
{
glStencilFunc(GL_ALWAYS, 1, 0xFF);
glStencilMask(0xFF);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
DrawOutline(HoveredSphere, 1.0f - 0.025f);
}
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
glBindFramebuffer(GL_READ_FRAMEBUFFER, addFBO(FBOIndex::OUTLINE));
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, defaultFramebufferObject());
{
// copy stencil buffer
GLbitfield mask = GL_STENCIL_BUFFER_BIT;
glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, mask, GL_NEAREST);
glStencilFunc(GL_NOTEQUAL, 1, 0xFF);
glStencilMask(0x00);
glDepthFunc(GL_LEQUAL);
DrawOutline(HoveredSphere, 1.0f);
}
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
}
update();
}
Where DrawGeometry draws all the objects, and DrawOutline draws the selected object scaled by the factor passed as the second parameter.
Thanks for any suggestions.
By following the tips of @MichaelMahn, I found a solution.
First of all, I draw the silhouette of the visible parts of the selected object into a texture.
Then I use this texture to compute the outline, checking the neighboring pixels to figure out whether or not the current pixel lies on the edge of the silhouette.
outline fragment shader
#version 450
uniform sampler2D silhouette;
in FragData
{
smooth vec2 coords;
} frag;
out vec4 PixelColor;
void main()
{
// if the pixel is black (we are on the silhouette)
if (texture(silhouette, frag.coords).xyz == vec3(0.0f))
{
vec2 size = 1.0f / textureSize(silhouette, 0);
for (int i = -1; i <= +1; i++)
{
for (int j = -1; j <= +1; j++)
{
if (i == 0 && j == 0)
{
continue;
}
vec2 offset = vec2(i, j) * size;
// and if one of the neighboring pixels is white (we are on the border)
if (texture(silhouette, frag.coords + offset).xyz == vec3(1.0f))
{
PixelColor = vec4(vec3(1.0f), 1.0f);
return;
}
}
}
}
discard;
}
paintGL
void paintGL()
{
int w = geometry().width();
int h = geometry().height();
setClearColor(Qt::GlobalColor::darkGray);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
DrawGeometry();
// if we hover a sphere
if (HoveredSphere != RgbFromColorToString(Qt::GlobalColor::black))
{
glBindFramebuffer(GL_READ_FRAMEBUFFER, defaultFramebufferObject());
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, addFBO(FBOIndex::SILHOUETTE));
{
// copy depth buffer
GLbitfield mask = GL_DEPTH_BUFFER_BIT;
glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, mask, GL_NEAREST);
// set clear color
setClearColor(Qt::GlobalColor::white);
// enable depth test
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
// clear color buffer
glClear(GL_COLOR_BUFFER_BIT);
// draw silhouette
DrawSilhouette(HoveredSphere);
}
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
// clear depth buffer
glClear(GL_DEPTH_BUFFER_BIT);
// draw outline
DrawOutline();
}
}
PROBLEM: Now I'd like to parameterize the width of the contour, which is currently fixed at 1 pixel.
Thank you so much for any suggestion!
Thanks to the advice of @Andrea, I found the following solution.
outline fragment shader
#version 450
uniform sampler2D silhouette;
in FragData
{
smooth vec2 coords;
} frag;
out vec4 PixelColor;
void main()
{
// outline thickness
int w = 3;
// if the pixel is black (we are on the silhouette)
if (texture(silhouette, frag.coords).xyz == vec3(0.0f))
{
vec2 size = 1.0f / textureSize(silhouette, 0);
for (int i = -w; i <= +w; i++)
{
for (int j = -w; j <= +w; j++)
{
if (i == 0 && j == 0)
{
continue;
}
vec2 offset = vec2(i, j) * size;
// and if one of the neighboring pixels is white (we are on the border)
if (texture(silhouette, frag.coords + offset).xyz == vec3(1.0f))
{
PixelColor = vec4(vec3(1.0f), 1.0f);
return;
}
}
}
}
discard;
}
Now I still have a small problem when the selected object is at the edge of the window.
As you can see, the outline is cut sharply.
I tried to "play" with glTexParameter on the parameters GL_TEXTURE_WRAP_S and GL_TEXTURE_WRAP_T.
In the image above you can see the effect I get with
glTexParameters(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameters(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
while in the image below you can see the effect I get with
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, &(QVector4D (1.0f, 1.0f, 1.0f, 1.0f)[0]));
glTexParameters(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameters(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
I would like the outline to show up at the edge of the window, but only where necessary.
Thanks a lot!
I am attempting to procedurally generate a star-filled background in OpenGL.
The approach I am taking is to create a skybox with a cubemap texture. Each side of the cubemap texture is essentially a 2048x2048 black image with randomly selected texels set to white. Here is the result:
I'm not sure how obvious it is from the image, but when moving around, a very distinct box shape can be made out, as stars close to the edges of the box appear smaller and closer together. How can I prevent this? Do I need to abandon the skybox approach and use something like a skysphere instead?
EDIT: here is how I am mapping the cubemap onto the sky.
// Create and bind texture.
glGenTextures(1, &texture_);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, texture_);
for (unsigned int i = 0; i < 6; ++i) {
std::vector<std::uint8_t> image = generateTexture(TEXTURE_WIDTH, TEXTURE_HEIGHT);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, TEXTURE_WIDTH, TEXTURE_HEIGHT,
0, GL_RGB, GL_UNSIGNED_BYTE, image.data());
}
// Set texture parameters.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
Here is the definition of the generateTexture function:
std::vector<std::uint8_t> Stars::generateTexture(GLsizei width, GLsizei height) {
std::vector<std::uint8_t> image(static_cast<std::size_t>(3 * width * height));
add_stars(image, NUM_STARS);
return image;
}
void Stars::add_stars(std::vector<std::uint8_t>& image, unsigned int nStars) {
std::default_random_engine eng;
std::uniform_int_distribution<std::size_t> dist(0, image.size() / 3 - 1);
while (nStars--) {
std::size_t index = 3 * dist(eng);
image[index++] = 255;
image[index++] = 255;
image[index++] = 255;
}
}
EDIT2: here is the draw function used to render the sky.
void Stars::draw(const Camera& camera) const {
// Skybox will be rendered last. In order to ensure that the stars are rendered at the back of
// the scene, the depth buffer is filled with values of 1.0 for the skybox -- this is done in
// the vertex shader. We need to make sure that the skybox passes the depth test with values
// less than or equal to the depth buffer.
glDepthFunc(GL_LEQUAL);
program_.enable();
// Calculate view-projection matrix and set the corresponding uniform. The view matrix must be
// stripped of translation components so that the skybox follows the camera.
glm::mat4 view = glm::mat4(glm::mat3(camera.viewMatrix()));
glm::mat4 projection = camera.projectionMatrix();
glm::mat4 VP = projection * view;
glUniformMatrix4fv(program_.uniformLocation("VP"), 1, GL_FALSE, glm::value_ptr(VP));
// Bind buffer objects and texture to current context and draw.
glBindVertexArray(vao_);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo_);
glBindTexture(GL_TEXTURE_CUBE_MAP, texture_);
glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(INDICES.size()), GL_UNSIGNED_INT,
reinterpret_cast<GLvoid *>(0));
glBindVertexArray(0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
program_.disable();
glDepthFunc(GL_LESS);
}
generate stars uniformly in some cubic volume
x=2.0*Random()-1.0; // <-1,+1>
y=2.0*Random()-1.0; // <-1,+1>
z=2.0*Random()-1.0; // <-1,+1>
project them onto the unit sphere
So just compute the length of the vector (x,y,z) and divide the coordinates by it.
project the result onto the cube map
Each side of the cube is defined by a plane, so find the intersection of a ray cast from (0,0,0) through the Cartesian star position with each of those planes. Take the intersection with the shortest distance to (0,0,0) and use that as the final star position.
The implementation could be something like this OpenGL&C++ code:
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i,n=10000;
float a,b,x,y,z;
//RandSeed=8123456789;
n=obj.pnt.num; // triangulated sphere point list
glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE);
glPointSize(2.0);
glBegin(GL_POINTS);
for (i=0;i<n;i++)
{
// equidistant points instead of random to test this
x=obj.pnt[i].p[0];
y=obj.pnt[i].p[1];
z=obj.pnt[i].p[2];
/*
// random star spherical position
a=2.0*M_PI*Random();
b=M_PI*(Random()-0.5);
// spherical to cartesian, r=1
x=cos(a)*cos(b);
y=sin(a)*cos(b);
z= sin(b);
*/
// redish sphere map
glColor3f(0.6,0.3,0.0); glVertex3f(x,y,z);
// cube half size=1 undistort using similarities like: y/x = y'/x'
if ((fabs(x)>=fabs(y))&&(fabs(x)>=fabs(z))){ y/=x; z/=x; if (x>=0) x=1.0; else x=-1.0; }
else if ((fabs(y)>=fabs(x))&&(fabs(y)>=fabs(z))){ x/=y; z/=y; if (y>=0) y=1.0; else y=-1.0; }
else if ((fabs(z)>=fabs(x))&&(fabs(z)>=fabs(y))){ x/=z; y/=z; if (z>=0) z=1.0; else z=-1.0; }
// bluish cube map
glColor3f(0.0,0.3,0.6); glVertex3f(x,y,z);
}
glEnd();
glPointSize(1.0);
glDisable(GL_BLEND);
glFlush();
SwapBuffers(hdc);
It looks like it works as it should; here is a preview (of the blended sphere/cube map):
It looks as if there are holes, but there are none (it may be some blending error); if I disable the sphere-map render, there are no visible holes or distortions in the mapping.
The sphere triangulation mesh obj used to test this is taken from here:
Sphere triangulation
[Edit1] Yes, there was a silly blending error
I repaired the code, but the blend problem persists anyway. It does not matter; the mapping works as it should. Here is the updated code result:
So just adapt the code to your texture generator, for example along the lines of the sketch below.
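A rough sketch of that adaptation (my own sketch, not code from either post): pick a uniform random direction, let the dominant axis choose the cube face, and divide the other two coordinates by its absolute value to get the texel inside that face. The per-face (u,v) sign conventions of the GL_TEXTURE_CUBE_MAP_* targets are ignored here; for a random star field this only mirrors each face, so the density stays uniform.
#include <cmath>
#include <cstdint>
#include <random>
#include <vector>

// faces: six RGB images, each width*height*3 bytes, one per cube-map face
void addStarsUniform(std::vector<std::vector<std::uint8_t>>& faces,
                     int width, int height, unsigned int nStars) {
    std::default_random_engine eng;
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    while (nStars--) {
        // uniform random point inside the cube, projected onto the unit sphere
        float x = dist(eng), y = dist(eng), z = dist(eng);
        float d = std::sqrt(x * x + y * y + z * z);
        if (d < 1e-3f) { ++nStars; continue; }
        x /= d; y /= d; z /= d;
        // the dominant axis selects the face; the other two coordinates become (u,v) in <-1,+1>
        float ax = std::fabs(x), ay = std::fabs(y), az = std::fabs(z);
        int face; float u, v;
        if      (ax >= ay && ax >= az) { face = (x > 0) ? 0 : 1; u = y / ax; v = z / ax; }
        else if (ay >= ax && ay >= az) { face = (y > 0) ? 2 : 3; u = x / ay; v = z / ay; }
        else                           { face = (z > 0) ? 4 : 5; u = x / az; v = y / az; }
        // map <-1,+1> to texel indices
        int px = static_cast<int>((u * 0.5f + 0.5f) * (width  - 1));
        int py = static_cast<int>((v * 0.5f + 0.5f) * (height - 1));
        std::size_t idx = 3 * (static_cast<std::size_t>(py) * width + px);
        faces[face][idx] = faces[face][idx + 1] = faces[face][idx + 2] = 255;
    }
}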
[Edit2] Random stars
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i;
float x,y,z,d;
RandSeed=8123456789;
glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE);
glPointSize(2.0);
glBegin(GL_POINTS);
for (i=0;i<1000;i++)
{
// uniform random cartesian stars inside cube
x=(2.0*Random())-1.0;
y=(2.0*Random())-1.0;
z=(2.0*Random())-1.0;
// project on unit sphere
d=sqrt((x*x)+(y*y)+(z*z));
if (d<1e-3) { i--; continue; }
d=1.0/d;
x*=d; y*=d; z*=d;
// redish sphere map
glColor3f(0.6,0.3,0.0); glVertex3f(x,y,z);
// cube half size=1 undistort using similarities like: y/x = y'/x'
if ((fabs(x)>=fabs(y))&&(fabs(x)>=fabs(z))){ y/=x; z/=x; if (x>=0) x=1.0; else x=-1.0; }
else if ((fabs(y)>=fabs(x))&&(fabs(y)>=fabs(z))){ x/=y; z/=y; if (y>=0) y=1.0; else y=-1.0; }
else if ((fabs(z)>=fabs(x))&&(fabs(z)>=fabs(y))){ x/=z; y/=z; if (z>=0) z=1.0; else z=-1.0; }
// bluish cube map
glColor3f(0.0,0.3,0.6); glVertex3f(x,y,z);
}
glEnd();
glPointSize(1.0);
glDisable(GL_BLEND);
glFlush();
SwapBuffers(hdc);
Here is the blend of both (1000 stars):
And here only the cube map (10000 stars):
[Edit3] The blend problem solved
It was caused by Z-fighting and by an occasional sign change of some coordinates during the projection due to a forgotten fabs. Here is the fixed code:
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int i;
float x,y,z,d;
RandSeed=8123456789;
glDepthFunc(GL_ALWAYS);
// glDepthFunc(GL_LEQUAL);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE);
glPointSize(2.0);
glBegin(GL_POINTS);
for (i=0;i<25000;i++)
{
// uniform random cartesian stars inside cube
x=(2.0*Random())-1.0;
y=(2.0*Random())-1.0;
z=(2.0*Random())-1.0;
// project on unit sphere
d=sqrt((x*x)+(y*y)+(z*z));
if (d<1e-3) { i--; continue; }
d=1.0/d;
x*=d; y*=d; z*=d;
// redish sphere map
glColor3f(0.6,0.3,0.0); glVertex3f(x,y,z);
// cube half size=1 undistort using similarities like: y/x = y'/x'
if ((fabs(x)>=fabs(y))&&(fabs(x)>=fabs(z))){ y/=fabs(x); z/=fabs(x); if (x>=0) x=1.0; else x=-1.0; }
else if ((fabs(y)>=fabs(x))&&(fabs(y)>=fabs(z))){ x/=fabs(y); z/=fabs(y); if (y>=0) y=1.0; else y=-1.0; }
else if ((fabs(z)>=fabs(x))&&(fabs(z)>=fabs(y))){ x/=fabs(z); y/=fabs(z); if (z>=0) z=1.0; else z=-1.0; }
// bluish cube map
glColor3f(0.0,0.3,0.6); glVertex3f(x,y,z);
}
glEnd();
glPointSize(1.0);
glDisable(GL_BLEND);
glFlush();
SwapBuffers(hdc);
And here is the blend result. Finally the colors are as they should be, so the sphere and cube stars overlap perfectly (white) when viewed from (0,0,0):
So I've recently been learning some OpenGL. I initially used the SDL library to draw images on screen, but I figured it would be interesting to try to achieve something similar with OpenGL, which would also let me apply shaders to my images for neat effects such as lighting and day/night cycles. What I'm doing right now is simply loading a texture, then applying that texture to a quad with the same size as the texture. This works well.
Now I want to apply some shaders. This is an example of a vertex and fragment shader that I could apply to one of my textured quads:
in vec2 LVertexPos2D;
void main()
{
gl_Position = vec4( LVertexPos2D.x, LVertexPos2D.y, 0, 1);
}
which does nothing, then my fragment shader:
out vec4 LFragment;
void main()
{
LFragment = vec4(1.0, 1.0, 1.0, 1.0);
}
Which obviously just turns the texture I'm applying it to into a white block, which isn't exactly what I want. Somehow I need to retrieve the current texel data so I can modify that instead of simply replacing it.
I've read that texture2D is supposed to return a vec4 with the current pixel data, but I haven't gotten this to work (I'm having a hard time finding a good explanation of its inputs and how it works). Furthermore, texture2D is supposedly deprecated, but I can't get its replacement (texture()) to work either. Any nudges in the right direction would be greatly appreciated!
Edit: I'll throw in some more info on how I'm doing things. This is the function that loads my textures:
texture makeTexture(std::string fileLocation)
{
texture tempTexture;
SDL_Surface *mySurface = IMG_Load(fileLocation.c_str());
if (mySurface == NULL)
{
std::cout << "Error in loading image at: " << fileLocation << std::endl;
return tempTexture;
}
GLuint myTexture;
glGenTextures(1, &myTexture);
glBindTexture(GL_TEXTURE_2D, myTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mySurface->w, mySurface->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, mySurface->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
tempTexture.texture_id = myTexture;
tempTexture.h = mySurface->h;
tempTexture.w = mySurface->w;
SDL_FreeSurface(mySurface); // free the surface only after reading its size
return tempTexture;
}
Where this is my texture struct:
struct texture
{
int w;
int h;
GLuint texture_id;
};
and this function draws any texture to a given x and y coordinate:
void draw(int y, int x, texture &tempTexture)
{
glBindTexture(GL_TEXTURE_2D, tempTexture.texture_id);
glBegin(GL_QUADS);
glTexCoord2f(0, 1);
glVertex2f(-1 + ((float)(x) / SCREEN_WIDTH) * 2, 1 - ((float)(y + tempTexture.h) / SCREEN_HEIGHT) * 2); //Bottom left
glTexCoord2f(1, 1);
glVertex2f(-1 + ((float)(x + tempTexture.w)/SCREEN_WIDTH)*2, 1 - ((float)(y + tempTexture.h) / SCREEN_HEIGHT) * 2); //Bottom right?
glTexCoord2f(1, 0);
glVertex2f(-1 + ((float)(x + tempTexture.w) / SCREEN_WIDTH) * 2, 1.0 - ((float)y / SCREEN_HEIGHT) * 2); //top right
glTexCoord2f(0, 0);
glVertex2f(-1 + ((float)(x) / SCREEN_WIDTH) * 2, 1.0 - ((float)y / SCREEN_HEIGHT) * 2); //Top left (note: coordinates are (x,y), not (y,x))
glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
}
then in my main render function I'm now doing:
draw(0, 0, myTexture);
glUseProgram(gProgramID);
glUniform1i(baseImageLoc, myTexture2.texture_id);
draw(100, 100, myTexture2);
glUseProgram(NULL);
where myTexture is just a meadow of grass and myTexture2 is a player character that I want to apply some shading shenanigans to. gProgramID is a program that has my two aforementioned shaders loaded into it.
In order to access texture data in a shader you have to do the following:
First you need to bind your texture to a specific texture unit (change the active texture unit using glActiveTexture).
Pass the texture unit index as a sampler uniform to the shader.
Access the texture in the shader as follows:
// tex holds the value of the texture unit to be used (not the texture)
uniform sampler2D tex;
void main()
{
vec4 color = texture(tex,texCoord);
LFragment = color;
}
You also need to pass texCoord to the shader as a vertex attribute.
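Concretely, the client-side part of steps 1 and 2 could look roughly like this (a sketch reusing the names from the question's render code; note that the sampler uniform receives the texture unit index, not the texture id that the question currently passes):
glUseProgram(gProgramID);
glActiveTexture(GL_TEXTURE0);                        // select texture unit 0
glBindTexture(GL_TEXTURE_2D, myTexture2.texture_id); // bind the texture object to that unit
glUniform1i(baseImageLoc, 0);                        // the sampler gets the unit index (0), not the texture id
draw(100, 100, myTexture2);
glUseProgram(0);
In a compatibility profile, the coordinates set with glTexCoord2f reach the vertex shader as the built-in gl_MultiTexCoord0, which can be forwarded to texCoord; in a core profile, texCoord has to come in as a generic vertex attribute instead.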
I'm rendering a scene using OpenGL, with textures loaded using some sample code and the FreeImage API.
Here is a link to what I'm seeing:
[Image Removed]
I can confirm that all texture coordinates are provided to glTexCoord2f between 0.0f and 1.0f as required along with the vertex coordinates.
Each of the rendered triangles appears to have the full texture pasted across it (and repeated) not the area of the texture specified by the coordinates.
The texture is 1024x1024.
This is the function for loading the texture
bool loadImageToTexture(const char* image_path, unsigned int &handle)
{
if(!image_path)
return false;
FREE_IMAGE_FORMAT fif = FIF_UNKNOWN;
FIBITMAP* dib(0);
BYTE* bits(0);
unsigned int width(0), height(0);
//open the file
fif = FreeImage_GetFileType(image_path, 0);
if(fif == FIF_UNKNOWN)
fif = FreeImage_GetFIFFromFilename(image_path);
if(fif == FIF_UNKNOWN)
return false;
if(FreeImage_FIFSupportsReading(fif))
dib = FreeImage_Load(fif, image_path);
if(!dib)
return false;
//is the file of the correct type
FREE_IMAGE_COLOR_TYPE type = FreeImage_GetColorType(dib);
if(FIC_RGBALPHA != type)
{
//convert to type
FIBITMAP* ndib = FreeImage_ConvertTo32Bits(dib);
FreeImage_Unload(dib); // release the original bitmap before replacing it with the converted one
dib = ndib;
}
//get data for glTexImage2D
bits = FreeImage_GetBits(dib);
width = FreeImage_GetWidth(dib);
height = FreeImage_GetHeight(dib);
if((bits == 0) || (width == 0) || (height == 0))
return false;
//create the texture in open gl
glGenTextures(1, &handle);
glBindTexture(GL_TEXTURE_2D, handle);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height,
0, GL_RGBA, GL_UNSIGNED_BYTE, bits);
//unload the image now loaded
FreeImage_Unload(dib);
return true;
}
These are the functions for rendering
inline void glTexture(float textureSize, float t, float u)
{
glTexCoord2f(u/textureSize, (textureSize - t)/textureSize);
}
void processTriangle(const btVector3* triangle, const textureCoord* tc, const btVector3& normal,
const int partId, const int triangleIndex, const bool wireframe)
{
if(wireframe)
{
glBegin(GL_LINES);
glColor3f(1, 0, 0);
glVertex3d(triangle[0].getX(), triangle[0].getY(), triangle[0].getZ());
glVertex3d(triangle[1].getX(), triangle[1].getY(), triangle[1].getZ());
glColor3f(0, 1, 0);
glVertex3d(triangle[2].getX(), triangle[2].getY(), triangle[2].getZ());
glVertex3d(triangle[1].getX(), triangle[1].getY(), triangle[1].getZ());
glColor3f(0, 0, 1);
glVertex3d(triangle[2].getX(), triangle[2].getY(), triangle[2].getZ());
glVertex3d(triangle[0].getX(), triangle[0].getY(), triangle[0].getZ());
glEnd();
}
else
{
glBegin(GL_TRIANGLES);
glColor3f(1, 1, 1);
//Normals are per triangle
glNormal3f(normal.getX(), normal.getY(), normal.getZ());
glTexture(1024.0f, tc[0].t, tc[0].u);
glVertex3d(triangle[0].getX(), triangle[0].getY(), triangle[0].getZ());
glTexture(1024.0f, tc[1].t, tc[1].u);
glVertex3d(triangle[1].getX(), triangle[1].getY(), triangle[1].getZ());
glTexture(1024.0f, tc[2].t, tc[2].u);
glVertex3d(triangle[2].getX(), triangle[2].getY(), triangle[2].getZ());
glEnd();
}
}
void processAllTriangles(const btVector3& worldBoundsMin, const btVector3& worldBoundsMax)
{
btVector3 triangle[3];
textureCoord tc[3];
//go through the index list build triangles and draw them
unsigned int k = 0;
for(unsigned int i = 0; i<dVertices.size(); i+=3, k++)
{
//first vertex
triangle[0] = dVertices[i];
tc[0] = dTexCoords[i];
//second vertex
triangle[1] = dVertices[i+1];
tc[1] = dTexCoords[i+1];
//third vertex
triangle[2] = dVertices[i+2];
tc[2] = dTexCoords[i+2];
processTriangle(triangle, tc, dNormals[k], 0, 0, false);
}
}
//draw the world given the players position
void render(btScalar* m, const btCollisionShape* shape, const btVector3& color, int debugMode,
const btVector3& worldBoundsMin, const btVector3& worldBoundsMax)
{
//render the world using the generated OpenGL lists
//enable and specify pointers to vertex arrays
glPushMatrix();
glMultMatrixf(m);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBindTexture(GL_TEXTURE_2D, m_texturehandle);
processAllTriangles(worldBoundsMin, worldBoundsMax);
glPopMatrix();
}
I'm working in a larger code base and texturing is done differently in different areas, which meant state from those other textured objects was not being disabled.
Before doing per-vertex texturing, make sure to turn off the other varieties of texturing:
glDisable(GL_TEXTURE_GEN_S);
glDisable(GL_TEXTURE_GEN_T);
glDisable(GL_TEXTURE_GEN_R);
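If the other code paths can be touched, an alternative I would consider (my suggestion, not part of the original fix) is to have them save and restore the texture state they change, so their texgen settings never leak into this renderer:
glPushAttrib(GL_TEXTURE_BIT | GL_ENABLE_BIT); // save texture and enable state
// ... code that enables GL_TEXTURE_GEN_* or other texturing modes ...
glPopAttrib();                                // restore the previous state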
I want to draw a 2D array of pixel data (RGB / grayscale values) on the screen as fast as possible, using OpenGL. The pixel data changes frequently.
I had hoped that I would find a simple function that would let me push in a pointer to an array representing the pixel data, since this is probably the fastest approach. Unfortunately, I have found no such function.
What is the best way to accomplish this task?
Maybe glDrawPixels is the function you are looking for? Though if the data is static it would be better to create a texture with it, and then draw that each frame.
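A minimal sketch of that path (width, height and pixels are placeholders for your own data): set the raster position, then hand OpenGL the pointer again whenever the pixel data changes.
glWindowPos2i(0, 0);                   // lower-left corner of the window (OpenGL 1.4+)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows in the array are tightly packed
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);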
I recently had a similar problem, as I am trying to render a video to the screen (ie repeatedly upload pixel data to the VRAM). My approach is:
use glTexImage2D and glTexSubImage2D to upload the data to the texture (ie bind the texture (and texture unit, if applicable) before calling that)
in my case as the video frame rate (usually about 24 fps) is lower than the framerate of my application (aimed at 60 fps), in order to avoid uploading the same data again I use a framebuffer object (check out glGenFramebuffers/glBindFramebuffer/glDeleteFramebuffers) and link my texture with the framebuffer (glFramebufferTexture2D). I then upload that texture once, and draw the same frame multiple times (just normal texture access with glBindTexture)
I don't know which platform you are using, but as I am targeting Mac I use some Apple extensions to ensure the data transfer to the VRAM happens through DMA (ie make glTexSubImage2D return immediately to let the CPU do other work) - please feel free to ask me for more info if you are using Mac too
also as you are using just grayscale, you might want to consider just using a GL_LUMINANCE texture (ie 1 byte per pixel) rather than RGB based format to make the upload faster (but that depends on the size of your texture data, I was streaming HD 1920x1080 video so I needed to make sure to keep it down)
also be aware of the format your hardware is using to avoid unnecessary data conversions (ie normally it seems better to use BGRA data than for example just RGB)
finally, in my code I replaced all the fixed pipeline functionality with shaders (in particular the conversion of the data from grayscale or YUV format to RGB), but again all that depends on the size of your data, and the workload of your CPU or GPU
Hope this helps, feel free to message me if you need further info
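A bare-bones version of the upload path described above (tex, width, height and frame are placeholders): allocate the texture storage once with glTexImage2D, then push each new frame with glTexSubImage2D.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
             GL_BGRA, GL_UNSIGNED_BYTE, NULL);     // allocate storage once, no data yet

// each time new pixel data arrives:
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                GL_BGRA, GL_UNSIGNED_BYTE, frame); // re-upload only the pixel data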
I would think the fastest way would be to draw a screen sized quad with ortho projection and use a pixel shader and Texture Buffer Object to draw directly to the texture in the pixel shader. Due to latency transferring to/from the TBO you may want to see if double buffering would help.
If speed isn't much of a concern (you just need fairly interactive framerates) glDrawPixels is easy to use and works well enough for many purposes.
My solution for getting dynamically changing image data to the screen in OpenGL:
#define WIN32_LEAN_AND_MEAN
#include "wx/wx.h"
#include "wx/sizer.h"
#include "wx/glcanvas.h"
#include "BasicGLPane.h"
// include OpenGL
#ifdef __WXMAC__
#include "OpenGL/glu.h"
#include "OpenGL/gl.h"
#else
#include <GL/glu.h>
#include <GL/gl.h>
#endif
#include "ORIScanMainFrame.h"
BEGIN_EVENT_TABLE(BasicGLPane, wxGLCanvas)
EVT_MOTION(BasicGLPane::mouseMoved)
EVT_LEFT_DOWN(BasicGLPane::mouseDown)
EVT_LEFT_UP(BasicGLPane::mouseReleased)
EVT_RIGHT_DOWN(BasicGLPane::rightClick)
EVT_LEAVE_WINDOW(BasicGLPane::mouseLeftWindow)
EVT_SIZE(BasicGLPane::resized)
EVT_KEY_DOWN(BasicGLPane::keyPressed)
EVT_KEY_UP(BasicGLPane::keyReleased)
EVT_MOUSEWHEEL(BasicGLPane::mouseWheelMoved)
EVT_PAINT(BasicGLPane::render)
END_EVENT_TABLE()
// Test data for image generation. floats range 0.0 to 1.0, in RGBRGBRGB... order.
// Array is 1024 * 3 long. Note that 32 * 32 is 1024 and is the largest image we can randomly generate.
float* randomFloatRGB;
float* randomFloatRGBGrey;
BasicGLPane::BasicGLPane(wxFrame* parent, int* args) :
wxGLCanvas(parent, wxID_ANY, args, wxDefaultPosition, wxDefaultSize, wxFULL_REPAINT_ON_RESIZE)
{
m_context = new wxGLContext(this);
randomFloatRGB = new float[1024 * 3];
randomFloatRGBGrey = new float[1024 * 3];
// In GL images 0,0 is in the lower left corner so the draw routine does a vertical flip to get 'regular' images right side up.
for (int i = 0; i < 1024; i++) {
// Red
randomFloatRGB[i * 3] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Green
randomFloatRGB[i * 3 + 1] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Blue
randomFloatRGB[i * 3 + 2] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Telltale 2 white pixels in 0,0 corner.
if (i < 2) {
randomFloatRGB[i * 3] = randomFloatRGB[i * 3 + 1] = randomFloatRGB[i * 3 + 2] = 1.0f;
}
randomFloatRGBGrey[i * 3] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 1] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 2] = randomFloatRGB[i * 3];
}
// To avoid flashing on MSW
SetBackgroundStyle(wxBG_STYLE_CUSTOM);
}
BasicGLPane::~BasicGLPane()
{
delete m_context;
}
void BasicGLPane::resized(wxSizeEvent& evt)
{
// wxGLCanvas::OnSize(evt);
Refresh();
}
int BasicGLPane::getWidth()
{
return GetSize().x;
}
int BasicGLPane::getHeight()
{
return GetSize().y;
}
void BasicGLPane::render(wxPaintEvent& evt)
{
assert(GetParent());
assert(GetParent()->GetParent());
ORIScanMainFrame* mf = dynamic_cast<ORIScanMainFrame*>(GetParent()->GetParent());
assert(mf);
switch (mf->currentMainView) {
case ORIViewSelection::ViewCamera:
renderCamera(evt);
break;
case ORIViewSelection::ViewDepth:
renderDepth(evt);
break;
case ORIViewSelection::ViewPointCloud:
renderPointCloud(evt);
break;
case ORIViewSelection::View3DModel:
render3DModel(evt);
break;
default:
renderNone(evt);
}
}
void BasicGLPane::renderNone(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopAttrib();
}
GLuint makeOpenGlTextureFromDataLuninanceFloats(int width, int height, float* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_FLOAT, f); // internal format, then pixel format, then pixel type
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
GLuint makeOpenGlTextureFromRGBInts(int width, int height, unsigned int* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_INT, f);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
/// <summary>
/// Range of each float is 0.0f to 1.0f
/// </summary>
/// <param name="width"></param>
/// <param name="height"></param>
/// <param name="floatRGB"></param>
/// <returns></returns>
GLuint makeOpenGlTextureFromRGBFloats(int width, int height, float* floatRGB) {
GLuint textureID;
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, floatRGB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
void BasicGLPane::DrawTextureToScreenFloat(int w, int h, float* floatDataPtr, GLuint (*textureFactory)(int width, int height, float* floatRGB)) {
if (w <= 0 || h <= 0 || floatDataPtr == NULL || w > 5000 || h > 5000) {
assert(false);
return;
}
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS);
glClearColor(0.15f, 0.11f, 0.02f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
float onePixelW = (float)getWidth() / (float)w;
float onePixelH = (float)getHeight() / (float)h;
float orthoW = w;
float orthoH = h;
if (onePixelH > onePixelW) {
orthoH = h * onePixelH / onePixelW;
}
else {
orthoW = w * onePixelW / onePixelH;
}
// We want the image at the top of the window, not the bottom if the window is too tall.
int topOfScreen = (float)getHeight() / onePixelH;
// If the window resizes after creation you need to change the viewport.
glViewport(0, 0, getWidth(), getHeight());
gluOrtho2D(0.0, orthoW, (double)topOfScreen - (double)orthoH, topOfScreen);
GLuint myTextureName = textureFactory(w, h, floatDataPtr);
glBegin(GL_QUADS);
{
// This order of UV coords and vertices does the vertical flip of the image so that the 'regular' image 0,0
// is in the top left corner.
glTexCoord2f(0.0f, 1.0f); glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f(0.0f + w, 0.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f(0.0f + w, 0.0f + h, 0.0f);
glTexCoord2f(0.0f, 0.0f); glVertex3f(0.0f, 0.0f + h, 0.0f);
}
glEnd();
glDeleteTextures(1, &myTextureName);
glFlush();
SwapBuffers();
glPopClientAttrib();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::DrawTextureToScreenMat(wxPaintEvent& evt, cv::Mat m, float brightness) {
if (m.empty()) {
renderNone(evt);
return;
}
if (m.type() == CV_32FC1) { // Grey scale.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromDataLuninanceFloats);
}
else if (m.type() == CV_32FC3) { // Color.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromRGBFloats);
}
else {
renderNone(evt);
}
}
void BasicGLPane::renderCamera(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->im_white);
}
void BasicGLPane::renderDepth(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->depth_map);
}
void BasicGLPane::render3DModel(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::renderPointCloud(wxPaintEvent& evt) {
if (!IsShown())
return;
boost::unique_lock<boost::mutex> lk(ORITopControl::Instance->pointCloudCacheMutex);
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, getWidth(), getHeight());
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (ORITopControl::Instance->pointCloudCache.size() > 0) {
glMatrixMode(GL_PROJECTION);
gluPerspective( /* field of view in degree */ 40.0,
/* aspect ratio */ 1.0,
/* Z near */ 1.0, /* Z far */ 500.0);
glMatrixMode(GL_MODELVIEW);
gluLookAt(100, 70, 200, // Eye
25, 25, 25, // Look at pt
0, 0, 1); // Up Vector
glPointSize(2.0);
glBegin(GL_POINTS);
// Use explicit for loop because pointCloudFragments can grow asynchronously.
for (int i = 0; i < ORITopControl::Instance->pointCloudCache.size(); i++) {
auto frag = ORITopControl::Instance->pointCloudCache[i];
auto current_point_cloud_ptr = frag->cloud;
glPushMatrix();
// glMultMatrixf(frag->xform.data());
for (size_t n = 0; n < current_point_cloud_ptr->size(); n++) {
glColor3ub(255, 255, 255);
glVertex3d(current_point_cloud_ptr->points[n].x, current_point_cloud_ptr->points[n].y, current_point_cloud_ptr->points[n].z);
}
glPopMatrix();
}
glEnd();
}
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}