I'm currently working on a procedural planet generation tool that works by taking a cube, mapping it to a sphere and then applying a heightmap to each face to generate terrain.
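The cube-to-sphere step itself boils down to normalizing each cube-face point onto the unit sphere and then displacing it along the radius by the heightmap value, roughly like this (illustrative only; Vector3 stands in for whatever vector type the project uses, and a normalize() helper is assumed):
Vector3 cubeToSphere(const Vector3& cubePoint, float radius, float height, float scale)
{
    Vector3 dir = normalize(cubePoint);            // project the cube point onto the unit sphere
    return dir * radius * (1.0f + height * scale); // displace along the radius by the heightmap value
}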
I'm using a VBO for each face which is created using the following method:
void Planet::setVertexBufferObject()
{
    Vertex* vertices;
    int currentVertex;
    Vertex* vertex;
    for(int i = 0; i < 6; i++)
    {
        // bottom face
        if(i == 0)
        {
            glBindBuffer(GL_ARRAY_BUFFER, bottomVBO);
        }
        // top face
        else if(i == 1)
        {
            glBindBuffer(GL_ARRAY_BUFFER, topVBO);
        }
        // front face
        else if(i == 2)
        {
            glBindBuffer(GL_ARRAY_BUFFER, frontVBO);
        }
        // back face
        else if(i == 3)
        {
            glBindBuffer(GL_ARRAY_BUFFER, backVBO);
        }
        // left face
        else if(i == 4)
        {
            glBindBuffer(GL_ARRAY_BUFFER, leftVBO);
        }
        // right face
        else
        {
            glBindBuffer(GL_ARRAY_BUFFER, rightVBO);
        }
        vertices = (Vertex*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
        currentVertex = 0;
        for(int x = 0; x < size; x++)
        {
            for(int z = 0; z < size; z++)
            {
                currentVertex = z * size + x;
                vertex = &vertices[currentVertex];
                vertex->xTextureCoord = (x * 1.0f) / 512.0f;
                vertex->zTextureCoord = (z * 1.0f) / 512.0f;
                Vector3 normal;
                vertex->xNormal = normal.x;
                vertex->yNormal = normal.y;
                vertex->zNormal = normal.z;
                vertex->x = heightMapCubeFace[i][x][z][0];
                vertex->y = heightMapCubeFace[i][x][z][1];
                vertex->z = heightMapCubeFace[i][x][z][2];
                vertex->x *= (1.0f + ((heightMaps[i][z][x] / 256.0f) * 0.1));
                vertex->y *= (1.0f + ((heightMaps[i][z][x] / 256.0f) * 0.1));
                vertex->z *= (1.0f + ((heightMaps[i][z][x] / 256.0f) * 0.1));
            }
        }
        glUnmapBuffer(GL_ARRAY_BUFFER);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
    }
}
I have left out the setIndexBufferObject() method as that is working OK.
I am then rendering the sphere using this method:
void Planet::render()
{
    // bottom face
    glBindBuffer(GL_ARRAY_BUFFER, bottomVBO);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bottomIBO);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), BUFFER_OFFSET(6 * sizeof(float)));
    glEnableClientState(GL_NORMAL_ARRAY);
    glNormalPointer(GL_FLOAT, sizeof(Vertex), BUFFER_OFFSET(3 * sizeof(float)));
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, sizeof(Vertex), BUFFER_OFFSET(0));
    TextureManager::Inst()->BindTexture(textueIDs[0]);
    glClientActiveTexture(GL_TEXTURE0+textueIDs[0]);
    glDrawElements(GL_TRIANGLE_STRIP, numberOfIndices, GL_UNSIGNED_INT, BUFFER_OFFSET(0));
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    // next face and so on...
The textures are being loaded using FreeImage; as you can see from the above code, I am just using the example texture manager that comes with FreeImage.
Why is binding the texture not working?
Describing very specifically the behavior you're seeing, or better yet attaching a URL to a screenshot, goes a long way when trying to get help with graphics questions.
Next, you need to include the GL error state, or at least show in the code that you're checking and failing if glGetError() doesn't return GL_NO_ERROR.
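For example, a minimal error-check helper along these lines (the function name and the failure handling are just illustrative):
#include <cstdio>
#include <cstdlib>

void checkGLError(const char* where)
{
    GLenum err = glGetError();
    if (err != GL_NO_ERROR)
    {
        fprintf(stderr, "GL error 0x%04X at %s\n", err, where);
        abort(); // or assert/throw, whatever fits your build
    }
}
Call it after glMapBuffer(), glUnmapBuffer(), the texture bind, and the draw call, and report which one (if any) trips.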
Your VBO / draw-element state looks reasonable, though you've deliberately omitted the definition of the index buffer object, so we'll take it on faith that you're sending real indices into your glDrawElements() call. For a sanity check, make the glDrawElements() call with a raw client-side indices pointer rather than binding a VBO for the elements.
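Something like this, assuming you keep a client-side copy of the indices (the name rawIndices is hypothetical):
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); // no element VBO bound
glDrawElements(GL_TRIANGLE_STRIP, numberOfIndices, GL_UNSIGNED_INT, rawIndices);
If that draws correctly, the problem is in how the index VBO is filled or bound.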
You've also omitted the type definition of Vertex, which is required to know whether the offsets you're supplying to glTexCoordPointer() are consistent with the struct definition.
Lastly, I'm guessing most people on the forum don't know FreeImage, which I believe is what you've stated you're using to load textures. If there's a texturing problem, it's impossible to see it because of the opaque nature of using this third-party library to set up texturing on your behalf.
If it has confused texture IDs, set up a wrap mode that isn't supported, not set the minification filter consistently with the expected mipmapping (on/off), not enabled texturing, or set the texture environment mode such that the modulation with the base geometry color is not what you expect, then any of these would make texturing not work or appear not to be working.
To troubleshoot, just use the library for the texture load and use the texture ID it provides. Then set up the filter modes and texture environment yourself. Turn off mipmapping while troubleshooting; an incomplete mipmap chain is a very common texturing error.
You can also turn off per-fragment operations to simplify your troubleshooting. Disable blending, depth testing, and scissoring.
Assuming your texture has power-of-two dimensions or your implementation supports NPOT textures, try these settings immediately before drawing:
glDisable(GL_DEPTH_TEST);
glDisable(GL_BLEND);
glDisable(GL_SCISSOR_TEST);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glBindTexture(GL_TEXTURE_2D, texID);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); // turn off modulation
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // turn off mipmapping
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); // turn off repeats
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glClientActiveTexture(GL_TEXTURE0+textueIDs[0]);
What are you trying to do here?
The active texture unit has nothing to do with a texture object ID.
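If the intent was simply to texture with textueIDs[0] on unit 0, the usual fixed-function setup looks more like this (a sketch using the names from your own code):
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0);               // server side: the unit the texture object binds to
glBindTexture(GL_TEXTURE_2D, textueIDs[0]);
glClientActiveTexture(GL_TEXTURE0);         // client side: the unit the texcoord array feeds
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), BUFFER_OFFSET(6 * sizeof(float)));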
I want to make a program that shows the earth with a space texture as the background.
The earth is a 3D Uniform with an earth texture (.bmp).
The space with the stars is a texture (.bmp).
I have summarized what I have to do:
Create a new Model Matrix
Position it at the same place where the camera is
Disable depth test before drawing
Reverse culling
This is the Load function:
void load(){
    //Load The Shader
    Shader simpleShader("src/shader.vert", "src/shader.frag");
    g_simpleShader = simpleShader.program;
    // Create the VAO where we store all geometry (stored in g_Vao)
    g_Vao = gl_createAndBindVAO();
    //Create vertex buffer for positions, colors, and indices, and bind them to shader
    gl_createAndBindAttribute(&(shapes[0].mesh.positions[0]), shapes[0].mesh.positions.size() * sizeof(float), g_simpleShader, "a_vertex", 3);
    gl_createIndexBuffer(&(shapes[0].mesh.indices[0]), shapes[0].mesh.indices.size() * sizeof(unsigned int));
    gl_createAndBindAttribute(uvs, uvs_size, g_simpleShader, "a_uv", 2);
    gl_createAndBindAttribute(normal, normal_size, g_simpleShader, "a_normal", 2);
    //Unbind Everything
    gl_unbindVAO();
    //Store Number of Triangles (use in draw())
    g_NumTriangles = shapes[0].mesh.indices.size() / 3;
    //Paths of the earth and space textures
    Image* image = loadBMP("assets/earthmap1k.bmp");
    Image* space = loadBMP("assets/milkyway.bmp");
    //Generate Textures
    glGenTextures(1, &texture_id);
    glGenTextures(1, &texture_id2);
    //Bind Textures
    glBindTexture(GL_TEXTURE_2D, texture_id);
    glBindTexture(GL_TEXTURE_2D, texture_id2);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    //Assign the corresponding image data
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image->width, image->height, 0, GL_RGB, GL_UNSIGNED_BYTE, image->pixels);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, space->width, space->height, 0, GL_RGB, GL_UNSIGNED_BYTE, space->pixels);
}
This is the Draw function:
void draw(){
    //1. Enable/Disable
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);
    glCullFace(GL_FRONT);
    //2. Shader Activation
    glUseProgram(g_simpleShader);
    //3. Get All Uniform Locations
    //Space:
    GLuint model_loc2 = glGetUniformLocation(g_simpleShader, "u_model");
    GLuint u_texture2 = glGetUniformLocation(g_simpleShader, "u_texture2");
    GLuint u_light_dir2 = glGetUniformLocation(g_simpleShader, "u_light_dir2");
    //Earth
    GLuint model_loc = glGetUniformLocation(g_simpleShader, "u_model");
    GLuint projection_loc = glGetUniformLocation(g_simpleShader, "u_projection");
    GLuint view_loc = glGetUniformLocation(g_simpleShader, "u_view");
    GLuint u_texture = glGetUniformLocation(g_simpleShader, "u_texture");
    GLuint u_light_dir = glGetUniformLocation(g_simpleShader, "u_light_dir");
    //4. Get Values From All Uniforms
    mat4 model_matrix2 = translate(mat4(1.0f), vec3(1.0f, -3.0f, 1.0f));
    mat4 model_matrix = translate(mat4(1.0f), vec3(0.0f, -0.35f, 0.0f));
    mat4 projection_matrix = perspective(60.0f, 1.0f, 0.1f, 50.0f);
    mat4 view_matrix = lookAt(vec3(1.0f, -3.0f, 1.0f), vec3(0.0f, 0.0f, 0.0f), vec3(0.0f, 1.0f, 0.0f));
    //5. Upload Uniforms To Shader
    glUniformMatrix4fv(model_loc2, 1, GL_FALSE, glm::value_ptr(model_matrix2));
    glUniformMatrix4fv(model_loc, 1, GL_FALSE, glm::value_ptr(model_matrix));
    glUniformMatrix4fv(projection_loc, 1, GL_FALSE, glm::value_ptr(projection_matrix));
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(view_matrix));
    glUniform1i(u_texture, 0);
    glUniform3f(u_light_dir, g_light_dir.x, g_light_dir.y, g_light_dir.z);
    glUniform1i(u_texture2, 1);
    glUniform3f(u_light_dir2, g_light_dir.x, g_light_dir.y, g_light_dir.z);
    //6. Activate Texture Unit 0 and Bind our Texture Object
    glActiveTexture(GL_TEXTURE0);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, texture_id);
    glBindTexture(GL_TEXTURE_2D, texture_id2);
    //7. Bind VAO
    gl_bindVAO(g_Vao);
    //8. Draw Elements
    glDrawElements(GL_TRIANGLES, 3 * g_NumTriangles, GL_UNSIGNED_INT, 0);
}
Also I have two Fragment Shaders:
The first one returns this:
fragColor = vec4(final_color, 1.0);
The second one returns this:
fragColor = vec4(texture_color.xyz, 1.0);
Also the Vertex Shader returns the position of the vertex:
gl_Position = u_projection * u_view * u_model * vec4( a_vertex , 1.0 );
When I compile, it only shows the earth, while it should show the earth with space as the background. I have reviewed the code several times but I cannot find out what is wrong.
Supposed result vs. my actual result: (screenshots)
If I see it right, then among other things you are binding the textures wrongly:
glActiveTexture(GL_TEXTURE0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
should be:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_id);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id2);
but I prefer that the last active texture unit you set is unit 0 ...
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id2);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_id);
That will save you a lot of trouble when you start combining this with single-texture-unit code ... I also hope you are properly unbinding the used texture units afterwards, for the same reasons, for example:
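A minimal sketch of what I mean by unbinding, assuming only these two units are in use:
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, 0); // leave unit 1 clean
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, 0); // and finish with unit 0 active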
You also have an ugly seam at the 0/360 degree edge of longitude.
This might be caused by a wrongly computed normal for lighting, by a texture that is not seamless, or just by forgetting to duplicate the edge points with the correct texture coordinates for the last patch. See:
Applying map of the earth texture a Sphere
You can also add an atmosphere, bump map, and clouds to your planet:
Bump-map a sphere with a texture map
Andrea is right...
Set the matrices to identity and render a (+/-)1.0 rectangle at z = 0.0 (with aspect-ratio correction) without depth test, face culling, or depth writes ... That way you will avoid jitter and flickering due to floating-point errors.
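As a rough sketch, reusing the uniform names from your draw() and assuming a hypothetical g_VaoQuad that holds a [-1,+1] quad at z = 0 (six indices):
glm::mat4 identity(1.0f);
glUniformMatrix4fv(model_loc, 1, GL_FALSE, glm::value_ptr(identity));
glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(identity));
glUniformMatrix4fv(projection_loc, 1, GL_FALSE, glm::value_ptr(identity));
glDisable(GL_DEPTH_TEST); // the background must not depth-test...
glDepthMask(GL_FALSE);    // ...nor write depth
glDisable(GL_CULL_FACE);
glActiveTexture(GL_TEXTURE1);               // the unit u_texture2 samples from
glBindTexture(GL_TEXTURE_2D, texture_id2);
gl_bindVAO(g_VaoQuad);                      // hypothetical VAO with the background quad
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glDepthMask(GL_TRUE);     // restore state before drawing the earth
glEnable(GL_DEPTH_TEST);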
A skybox is better, but there are also other options to enhance the scene; see:
Is it possible to make realistic n-body solar system simulation in matter of size and mass?
and all the sublinks in there, especially the ones about stars. You can combine a skybox and a stellar catalog together, and much, much more...
I generate a PointCloud in my program, and now, I want to be able to click on a point in this point cloud rendered to my screen using OpenGL.
In order to do so, I used the trick of giving each pixel in an offscreen render a colour based on its index in the VBO. I use the same camera for my offscreen render and my onscreen render so they move together, and when I click, I read back the offscreen render to retrieve the position in the VBO and get the point I clicked on. That is the theory, but when I click I only get (0,0,0). I believe that means my FBO is not rendered correctly, but I'm not sure whether it is that or the problem comes from somewhere else...
So here are the steps. clicFBO is the FBO I'm using for the offscreen render, and clicTextureColorBuf is the texture I write into via the FBO:
glGenFramebuffers(1, &clicFBO);
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glGenTextures(1, &clicTextureColorBuf);
glBindTexture(GL_TEXTURE_2D, clicTextureColorBuf);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, clicTextureColorBuf, 0);
GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, DrawBuffers);
After that, I wrote a shader that gives each point a colour based on its index in the VBO...
std::vector<cv::Point3f> reconstruction3D; //Will contain the position of my points
std::vector<float> indicesPointsVBO; //Will contain the indexes of each point
for (int i = 0; i < pts3d.size(); ++i) {
    reconstruction3D.push_back(pts3d[i].pt3d);
    colors3D.push_back(pt_tmp);
    indicesPointsVBO.push_back((float)i / (float)pts3d.size());
}
GLuint clicVAO, clicVBO[2];
glGenVertexArrays(1, &clicVAO);
glGenBuffers(2, &clicVBO[0]);
glBindVertexArray(clicVAO);
glBindBuffer(GL_ARRAY_BUFFER, clicVBO[0]);
glBufferData(GL_ARRAY_BUFFER, reconstruction3D.size() * sizeof(cv::Point3f), &reconstruction3D[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glEnable(GL_PROGRAM_POINT_SIZE);
glBindBuffer(GL_ARRAY_BUFFER, clicVBO[1]);
glBufferData(GL_ARRAY_BUFFER, indicesPointsVBO.size() * sizeof(float), &indicesPointsVBO[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
and the vertex shader:
layout (location = 0) in vec3 pos;
layout (location = 1) in float col;
out float Col;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform int pointSize;
void main()
{
    gl_PointSize = pointSize;
    gl_Position = projection * view * model * vec4(pos, 1.0);
    Col = col;
}
And the Fragment:
#version 330 core
layout(location = 0) out vec4 FragColor;
in float Col;
void main()
{
    FragColor = vec4(Col, Col, Col, 1.0);
}
And this is how I render this texture:
glm::mat4 view = camera.GetViewMatrix();
glm::mat4 projection = glm::perspective(glm::radians(camera.Zoom), (float)SCR_WIDTH / (float)SCR_HEIGHT, 1.0f, 100.0f);
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
clicShader.use();
glDisable(GL_DEPTH_TEST);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
clicShader.setMat4("projection", projection);
clicShader.setMat4("view", view);
model = glm::mat4();
clicShader.setMat4("model", model);
clicShader.setInt("pointSize", pointSize);
glBindVertexArray(clicVAO);
glDrawArrays(GL_POINTS, 0, (GLsizei)reconstruction3D.size());
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And then, when I click, I use this piece of code:
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glReadBuffer(GL_COLOR_ATTACHMENT0);
int width = 11, height = 11;
std::array<GLfloat, 363> arry{ 1 };
glReadPixels(Xpos - 5, Ypos - 5, width, height, GL_RGB, GL_UNSIGNED_BYTE, &arry);
for (int i = 0; i < 363; i+=3) { // It's 3 time the same number anyways for each number
std::cout << arry[i] << " "; // It gives me only 0's
}
std::cout << std::endl << std::endl;
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
I know the error might be really stupid, but I still have some trouble understanding how OpenGL works.
I put what I thought was necessary to understand the problem (without making it too long), but if you need more code, I can post it too.
I know this is not a question you can answer with Yes or No; it's more like debugging my program. But since I really can't find where the problem comes from, I'm looking for someone who can explain what I did wrong. I do not necessarily seek the solution itself, but clues that could help me understand where my error is...
Using a framebuffer object (FBO) to store an "object identifier" is a nice method. But you also want to see the objects, right? Then you must also render to the default framebuffer (let me call it "defFB"; it is not an FBO).
Because you need to render to two different targets, you need one of these techniques:
Draw the objects twice (e.g. with two glDrawArrays calls), once to the FBO and a second time to the defFB.
Draw to two images of the FBO at once and later blit one of them (the color one) to the defFB.
For the first technique you may use a texture attached to an FBO (as you currently do). Or you can use a "renderbuffer" and draw to it.
The second approach needs a second "out" in the fragment shader:
layout(location = 0) out vec3 color; //GL_COLOR_ATTACHMENT0
layout(location = 1) out vec3 objID; //GL_COLOR_ATTACHMENT1
and setting the two attachments with glDrawBuffers.
For the blit part, read this answer.
Note that both "out" have the same format, vec3 in this example.
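A sketch of how the two attachments could be wired up and later blitted (colorTex and pickTex are placeholders for textures you create the same way as clicTextureColorBuf):
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, pickTex, 0);
const GLenum bufs[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
glDrawBuffers(2, bufs);
// ... draw the point cloud once ...
// then copy the visible colors to the default framebuffer:
glBindFramebuffer(GL_READ_FRAMEBUFFER, clicFBO);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glReadBuffer(GL_COLOR_ATTACHMENT0);
glBlitFramebuffer(0, 0, SCR_WIDTH, SCR_HEIGHT, 0, 0, SCR_WIDTH, SCR_HEIGHT, GL_COLOR_BUFFER_BIT, GL_NEAREST);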
A flaw in your code is that you set an RGB texture format and also use this format with glReadPixels, but your "out" in the FS is a vec4 instead of a vec3.
More concerns are:
Check the completeness of the FBO with glCheckFramebufferStatus (see the sketch after this list).
Using a "depth attachment" on the FBO may be needed, even if it will not be used for reading.
Disabling the depth test will put all elements in the frame. Your point-picking will select the last one drawn, not the nearest.
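For the first two points, a sketch, run while clicFBO is still bound (GL_DEPTH_COMPONENT24 and the error message are just examples):
GLuint depthRbo;
glGenRenderbuffers(1, &depthRbo);
glBindRenderbuffer(GL_RENDERBUFFER, depthRbo);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, SCR_WIDTH, SCR_HEIGHT);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRbo);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    fprintf(stderr, "clicFBO is not complete\n");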
I found the problem.
There were 2 failures in my code:
The first one is that in OpenGL there is a Y inversion between the image and the framebuffer. So in order to pick the right point, you have to flip Y using the size of the viewport. I did it like this:
GLint m_viewport[4];
glGetIntegerv(GL_VIEWPORT, m_viewport);
int YposTMP = m_viewport[3] - Ypos - 1;
The second one is the use of
glReadPixels(Xpos - 2, Ypos - 2, width, height, GL_RGB, GL_UNSIGNED_BYTE, &pixels[0]);
The 6th parameter must be GL_FLOAT, since the data I'm reading back is floats.
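Putting both fixes together, the read-back now looks roughly like this (a 1x1 read at the click position; the index is recovered by inverting the i / pts3d.size() encoding from the shader):
GLint m_viewport[4];
glGetIntegerv(GL_VIEWPORT, m_viewport);
int YposTMP = m_viewport[3] - Ypos - 1;

GLfloat pixel[3] = { 0.0f, 0.0f, 0.0f };
glBindFramebuffer(GL_FRAMEBUFFER, clicFBO);
glReadBuffer(GL_COLOR_ATTACHMENT0);
glReadPixels(Xpos, YposTMP, 1, 1, GL_RGB, GL_FLOAT, pixel);
glBindFramebuffer(GL_FRAMEBUFFER, 0);

int clickedIndex = (int)(pixel[0] * pts3d.size() + 0.5f); // undo the i / size() encoding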
Thanks all!
Best regards,
R.S
I implemented a simple OBJ parser and am using a parallelepiped as the example model. I added a rotation feature based on quaternions. The next goal is adding light. I parsed the normals and decided to draw them as a "debug" feature (for a better understanding of lighting later). But I got stuck after that:
Here is my parallelepiped with a small rotation (screenshot).
Look at the far bottom-right vertex and its normal. I can't understand why it is rendered through my parallelepiped. It should be hidden.
I use a depth buffer (because without it the parallelepiped looks weird while I rotate it). So I initialize it:
glGenRenderbuffers(1, &_depthRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _depthRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, _frameBufferWidth, _frameBufferHeight);
and enable it:
glEnable(GL_DEPTH_TEST);
I generate 4 VBOs: vertex and index buffers for the parallelepiped, and vertex and index buffers for the lines (normals).
I use one simple shader for both models (if needed I can add the code later, but I think everything is OK with it).
First I draw the parallelepiped, after that the normals.
Here is my code:
// _field variable - parallelepiped
glClearColor(0.3, 0.3, 0.4, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
int vertexSize = Vertex::size();
int colorSize = Color::size();
int normalSize = Normal::size();
int totalSize = vertexSize + colorSize + normalSize;
GLvoid *offset = (GLvoid *)(sizeof(Vertex));
glBindBuffer(GL_ARRAY_BUFFER, _geomBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indicesBufferID);
glVertexAttribPointer(_shaderAtributePosition, vertexSize, GL_FLOAT, GL_FALSE, sizeof(Vertex::oneElement()) * totalSize, 0);
glVertexAttribPointer(_shaderAttributeColor, colorSize, GL_FLOAT, GL_FALSE, sizeof(Color::oneElement()) * totalSize, offset);
glDrawElements(GL_TRIANGLES, _field->getIndicesCount(), GL_UNSIGNED_SHORT, 0);
#ifdef NORMALS_DEBUG_DRAWING
glBindBuffer(GL_ARRAY_BUFFER, _normalGeomBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _normalIndexBufferID);
totalSize = vertexSize + colorSize;
glVertexAttribPointer(_shaderAtributePosition, vertexSize, GL_FLOAT, GL_FALSE, sizeof(Vertex::oneElement()) * totalSize, 0);
glVertexAttribPointer(_shaderAttributeColor, colorSize, GL_FLOAT, GL_FALSE, sizeof(Color::oneElement()) * totalSize, offset);
glDrawElements(GL_LINES, 2 * _field->getVertexCount(), GL_UNSIGNED_SHORT, 0);
#endif
I understand that if, for example, I merge these two draw calls into one (and use the same VBOs for the parallelepiped and the normals), everything will be fine.
But that would be inconvenient because I use both lines and triangles.
There should be another way of fixing the Z order. I can't believe that a complex scene (for example sky, land and buildings) is drawn via one draw call.
So, what am I missing?
Thanks in advance.
If you are rendering into a window surface you need to request depth as part of your EGL configuration request. The depth renderbuffer you have allocated is only useful if you attach it to a Framebuffer Object (FBO) for off-screen rendering.
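If the platform is EGL-based, the relevant part of the config request is just adding a depth size (a sketch; the surrounding EGL setup and the display handle are assumed):
const EGLint configAttribs[] = {
    EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
    EGL_SURFACE_TYPE,    EGL_WINDOW_BIT,
    EGL_RED_SIZE,   8,
    EGL_GREEN_SIZE, 8,
    EGL_BLUE_SIZE,  8,
    EGL_DEPTH_SIZE, 16, // without this the window surface has no depth buffer
    EGL_NONE
};
EGLConfig config;
EGLint numConfigs = 0;
eglChooseConfig(display, configAttribs, &config, 1, &numConfigs);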
I want to draw a cube and a sphere and apply a different texture to each.
I use blender to create the scene and then export to an obj file which then includes the vertices, normals, uvs and faces for both objects as well as the textures.
I have created a routine which loads all the data from the obj file. This all works, as I can load the objects and display them, but only with one texture. As I said, I have gone through pages and pages of code and posts, and 99% of them only deal with one texture for one object, and those that deal with multiple textures only deal with one object or use a very old version of OpenGL.
The one thing I haven't tried is a uniform sampler2D array in the fragment shader, but I haven't found an explanation of that, so I haven't tried it.
The code that I have is below:
ObjLoader *obj = new ObjLoader();
string _filepath = "objects\\" + _filename;
//bool res = obj->loadObjWithStaticColor(_filepath.c_str(), _vertices, _normals, vertex_colors, _colors, 1.0);
bool res = obj->loadObjWithTextures(_filepath.c_str(), _objects, _textures);
program = InitShader("shaders\\vshader.glsl", "shaders\\fshader.glsl");
glUseProgram(program);
GLuint vao_world_objects;
glGenVertexArrays(1, &vao_world_objects);
glBindVertexArray(vao_world_objects);
//GLuint vbo_world_objects;
//glGenBuffers(1, &vbo_world_objects);
//glBindBuffer(GL_ARRAY_BUFFER, vbo_world_objects);
NumVertices = _objects[_objects.size() - 1]._stop + 1;
for (size_t i = 0; i < _objects.size(); i++)
{
    _vertices.insert(_vertices.end(), _objects[i]._vertices.begin(), _objects[i]._vertices.end());
    _normals.insert(_normals.end(), _objects[i]._normals.begin(), _objects[i]._normals.end());
    _uvs.insert(_uvs.end(), _objects[i]._uvs.begin(), _objects[i]._uvs.end());
}
GLuint _vSize = _vertices.size() * sizeof(point4);
GLuint _nSize = _normals.size() * sizeof(point4);
GLuint _uSize = _uvs.size() * sizeof(point2);
GLuint _totalSize = _vSize + _uSize; // normals + vertices + uvs
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, _vSize, &_vertices[0], GL_STATIC_DRAW);
GLuint uvbuffer;
glGenBuffers(1, &uvbuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
glBufferData(GL_ARRAY_BUFFER, _uSize, &_uvs[0], GL_STATIC_DRAW);
TextureID = glGetUniformLocation(program, "myTextureSampler");
TextureObjects = new GLuint[_textures.size()];
glGenTextures(_textures.size(), TextureObjects);
for (size_t i = 0; i < _textures.size(); i++)
{
    // "Bind" the newly created texture : all future texture functions will modify this texture
    glBindTexture(GL_TEXTURE_2D, TextureObjects[i]);
    // Give the image to OpenGL
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _textures[i].width, _textures[i].height, 0, GL_BGR, GL_UNSIGNED_BYTE, _textures[i]._tex_data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
for (size_t i = 0; i < _objects.size(); i++)
{
    if (i == 0)
    {
        glActiveTexture(GL_TEXTURE0);
    }
    else
    {
        glActiveTexture(GL_TEXTURE1);
    }
    glBindTexture(GL_TEXTURE_2D, TextureObjects[i]);
    GLuint _v_size = _objects[i]._vertices.size() * sizeof(point4);
    GLuint _u_size = _objects[i]._uvs.size() * sizeof(point2);
    GLuint vPosition = glGetAttribLocation(program, "vPosition");
    glEnableVertexAttribArray(vPosition);
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    if (i == 0)
    {
        glVertexAttribPointer(vPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
    }
    else
    {
        glVertexAttribPointer(vPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(_v_size));
    }
    GLuint vUV = glGetAttribLocation(program, "vUV");
    glEnableVertexAttribArray(vUV);
    glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
    if (i == 0)
    {
        glVertexAttribPointer(vUV, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
    }
    else
    {
        glVertexAttribPointer(vUV, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(_u_size));
    }
    if (i == 0)
    {
        glUniform1i(TextureID, 0);
    }
    else
    {
        glUniform1i(TextureID, 1);
    }
}
_scale = Scale(zoom, zoom, zoom);
_projection = Perspective(45.0, 4.0 / 3.0, 0.1, 100.0);
_view = LookAt(point4(Camera.x, Camera.y, Camera.z, 0), point4(0, 0, 0, 0), point4(0, 1, 0, 0));
_model = mat4(1.0); // identity matrix
_mvp = _projection * _view * _model;
MVP = glGetUniformLocation(program, "MVP");
theta = glGetUniformLocation(program, "theta");
Zoom = glGetUniformLocation(program, "Zoom");
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_CULL_FACE);
glClearColor(1.0, 1.0, 1.0, 1.0);
I understand that I have to switch between the active textures when drawing an object but I can't figure out how.
UPDATE
@immibis Ok, I tried to do that yesterday but it didn't work; it was late and I was highly frustrated. So just to get my thinking correct here: do I have to create a buffer every time (glGenBuffers), fill it, activate the texture and then call glDrawArrays, or do I create the buffer once and then fill it every time with the different vertices and uvs for each object, set the offsets and then call glDrawArrays for each object?
When I tried this originally I didn't know where the
glGetAttribLocation / glEnableVertexAttribArray / glBindBuffer
calls should go. So if I understand correctly, every time I do a transformation like rotating around the x axis, the buffers have to be filled etc., so the code needs to go in the display function. Is that correct?
SOLVED
Ok, so thanks to immibis' comments, I started looking in a different direction. I was staring the whole time at how the data was pumped into the arrays and never even looked at glDrawArrays. I searched the web again and came across a piece of code in a tutorial where the author explained glDrawArrays, and I saw that you can tell it which range of vertices to draw.
So then this became easy, as I originally thought it was supposed to be. I changed my code back to pumping everything into the buffers, and since I have a start and stop property on the objects returned from my loader, it was really easy to tell glDrawArrays what to do.
Thank you.
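For anyone else reading, the draw loop ended up looking roughly like this (the _start/_stop members are the ones my loader returns; the exact names are mine):
for (size_t i = 0; i < _objects.size(); i++)
{
    glActiveTexture(GL_TEXTURE0);                    // one sampler, so unit 0 is enough
    glBindTexture(GL_TEXTURE_2D, TextureObjects[i]); // the texture for this object
    glUniform1i(TextureID, 0);
    GLint first = _objects[i]._start;                           // first vertex of this object
    GLsizei count = _objects[i]._stop - _objects[i]._start + 1; // number of vertices it owns
    glDrawArrays(GL_TRIANGLES, first, count);
}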
I recently switched from immediate mode and have a new rendering process. There must be something I am not understanding. I think it has something to do with the indices.
Here is my diagram: Region -> Mesh -> Polygon Array -> 3 vertex indices which reference the master list of vertices.
Here is my render code:
// Render the mesh
void WLD::render(GLuint* textures, long curRegion, CFrustum cfrustum)
{
    int num = 0;
    // Set up rendering states
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    // Set up my indices
    GLuint indices[3];
    // Cycle through the PVS
    while(num < regions[curRegion].visibility.size())
    {
        int i = regions[curRegion].visibility[num];
        // Make sure the region is not "dead"
        if(!regions[i].dead && regions[i].meshptr != NULL)
        {
            // Check to see if the mesh is in the frustum
            if(cfrustum.BoxInFrustum(regions[i].meshptr->min[0], regions[i].meshptr->min[2], regions[i].meshptr->min[1], regions[i].meshptr->max[0], regions[i].meshptr->max[2], regions[i].meshptr->max[1]))
            {
                // Cycle through every polygon in the mesh and render it
                for(int j = 0; j < regions[i].meshptr->polygonCount; j++)
                {
                    // Assign the index for the polygon to the index in the huge vertex array
                    // This I think, is redundant
                    indices[0] = regions[i].meshptr->poly[j].vertIndex[0];
                    indices[1] = regions[i].meshptr->poly[j].vertIndex[1];
                    indices[2] = regions[i].meshptr->poly[j].vertIndex[2];
                    // Enable texturing and bind the appropriate texture
                    glEnable(GL_TEXTURE_2D);
                    glBindTexture(GL_TEXTURE_2D, textures[regions[i].meshptr->poly[j].tex]);
                    glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
                    glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);
                    // Draw
                    glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, indices);
                }
            }
        }
        num++;
    }
    // End of rendering - disable states
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
Sorry if I left anything out. And I really appreciate feedback and help with this. I would even consider paying someone who is good with OpenGL and optimization to help me with this.
There is no point in using array rendering if you're only rendering 3 vertices at a time. The idea is to send thousands of vertices through with a single call. That is, you render a single "Polygon Array" or "Mesh" with one call.
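As a sketch of what that could look like with your structures (the std::map grouping is just one way to batch by texture; poly, vertIndex, and tex are the fields from your question):
#include <map>
#include <vector>

std::map<int, std::vector<GLuint> > indicesByTexture;
for (int j = 0; j < regions[i].meshptr->polygonCount; j++)
{
    const auto& p = regions[i].meshptr->poly[j];
    std::vector<GLuint>& batch = indicesByTexture[p.tex];
    batch.push_back(p.vertIndex[0]);
    batch.push_back(p.vertIndex[1]);
    batch.push_back(p.vertIndex[2]);
}
glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);
for (const auto& kv : indicesByTexture)
{
    glBindTexture(GL_TEXTURE_2D, textures[kv.first]);
    glDrawElements(GL_TRIANGLES, (GLsizei)kv.second.size(), GL_UNSIGNED_INT, kv.second.data());
}
Now each mesh costs at most one draw call per texture instead of one per triangle.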