// Vertices information
GLfloat vertices[] = { 1, 1, 1, -1, 1, 1, -1,-1, 1, 1,-1, 1, // (front)
1, 1, 1, 1,-1, 1, 1,-1,-1, 1, 1,-1, // (right)
1, 1, 1, 1, 1,-1, -1, 1,-1, -1, 1, 1, // (top)
-1, 1, 1, -1, 1,-1, -1,-1,-1, -1,-1, 1, // (left)
-1,-1,-1, 1,-1,-1, 1,-1, 1, -1,-1, 1, // (bottom)
1,-1,-1, -1,-1,-1, -1, 1,-1, 1, 1,-1 }; // (back)
// Normal information
GLfloat normals[72] = { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, // (front)
1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, // (right)
0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, // (top)
-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, // (left)
0,-1, 0, 0,-1, 0, 0,-1, 0, 0,-1, 0, // (bottom)
0, 0,-1, 0, 0,-1, 0, 0,-1, 0, 0,-1 }; // (back)
// drawing cube
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glNormalPointer(GL_FLOAT, 0, normals);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, indices);
glPopMatrix();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
I want to make sure there is no color difference between the two sides.
Is there a solution for this?
The issue is caused by the flat shading mode you have activated (see Detecting legacy OpenGL and glShadeModel).
With flat shading, the light is computed per vertex of each triangle primitive, but the color of the entire triangle is determined by the provoking vertex.
Switch to the smooth (GL_SMOOTH) shading model and ensure that the light is diffuse (GL_DIFFUSE parameter - see glLight):
glShadeModel(GL_SMOOTH);
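For reference, a minimal legacy-pipeline setup along these lines might look as follows; the diffuse color and light position values here are only illustrative:
// Enable fixed-function lighting with one light source
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
// Illustrative diffuse color and position for light 0
GLfloat light_diffuse[]  = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat light_position[] = { 0.0f, 0.0f, 5.0f, 1.0f };
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
// Interpolate the per-vertex lighting result across each primitive
glShadeModel(GL_SMOOTH);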
See the difference between shading mode GL_FLAT (left) and GL_SMOOTH (right):
Another option is to render GL_QUADS primitives instead of GL_TRIANGLES primitives, so that each cube face is a single primitive and flat shading produces one color per face. Replace
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, indices);
with
glDrawArrays(GL_QUADS, 0, 24);
Again, GL_FLAT (left) and GL_SMOOTH (right):
In the code below, I don't understand why some faces have their normals reversed.
The triangles look ordered in the anti-clockwise direction, but some faces remain black.
When I modify the fragment shader to color = -vnormal;, the two black faces are rendered correctly, but obviously not the others.
Thanks for any help given
// minimal but functional code using GLEW, GLFW, GLM
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/vec3.hpp>
#include <glm/vec4.hpp>
#include <glm/mat4x4.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "shaders.h"
GLuint myVAO;
void createCube() {
// v6----- v5
// /| /|
// v1------v0|
// | | | |
// | |v7---|-|v4
// |/ |/
// v2------v3
const GLfloat cube_vertices[] = {
1, 1, 1, -1, 1, 1, -1,-1, 1, // v0-v1-v2 (front)
-1,-1, 1, 1,-1, 1, 1, 1, 1, // v2-v3-v0
1, 1, 1, 1,-1, 1, 1,-1,-1, // v0-v3-v4 (right)
1,-1,-1, 1, 1,-1, 1, 1, 1, // v4-v5-v0
1, 1, 1, 1, 1,-1, -1, 1,-1, // v0-v5-v6 (top)
-1, 1,-1, -1, 1, 1, 1, 1, 1, // v6-v1-v0
-1, 1, 1, -1, 1,-1, -1,-1,-1, // v1-v6-v7 (left)
-1,-1,-1, -1,-1, 1, -1, 1, 1, // v7-v2-v1
-1,-1,-1, 1,-1,-1, 1,-1, 1, // v7-v4-v3 (bottom)
1,-1, 1, -1,-1, 1, -1,-1,-1, // v3-v2-v7
1,-1,-1, -1,-1,-1, -1, 1,-1, // v4-v7-v6 (back)
-1, 1,-1, 1, 1,-1, 1,-1,-1 }; // v6-v5-v4
// normal array
const GLfloat cube_normalsI[] = {
0, 0, 1, 0, 0, 1, 0, 0, 1, // v0-v1-v2 (front)
0, 0, 1, 0, 0, 1, 0, 0, 1, // v2-v3-v0
1, 0, 0, 1, 0, 0, 1, 0, 0, // v0-v3-v4 (right)
1, 0, 0, 1, 0, 0, 1, 0, 0, // v4-v5-v0
0, 1, 0, 0, 1, 0, 0, 1, 0, // v0-v5-v6 (top)
0, 1, 0, 0, 1, 0, 0, 1, 0, // v6-v1-v0
-1, 0, 0, -1, 0, 0, -1, 0, 0, // v1-v6-v7 (left)
-1, 0, 0, -1, 0, 0, -1, 0, 0, // v7-v2-v1
0,-1, 0, 0,-1, 0, 0,-1, 0, // v7-v4-v3 (bottom)
0,-1, 0, 0,-1, 0, 0,-1, 0, // v3-v2-v7
0, 0,-1, 0, 0,-1, 0, 0,-1, // v4-v7-v6 (back)
0, 0,-1, 0, 0,-1, 0, 0,-1 }; // v6-v5-v4
// Upload per-vertex positions
GLuint positionVBO = 0;
glGenBuffers(1, &positionVBO);
glBindBuffer(GL_ARRAY_BUFFER, positionVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_vertices), cube_vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Upload per-vertex normals
GLuint normalVBO = 0;
glGenBuffers(1, &normalVBO);
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_normalsI), cube_normalsI, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Hook up vertex/normals buffers to a "vertex array object" (VAO)
glGenVertexArrays(1, &myVAO);
glBindVertexArray(myVAO);
// Attach position buffer as attribute 0
glBindBuffer(GL_ARRAY_BUFFER, positionVBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Attach normal buffer as attribute 1
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
int main(int argc, char** argv) {
glfwInit();
GLFWwindow* window = glfwCreateWindow(768, 768, "", NULL, NULL);
glfwMakeContextCurrent(window);
glewInit();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE); // same problem with glEnable(GL_FRONT_AND_BACK);
glClearColor(0.8f, 0.7f, 0.5f, 1.0f);
unsigned int program = shaders::CreateShader("simple.vert", "simple.frag");
createCube();
while (glfwWindowShouldClose(window) == GL_FALSE) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(0.0, 0.0, -4.0));
glm::mat4 view = glm::lookAt(glm::vec3(-2.0, -2.0, 0.0), glm::vec3(0.0, 0.0, -4.0), glm::vec3(0.0, 1.0, 0.0));
glm::mat4 projection = glm::perspective(45.0f, 1.0f, 0.1f, 10.0f);
glm::mat4 mvp = projection * view * model;
glUseProgram(program);
GLuint imvp = glGetUniformLocation(program, "mvp");
glUniformMatrix4fv(imvp, 1, false, glm::value_ptr(mvp));
glBindVertexArray(myVAO);
glDrawArrays(GL_TRIANGLES, 0, 36);
glBindVertexArray(0);
glUseProgram(0);
glfwSwapBuffers(window);
}
}
The vertex shader:
#version 330 core
layout (location = 0) in vec3 in_position;
layout (location = 1) in vec3 in_normal;
uniform mat4 mvp;
out vec3 vnormal;
void main() {
vnormal = in_normal;
gl_Position = mvp * vec4(in_position,1);
}
The fragment shader:
#version 330 core
in vec3 vnormal;
out vec3 color;
void main() {
color= vnormal;
}
Fragment shader output colors are clamped to the 0.0-1.0 range when written to a normalized color buffer.
So a negative normal like (-1, 0, 0) ends up as RGB(0, 0, 0), i.e. black, in the color buffer.
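If the intent is just to visualize the normals, a common workaround is to remap them from [-1, 1] to [0, 1] in the fragment shader; a minimal sketch, keeping the names from the shader in the question:
#version 330 core
in vec3 vnormal;
out vec3 color;
void main() {
    // map each component from [-1, 1] to [0, 1] so negative normals remain visible
    color = vnormal * 0.5 + 0.5;
}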
In the following code, I have two histograms that, for this simple example, I hard-coded in the source. Each vector has 128 bins: the first 64 bins correspond to one histogram and the remaining 64 correspond to another. However, the resulting distance is 0, even though there are clear differences in the last 64 bins of each vector. I don't understand how two different vectors can have a distance of zero.
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/shape/emdL1.hpp>
using Vec128f = cv::Vec<float, 128>;
float sum_of_emd_dists(const Vec128f& a, const Vec128f& b)
{
const cv::Mat a_color(cv::Size{64, 1}, CV_32FC1, (void*)(&a.val[0]));
const cv::Mat a_label(cv::Size{64, 1}, CV_32FC1, (void*)(&a.val[64]));
const cv::Mat b_color(cv::Size{64, 1}, CV_32FC1, (void*)(&b.val[0]));
const cv::Mat b_label(cv::Size{64, 1}, CV_32FC1, (void*)(&b.val[64]));
float dist = cv::EMDL1(a_color, b_color) + cv::EMDL1(a_label, b_label);
return dist;
}
int main()
{
Vec128f a = {64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.265625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.734375, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
Vec128f b = {64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.109375, 0, 0, 0, 0.109375, 0, 0, 0.09375, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0, 0, 0.09375, 0, 0, 0, 0.046875, 0.046875, 0, 0, 0, 0, 0, 0, 0, 0.078125, 0.140625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.09375, 0, 0, 0.0625, 0, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0};
std::cerr << "dist = " << sum_of_emd_dists(a, b) << std::endl;
return 0;
}
Result:
dist = 0
Thank you for any help explaining why the EMD-L1 distance is 0.
That's because each of your matrices has 1 row and 64 columns, while cv::EMDL1 expects each signature as a single-column matrix (64 rows x 1 column). Note that cv::Size takes the width (columns) first, so cv::Size{64, 1} creates a 1x64 row matrix.
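A minimal sketch of the corrected helper, wrapping the same Vec128f data from the question as 64x1 column matrices via the (rows, cols) cv::Mat constructor:
float sum_of_emd_dists(const Vec128f& a, const Vec128f& b)
{
    // 64 rows x 1 column signatures, wrapping the existing data without copying
    const cv::Mat a_color(64, 1, CV_32FC1, (void*)(&a.val[0]));
    const cv::Mat a_label(64, 1, CV_32FC1, (void*)(&a.val[64]));
    const cv::Mat b_color(64, 1, CV_32FC1, (void*)(&b.val[0]));
    const cv::Mat b_label(64, 1, CV_32FC1, (void*)(&b.val[64]));
    return cv::EMDL1(a_color, b_color) + cv::EMDL1(a_label, b_label);
}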
Situation
I have a matrix which is 300 columns and 1 row. When I cout << it, I get:
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
...which is of the form I expect/want.
Problem
However, when I loop through it, expecting one value per iteration, I get the values in a slightly different order (sometimes it is quite similar, though).
Code
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
using namespace std;
using namespace cv;
int main(){
Mat test(1,300,CV_8UC1, 255);
cout << test;
Mat frame, grayFrame,threshFrame,smaller;
VideoCapture cap(0);
while(true){
cap.read(frame);
cvtColor(frame, grayFrame, cv::COLOR_RGB2GRAY);
threshold(grayFrame, threshFrame, 160, 255, THRESH_BINARY);
smaller = threshFrame(Rect(0,0,300,1));
cout << smaller;
for(int x=0;x<smaller.cols;x++){
int color = smaller.at<Vec3b>(x,1)[0];
cout << color;
}
break;
}
}
... And the weird output that does not follow the exact same order of 0s and 255s as the original Matrix:
00000000000000000000000000000000000000000000000000000000000000000000000000000000000002552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552552550000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
The matrix has many 0s at first and few 255s, whereas the loop output has many 255s and not nearly as many leading 0s.
Essentially, I want to loop through the matrix first shown, and for each iteration, get each value. So 0,0,255,255... etc.
You're reading garbage.
The at function needs (row, col), and not (x, y). Remember that row = y, and col = x.
If your matrix is just a single row, the row index must be 0, not 1.
Your matrix is single-channel, of type unsigned char (CV_8UC1), so you need to use at<uchar>.
In practice, use:
uchar color = smaller.at<uchar>(0, x);
cout << int(color);
or, since the matrix is a single row, using the single-index overload:
uchar color = smaller.at<uchar>(x);
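Putting it together, the loop from the question could be written like this (a sketch reusing the smaller matrix from the original code):
for (int x = 0; x < smaller.cols; x++) {
    // row 0, column x of the single-row CV_8UC1 matrix
    uchar color = smaller.at<uchar>(0, x);
    cout << int(color) << ",";
}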
So I have a 2D array which acts as a map for my tiles to be drawn.
int sMap[12][20] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1},
{1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1},
{1, 0, 1, 0, 1, 2, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1},
{1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1},
{1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
{1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1},
{1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 1},
{1, 1, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 1, 0, 1, 1, 2, 2, 2, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
};
Once my tiles have been loaded, I use this function to place them:
for (int y = 0; y < 12; y++){
for (int x = 0; x < 20; x++){
if (sMap[y][x] == 1)
glBindTexture( GL_TEXTURE_2D, brick1);
else if (sMap[y][x] == 2)
glBindTexture( GL_TEXTURE_2D, brick2);
else
glBindTexture( GL_TEXTURE_2D, wall );
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(float(x + offsetx), float(MAP_SIZEY - (y + offsety)), 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f(float(x + 1 + offsetx), float(MAP_SIZEY - (y + offsety)), 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f(float(x + 1 + offsetx), float(MAP_SIZEY - (y + 1 + offsety)), 0.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(float(x + offsetx), float(MAP_SIZEY - (y + 1 + offsety)), 0.0f);
glEnd();
}
}
I think I may have confused myself with the coordinate system of the tiles, because when I draw a basic OpenGL square which acts as a sprite, I just get a black screen upon running the program.
I'm unsure whether this means the scale of the sprite relative to the tiles is wrong, or whether the sprite and tiles are on different Z planes.
I would appreciate it if someone could explain the coordinate system, in case I don't understand it as well as I thought, and also advise me on how to draw an OpenGL square at the same coordinates.
Currently I have this to draw my basic sprite:
struct RECT{float x, y, w, h;};
RECT sprite = {0, 0, 10, 10};
void drawSprite (RECT rect){
glBegin(GL_QUADS);
glColor3f(1.0f, 0.0f, 0.0f);
glVertex3f(rect.x, rect.y, 0.0);
glVertex3f(rect.x, rect.y+rect.h, 0.0);
glVertex3f(rect.x+rect.w, rect.y+rect.h, 0.0);
glVertex3f(rect.x+rect.w, rect.y, 0.0);
glEnd();
}
EDIT:
resize screen:
glViewport(0,0,width,height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45.0f,(GLfloat)width/(GLfloat)height,0.1f,20.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
draw scene:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
gluLookAt(10.0f, 8.0f, 20.0f, 10.0f, 8.0f, 0.0f, 0.0f, 1.0f, 0.0f);
glTranslatef(5.0f,4.0f,0.0f);
draw_tiles();
draw_sprite();
In the draw_tiles function it looks like you might be passing incorrect coordinates: you probably need to multiply the x and y values by your tile size (see the sketch after the snippet below).
Also try turning off depth testing and backface culling to help resolve your black screen problem:
glDisable( GL_DEPTH_TEST );
glDisable( GL_CULL_FACE );
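For example, a sketch of the scaled quad inside the tile loop, using a hypothetical TILE_SIZE constant that is not in the original code; its value would need to match the units the sprite is drawn in:
const float TILE_SIZE = 1.0f; // hypothetical: width/height of one tile in world units
float x0 = float(x + offsetx) * TILE_SIZE;
float x1 = float(x + 1 + offsetx) * TILE_SIZE;
float y0 = float(MAP_SIZEY - (y + offsety)) * TILE_SIZE;
float y1 = float(MAP_SIZEY - (y + 1 + offsety)) * TILE_SIZE;
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(x0, y0, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f(x1, y0, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f(x1, y1, 0.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(x0, y1, 0.0f);
glEnd();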