I am trying to get the 2D world coordinates on a 2D plane (Z = 0) where I clicked with the mouse in a 3D scene. I figured out that ray casting would probably be the best method.
This is code that I scavenged from the Internet:
glm::vec3 Drawer::MouseToWorldCoords(glm::vec2 coords)
{
    // getting camera position
    glm::mat3 rotMat(view);
    glm::vec3 d(view[3]);
    glm::vec3 retVec = -d * rotMat;
    //std::cout << " x " << retVec.x << " y " << retVec.y << " z " << retVec.z << std::endl;

    // getting mouse coords
    float x = 2.0 * coords.x / WINDOW_WIDTH - 1;
    float y = -2.0 * coords.y / WINDOW_HEIGHT + 1;
    float z = -1.0f;

    // raycasting
    glm::vec4 ray(x, y, z, 1.0f);
    glm::vec4 ray_eye = inverse(proj) * ray;
    ray_eye = glm::vec4(ray_eye.x, ray_eye.y, 1.0, 0.0);
    glm::vec3 ray_world = glm::vec3((glm::inverse(view) * ray_eye));
    ray_world = glm::normalize(ray_world);

    // intersecting plane with ray
    glm::vec3 ba = retVec - ray_world;
    float nDotA = glm::dot(glm::vec3(0.0f, 0.0f, 1.0f), ray_world);
    float nDotBA = glm::dot(glm::vec3(0.0f, 0.0f, 1.0f), ba);
    glm::vec3 intersect = (ray_world + (((0.0f - nDotA) / nDotBA) * ba));

    return glm::vec3(-intersect.x * 10.0f, -intersect.y * 10.0f, 0.0f);
}
This snippet of code does not work the way it should, though, as you can see in the image: the program simply spawns cubes at the locations returned by the function. To produce this result I clicked only along the edges of the screen (except for the two in the middle, of course).
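For reference, a minimal sketch of what the intended computation usually looks like: take the camera position from the inverse view matrix, build a ray through the cursor, then intersect it with the Z = 0 plane. The function name is made up; proj, view, WINDOW_WIDTH and WINDOW_HEIGHT are assumed to be the same as in the snippet above.
glm::vec3 MouseToWorldZ0(glm::vec2 coords)
{
    // camera position = translation column of the inverted view matrix
    glm::vec3 origin = glm::vec3(glm::inverse(view)[3]);

    // mouse position in normalized device coordinates
    float x = 2.0f * coords.x / WINDOW_WIDTH - 1.0f;
    float y = 1.0f - 2.0f * coords.y / WINDOW_HEIGHT;

    // unproject to a direction in world space (w = 0 keeps it a direction)
    glm::vec4 ray_eye = glm::inverse(proj) * glm::vec4(x, y, -1.0f, 1.0f);
    ray_eye = glm::vec4(ray_eye.x, ray_eye.y, -1.0f, 0.0f);
    glm::vec3 dir = glm::normalize(glm::vec3(glm::inverse(view) * ray_eye));

    // intersect origin + t*dir with the plane z = 0 (assumes dir.z != 0)
    float t = -origin.z / dir.z;
    return origin + t * dir;
}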
Related
I cannot understand the math behind this problem. I am trying to create an FPS camera where I can look around freely with my mouse input.
I am trying to rotate and position my lookat point with 180 degrees of freedom. I understand the easier solution is to glRotate the world to fit my perspective, but I do not want this approach. I am fairly unfamiliar with the trigonometry involved here and cannot figure out how to solve this problem the way I want to.
Here is my attempt so far.
Code to get the mouse coordinates relative to the center of the window, then process them in my camera object:
#define DEG2RAD(a) (a * (M_PI / 180.0f)) // convert to radians
static void glutPassiveMotionHandler(int x, int y) {
    glf centerX = WinWidth / 2; glf centerY = WinHeight / 2; // get the window's origin point
    f speed = 0.2f;
    f oldX = mouseX; f oldY = mouseY;
    mouseX = DEG2RAD(-((x - centerX))); // get distance from 0 and convert to radians
    mouseY = DEG2RAD(-((y - centerY))); // get distance from 0 and convert to radians
    f diffX = mouseX - oldX; f diffY = mouseY - oldY; // get the difference from the last frame to this frame
    if (mouseX != 0 || mouseY != 0) {
        mainCamera->Rotate(diffX, diffY);
    }
}
Code to rotate the camera
void Camera::Rotate(f angleX, f angleY) {
    Camera::refrence = Vector3D::NormalizeVector(Camera::refrence * cos(angleX)) + (Camera::upVector * sin(angleY)); // rot up
    Camera::refrence = Vector3D::NormalizeVector((Camera::refrence * cos(angleY)) - (Camera::rightVector * sin(angleX))); // rot side to side
};
Camera::refrence is our lookat point; processing the lookat point is handled as follows:
void Camera::LookAt(void) {
    gluLookAt(
        Camera::position.x, Camera::position.y, Camera::position.z,
        Camera::refrence.x, Camera::refrence.y, Camera::refrence.z,
        Camera::upVector.x, Camera::upVector.y, Camera::upVector.z
    );
};
The camera is defined by a position point (position), a target point (refrence) and an up vector (upVector). If you want to change the orientation of the camera, then you have to rotate the direction vector from the position (position) to the target (refrence) by a rotation matrix, rather than rotating the target point itself.
Note, since the two angles should change an already rotated view, you have to use a rotation matrix to rotate vectors which point in an arbitrary direction.
Write a function which sets up a 3x3 rotation matrix around an arbitrary axis:
void RotateMat(float m[], float angle_radians, float x, float y, float z)
{
    float c = cos(angle_radians);
    float s = sin(angle_radians);
    m[0] = x*x*(1.0f-c)+c;   m[1] = x*y*(1.0f-c)-z*s; m[2] = x*z*(1.0f-c)+y*s;
    m[3] = y*x*(1.0f-c)+z*s; m[4] = y*y*(1.0f-c)+c;   m[5] = y*z*(1.0f-c)-x*s;
    m[6] = z*x*(1.0f-c)-y*s; m[7] = z*y*(1.0f-c)+x*s; m[8] = z*z*(1.0f-c)+c;
}
Write a function which rotates a 3-dimensional vector by the matrix:
Vector3D Rotate(float m[], const Vector3D &v)
{
    Vector3D rv;
    rv.x = m[0] * v.x + m[3] * v.y + m[6] * v.z;
    rv.y = m[1] * v.x + m[4] * v.y + m[7] * v.z;
    rv.z = m[2] * v.x + m[5] * v.y + m[8] * v.z;
    return rv;
}
Calculate the vector from the position to the target:
Vector3D los = Vector3D(refrence.x - position.x, refrence.y - position.y, refrence.z - position.z);
Rotate all the vectors around the z axis of the world by angleX:
float rotX[9];
RotateMat(rotX, angleX, 0.0f, 0.0f, 1.0f);
los = Rotate(rotX, los);
upVector = Rotate(rotX, upVector);
Rotate all the vectors around the current y axis of the view by angleY:
float rotY[9];
RotateMat(rotY, angleY, los.x, los.y, 0.0f);
los = Rotate(rotY, los);
upVector = Rotate(rotY, upVector);
Calculate the new target point:
refrence = Vector3D(position.x + los.x, position.y + los.y, position.z + los.z);
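Putting these steps together, a sketch of the complete Camera::Rotate could look like the following. The types and member names are taken from the question; note that the normalization of the second rotation axis is an addition here, since the RotateMat formula above assumes a unit axis.
void Camera::Rotate(f angleX, f angleY)
{
    // vector from the position to the target
    Vector3D los = Vector3D(refrence.x - position.x,
                            refrence.y - position.y,
                            refrence.z - position.z);

    // rotate around the z axis of the world by angleX
    float rotX[9];
    RotateMat(rotX, angleX, 0.0f, 0.0f, 1.0f);
    los = Rotate(rotX, los);
    upVector = Rotate(rotX, upVector);

    // rotate around the horizontal axis of the view by angleY
    // (normalized here because RotateMat expects a unit axis)
    Vector3D axis = Vector3D::NormalizeVector(Vector3D(los.x, los.y, 0.0f));
    float rotY[9];
    RotateMat(rotY, angleY, axis.x, axis.y, axis.z);
    los = Rotate(rotY, los);
    upVector = Rotate(rotY, upVector);

    // calculate the new target point
    refrence = Vector3D(position.x + los.x, position.y + los.y, position.z + los.z);
}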
U_Cam_X_angle is the left/right rotation; U_Cam_Y_angle is the up/down rotation.
view_radius is the view distance (zoom) to U_look_point_x, U_look_point_y and U_look_point_z.
It is ALWAYS a negative number! This is because you are always looking in the positive direction; deeper into the screen is more positive.
This is all in radians.
The last three, eyeX, eyeY and eyeZ, are where the camera is in 3D space.
This code is in VB.net. Find a converter online for VB to C++ or do it manually (a rough C++ sketch is included after the listing).
Public Sub set_eyes()
Dim sin_x, sin_y, cos_x, cos_y As Single
sin_x = Sin(U_Cam_X_angle + angle_offset)
cos_x = Cos(U_Cam_X_angle + angle_offset)
cos_y = Cos(U_Cam_Y_angle)
sin_y = Sin(U_Cam_Y_angle)
cam_y = Sin(U_Cam_Y_angle) * view_radius
cam_x = (sin_x - (1 - cos_y) * sin_x) * view_radius
cam_z = (cos_x - (1 - cos_y) * cos_x) * view_radius
Glu.gluLookAt(cam_x + U_look_point_x, cam_y + U_look_point_y, cam_z + U_look_point_z, _
U_look_point_x, U_look_point_y, U_look_point_z, 0.0F, 1.0F, 0.0F)
eyeX = cam_x + U_look_point_x
eyeY = cam_y + U_look_point_y
eyeZ = cam_z + U_look_point_z
End Sub
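A rough C++ translation of the same routine might look like the sketch below. The camera and look-point variables are assumed to be floats with the same meaning as in the VB.net version, and gluLookAt is the GLU call the VB code wraps.
void set_eyes()
{
    float sin_x = sinf(U_Cam_X_angle + angle_offset);
    float cos_x = cosf(U_Cam_X_angle + angle_offset);
    float cos_y = cosf(U_Cam_Y_angle);
    float sin_y = sinf(U_Cam_Y_angle);

    cam_y = sin_y * view_radius;
    cam_x = (sin_x - (1.0f - cos_y) * sin_x) * view_radius;
    cam_z = (cos_x - (1.0f - cos_y) * cos_x) * view_radius;

    gluLookAt(cam_x + U_look_point_x, cam_y + U_look_point_y, cam_z + U_look_point_z,
              U_look_point_x, U_look_point_y, U_look_point_z,
              0.0f, 1.0f, 0.0f);

    eyeX = cam_x + U_look_point_x;
    eyeY = cam_y + U_look_point_y;
    eyeZ = cam_z + U_look_point_z;
}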
I am using legacy OpenGL and trying to move vertices around with the mouse. To test whether a vertex is clicked on, I loop through all vertices and multiply each by the model and projection matrices before dividing by the w value. This works fine and is shown below:
for (Vertex *vertex : context->getMesh().vertices) {
    QVector4D vert(vertex->xPos, vertex->yPos, vertex->zPos, 1.0f);
    QVector4D transformedVert = projectionMatrix * modelMatrix * vert;
    transformedVert /= transformedVert.w();
    if ((mappedX < (transformedVert.x() + 0.1) && mappedX > (transformedVert.x() - 0.1)) &&
        (mappedY < (transformedVert.y() + 0.1) && mappedY > (transformedVert.y() - 0.1))) {
        std::cout << "SUCCESS" << std::endl;
        vertexPicked = true;
        currentVertex = vertex;
    }
}
Then when I move the mouse I try to work backwards by first multiplying the current mouse coordinates by the same W value as in the first step and then multiplying by the inverse of the projection and model matrices. This moves the vertex around but not to where the mouse is.
float mouseX = ((2.0f * event->x()) / width() - 1.0f);
float mouseY = -((2.0f * event->y()) / height() - 1.0f);

// w of the current vertex after transforming it into clip space
float clipW = (projectionMatrix * modelMatrix *
               QVector4D(MousePicker::currentVertex->xPos,
                         MousePicker::currentVertex->yPos,
                         MousePicker::currentVertex->zPos, 1)).w();

// scale the mouse NDC position by that w, then apply the inverse matrices
float x = (modelMatrix.inverted() * projectionMatrix.inverted() *
           (QVector4D(mouseX, mouseY, 1, 1) * clipW)).x();
MousePicker::currentVertex->xPos = x;
I am currently only trying to change the X coordinate.
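One possible refinement, sketched here with the same Qt types, is to keep the vertex's own depth instead of the hard-coded 1 when rebuilding the clip-space position; whether this is the actual cause of the offset is an assumption.
// transform the picked vertex into clip space to recover its w and depth
QVector4D clip = projectionMatrix * modelMatrix *
                 QVector4D(MousePicker::currentVertex->xPos,
                           MousePicker::currentVertex->yPos,
                           MousePicker::currentVertex->zPos, 1.0f);
float ndcZ = clip.z() / clip.w();   // the vertex's depth in NDC

// rebuild a clip-space point under the mouse at that depth and invert the transforms
QVector4D newClip(mouseX * clip.w(), mouseY * clip.w(), ndcZ * clip.w(), clip.w());
QVector4D obj = modelMatrix.inverted() * projectionMatrix.inverted() * newClip;
MousePicker::currentVertex->xPos = obj.x();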
I'm trying to load a triangle mesh from an .off file and show it centered at the origin and scaled to fit in the unit cube, but for some reason I'm off by a large factor.
The way I'm doing this is by finding the extrema of the mesh and using them to offset the surface by that amount:
float avgX = (maxX + minX) / 2;
float avgY = (maxY + minY) / 2;
float avgZ = (maxZ + minZ) / 2;
Vector3f center(avgX, avgY, avgZ);
Vector3f offset = Vector3f(0, 0, 0) - center;
Translation3f translation(offset);
cout << "offset is: " << endl << offset << endl;
double d_theta = (M_PI / 180);
AngleAxisf rotation(d_theta, Vector3f(0, 0, 1));
float scaleX = (float) 1 / (abs(maxX - minX));
float scaleY = (float) 1 / (abs(maxY - minY));
float scaleZ = (float) 1 / (abs(maxZ - minZ));
AlignedScaling3f scale = AlignedScaling3f(scaleX, scaleY, scaleZ);
I then put it into a vector of surfaces with
Vector3f translatedCenter = translation * rotation * scale * center;
VertexBufferObject VBO;
VBO.init();
VBO.update(Vertices);
program.bindVertexAttribArray("position", VBO);
VertexBufferObject VBO_N;
VBO_N.init();
VBO_N.update(FlatNormals);
program.bindVertexAttribArray("normals", VBO_N);
cout << "updated normals" << endl;
VertexBufferObject VBO_C;
VBO_C.init();
VBO_C.update(C);
program.bindVertexAttribArray("color",VBO_C);
cout << "updated color " << endl;
Surface* s = new Surface(VBO, Vertices, translation, rotation, scale, percentScale, translatedCenter, SmoothNormals, FlatNormals, C);
And I pass it to the Vertex Shader as "model"
Affine3f model = s->getTranslation() * s->getRotation() * s->getScale();
glUniformMatrix4fv(program.uniform("model"), 1, GL_FALSE, model.data());
This is all being done using the Eigen library (https://eigen.tuxfamily.org/dox/group__TutorialGeometry.html#TutorialGeoTransform)
No matter what I try I'm off by a little bit. What am I doing wrong?
Swap translation and rotation:
Affine3f model = s->getRotation() * s->getTranslation() * s->getScale();
Note, the translation moves the center of the object to the center of the view. After that, the rotation matrix rotates around this center.
If you don't have any projection matrix, then the view space is the normalized device space, where each coordinate is in the range [-1, 1]. This means the length of a side is 2 = 1 - (-1). You have to respect this when you calculate the scale:
float scaleX = (float) 2 / (abs(maxX - minX));
float scaleY = (float) 2 / (abs(maxY - minY));
float scaleZ = (float) 2 / (abs(maxZ - minZ));
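For example, if the mesh spans from -3 to 5 along X, then maxX - minX = 8 and scaleX = 2 / 8 = 0.25; after scaling, the X extent is 8 * 0.25 = 2, which exactly fills the [-1, 1] range.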
I'm drawing a 10x10 grid of squares at a depth of 0 and trying to highlight the one the mouse is over. I've tried following the tutorial here: http://antongerdelan.net/opengl/raycasting.html
but I don't know if I did it right. I end up with a vector at the end, but I'm not sure what to do with it.
Here's a screenshot of the squares (not sure how it helps..)
http://postimg.org/image/dau330qwt/2
/* Enable attribute index 1 as being used */
glEnableVertexAttribArray(1);
float camera_z = 50;
float camera_x = 0;
float camera_y = 0;
GLuint MatrixID = glGetUniformLocation(program, "MVP");
GLuint ColorID = glGetUniformLocation(program, "input_color");
int mouse_x;
int mouse_y;
while (1) {
int window_width;
int window_height;
SDL_GetWindowSize(win, &window_width, &window_height);
glm::mat4 Projection = glm::perspective(45.0f, ((float)window_width) / window_height, 0.1f, 100.0f);
// printf("Camera at %f %f\n", camera_x, camera_y);
glm::mat4 View = glm::lookAt(glm::vec3(camera_x,camera_y,camera_z), // camera position
glm::vec3(camera_x,camera_y,0), // looking at
glm::vec3(0,1,0)); // up
int map_width = map.width();
int map_height = map.height();
/* Make our background black */
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
// go through my 10x10 map and
for (int i = 0; i < map_width; i++) {
for ( int j = 0; j < map_height; j++) {
glm::mat4 Model = glm::translate(glm::mat4(1.0f), glm::vec3(i, j, 0.0f));
glm::mat4 MVP = Projection * View * Model;
glm::vec3 color = random_color();
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniform3fv(ColorID, 1, &color[0]);
glDrawArrays(GL_LINE_LOOP, 0, 4);
}
}
/* Swap our buffers to make our changes visible */
SDL_GL_SwapWindow(win);
// printf("Window dimensions %d x %d\n", window_width, window_height);
float normalized_mouse_x = (2.0f * mouse_x) / window_width - 1.0f;
float normalized_mouse_y = 1.0f - (2.0f * mouse_y) / window_height;
printf("Normalized mouse position %f x %f\n", normalized_mouse_x, normalized_mouse_y);
glm::vec3 normalized_mouse_vector = glm::vec3(normalized_mouse_x, normalized_mouse_y, 1.0f);
glm::vec4 ray_clip = glm::vec4 (normalized_mouse_vector.x, normalized_mouse_vector.y, -1.0, 1.0);
glm::vec4 ray_eye = glm::inverse(Projection) * ray_clip;
ray_eye = glm::vec4(ray_eye.xy(), -1.0, 0.0);
glm::vec3 ray_world = (glm::inverse(View) * ray_eye).xyz();
ray_world = glm::normalize(ray_world);
// this prints out values like: World ray: 0.000266, 0.000382, 1.000000
printf("World ray: %f, %f, %f\n", ray_world.x, ray_world.y, ray_world.z);
// l = -(camera_z / ray_world.z)
float l = -(camera_z / ray_world.z);
float mouse_world_x = camera_x + l * ray_world.x;
float mouse_world_y = camera_y + l * ray_world.y;
printf("mouse world %f, %f\n", mouse_world_x, mouse_world_y);
}
Updated with code from BDL's comment. The output I get now is:
Normalized mouse position 0.087500 x 0.145833
World ray: 0.065083, 0.081353, 499.000000
World ray: 0.000130, 0.000163, 1.000000
mouse world -0.006521, -0.008152
I'm expecting the "mouse world" line to have numbers in the 1-10 range, not in the .00x range, though, based on the screenshot above showing a grid of squares with x and y ranging from 0-10.
Thanks for looking.
The intersection of a given ray r, starting at point C (in this case the camera position), with an x/y plane at z = 0 can be calculated as follows:
C ... Camera position [cx,cy,cz]
r ... ray direction [rx,ry,rz]
We are searching for the point on the ray that has z=0
C + l*r = [x,y,0]
=>
cz + l*rz = 0
l * rz = -cz
l = -(cz / rz)
The xy-coordinates of the intersection are now:
x = cx + l * rx
y = cy + l * ry
What is left to do is to check in which rectangle these (x, y) coordinates are located.
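Expressed in code against the variables from the question, this could look roughly like the following sketch. It assumes ray_world is the normalized direction, the camera is at (camera_x, camera_y, camera_z), and each square covers a unit cell starting at integer coordinates:
// intersection parameter for the plane z = 0 (assumes ray_world.z != 0)
float l = -(camera_z / ray_world.z);
float mouse_world_x = camera_x + l * ray_world.x;
float mouse_world_y = camera_y + l * ray_world.y;

// with unit-sized squares placed at integer (i, j), the hovered cell is:
int cell_i = (int)floorf(mouse_world_x);
int cell_j = (int)floorf(mouse_world_y);
bool on_grid = cell_i >= 0 && cell_i < map_width && cell_j >= 0 && cell_j < map_height;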
Edit 3: My problems were in completely different functions than I expected. I'll let the code stay, maybe this helps someone :) (and don't forget to debug!).
I'm trying to find the vector where a line intersects with a triangle.
Current state: random intersections, even if the mouse is not over the floor, and the result depends on the camera view (lookat matrix).
Steps:
Unproject the mouse coordinates
Check the line/triangle intersection
Unproject the mouse coordinates
I checked the source of glm::unProject and gluUnProject and created this function.
pixel::CVector3 pixel::CVector::unproject(
CVector2 inPosition,
pixel::CShape window,
pixel::matrix4 projectionMatrix,
pixel::matrix4 modelViewMatrix,
float depth
)
{
// transformation of normalized coordinates
CVector4 inVector;
inVector.x = (2.0f * inPosition.x) / window.width - 1.0f;
inVector.y = (2.0f * inPosition.y) / window.height - 1.0f;
inVector.z = 2.0f * depth - 1.0f;
inVector.w = 1.0f;
// multiply inverted matrix with vector
CVector4 rayWorld = pixel::CVector::multMat4Vec4(pixel::CMatrix::invertMatrix(projectionMatrix * modelViewMatrix), inVector);
CVector3 result;
result.x = rayWorld.x / rayWorld.w;
result.y = rayWorld.y / rayWorld.w;
result.z = rayWorld.z / rayWorld.w;
return result;
}
Checking intersection
pixel::CVector3 pixel::Ray::intersection(
Ray ray,
pixel::CVector3 v0,
pixel::CVector3 v1,
pixel::CVector3 v2
)
{
// compute normal
CVector3 a, b, n;
a = v1 - v0;
b = v2 - v0;
n = ray.direction.cross(b);
// find determinant
float det = a.dot(n);
if (det < 0.000001f)
{
std::cout << "Ray intersecting with backface triangles \n";
return pixel::CVector::vector3(0.0f, 0.0f, 0.0f);
}
det = 1.0f / det;
// calculate distance from vertex0 to ray origin
CVector3 s = ray.origin - v0;
float u = det * s.dot(n);
if (u < -0.000001f || u > 1.f + 0.000001f)
{
std::cout << "U: Intersection outside of the triangle!\n";
return pixel::CVector::vector3(0.0f, 0.0f, 0.0f);
}
CVector3 r = s.cross(a);
float v = det * ray.direction.dot(r);
if (v < -0.000001f || u + v > 1.f + 0.000001f)
{
std::cout << "V/U: Intersection outside of triangle!\n";
return pixel::CVector::vector3(0.0f, 0.0f, 0.0f);
}
// distance from ray to triangle
det = det * b.dot(r);
std::cout << "T: " << det << "\n";
CVector3 endPosition;
endPosition.x = ray.origin.x + (ray.direction.x * det);
endPosition.y = ray.origin.y + (ray.direction.y * det);
endPosition.z = ray.origin.z + (ray.direction.z * det);
return endPosition;
}
Usage
if (event.button.button == SDL_BUTTON_RIGHT)
{
camera->setCameraActive();
float mx = event.motion.x;
float my = window->info.height - event.motion.y;
// ray casting
pixel::Ray ray;
std::cout << "\n\n";
// near
pixel::CVector3 rayNear = pixel::CVector::unproject(
pixel::CVector::vector2(mx, my),
pixel::CVector::shape2(window->info.internalWidth, window->info.internalHeight),
camera->camInfo.currentProjection,
camera->camInfo.currentView,
1.0f
);
// far
pixel::CVector3 rayFar = pixel::CVector::unproject(
pixel::CVector::vector2(mx, my),
pixel::CVector::shape2(window->info.internalWidth, window->info.internalHeight),
camera->camInfo.currentProjection,
camera->camInfo.currentView,
0.0f
);
// normalized direction results in the same behavior
ray.origin = cameraPosition;
ray.direction = pixel::CVector::normalize(rayFar- rayNear);
std::cout << "Raycast \n";
std::cout << "Mouse Position: " << mx << " - " << my << "\n";
std::cout << "Camera Position: " << ray.origin.x << " - " << ray.origin.y << " - " << ray.origin.z << "\n";
std::cout << "Ray direction: " << ray.direction.x << " - " << ray.direction.y << " - " << ray.direction.z << "\n";
pixel::CVector3 vertOne = pixel::CVector::vector3(0.0f, 0.0f, -300.0f);
pixel::CVector3 vertTwo = pixel::CVector::vector3(0.0f, 0.0f, 0.0f);
pixel::CVector3 vertThree = pixel::CVector::vector3(300.0f, 0.0f, 0.0f);
pixel::CVector3 vertFour = pixel::CVector::vector3(300.0f, 0.0f, -300.0f);
pixel::CVector3 rayHit = pixel::Ray::intersection(ray, vertOne, vertTwo, vertThree);
pixel::CVector3 rayHit2 = pixel::Ray::intersection(ray, vertThree, vertFour, vertOne);
std::cout << "Ray hit: " << rayHit.x << " - " << rayHit.y << " - " << rayHit.z << "\n";
std::cout << "Ray hit: " << rayHit2.x << " - " << rayHit2.y << " - " << rayHit2.z << "\n";
std::cout << "--------------------\n";
towerHouse->modelMatrix = pixel::CMatrix::translateMatrix(rayHit);
Output
As I've never used glm::unProject or gluUnProject, I don't know what the normal output should look like, but I'm getting results like:
Ray direction: 0.109035 -0.0380502 0.0114562
This doesn't look right to me, but checking my code against other sources (mentioned above), I don't see the mistake(s).
Ray intersection works in some special cases (certain camera rotations), and even then I get intersections even if I don't click on the floor.
The same goes for the intersection output, which varies from backface hits to hits outside of the triangle.
All these errors make it look like the main source of the problem is the unprojection.
Any hints in the right direction?
This is nowhere close to an answer to this question, but this is too complicated to explain in comments or chat.
First of all:
// near
pixel::CVector3 rayNear = pixel::CVector::raycast(
pixel::CVector::vector2(mx, my),
pixel::CVector::shape2(window->info.internalWidth, window->info.internalHeight),
camera->camInfo.currentProjection,
camera->camInfo.currentView,
1.0f // WRONG
);
// far
pixel::CVector3 rayFar = pixel::CVector::raycast(
pixel::CVector::vector2(mx, my),
pixel::CVector::shape2(window->info.internalWidth, window->info.internalHeight),
camera->camInfo.currentProjection,
camera->camInfo.currentView,
0.0f // WRONG
);
Near is 0.0 in window-space, and far is 1.0 (depends on the depth range, but if you changed the depth range you should already know this).
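Assuming the default depth range, the two calls from the question should therefore pass 0.0 for the near point and 1.0 for the far point:
// near plane: window-space depth 0.0
pixel::CVector3 rayNear = pixel::CVector::unproject(
    pixel::CVector::vector2(mx, my),
    pixel::CVector::shape2(window->info.internalWidth, window->info.internalHeight),
    camera->camInfo.currentProjection,
    camera->camInfo.currentView,
    0.0f
);
// far plane: window-space depth 1.0
pixel::CVector3 rayFar = pixel::CVector::unproject(
    pixel::CVector::vector2(mx, my),
    pixel::CVector::shape2(window->info.internalWidth, window->info.internalHeight),
    camera->camInfo.currentProjection,
    camera->camInfo.currentView,
    1.0f
);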
In your ray cast function, you have:
CVector3 result;
result.x = rayWorld.x / rayWorld.w;
result.y = rayWorld.y / rayWorld.w;
result.z = rayWorld.z / rayWorld.w;
There is a chance that w == 0.0, and the result is not yet a ray at this time... it is a position in object-space (not world). Generally you are always going to be working with well-behaved matrices, but if you ever look at a formal implementation of UnProject (...) you will notice that they handle the case where w == 0.0 with a special return value or by setting a status flag.
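A sketch of such a guard inside the unproject function from the question (the epsilon and the zero-vector return value are arbitrary choices here):
CVector4 rayWorld = pixel::CVector::multMat4Vec4(
    pixel::CMatrix::invertMatrix(projectionMatrix * modelViewMatrix), inVector);

// guard against a degenerate w before the perspective divide
if (fabs(rayWorld.w) < 0.000001f)
{
    std::cout << "unproject: w is (nearly) zero, cannot divide\n";
    return pixel::CVector::vector3(0.0f, 0.0f, 0.0f);
}

CVector3 result;
result.x = rayWorld.x / rayWorld.w;
result.y = rayWorld.y / rayWorld.w;
result.z = rayWorld.z / rayWorld.w;
return result;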
pixel::CVector3 vertOne = pixel::CVector::vector3(0.0f, 0.0f, -300.0f);
pixel::CVector3 vertTwo = pixel::CVector::vector3(0.0f, 0.0f, 0.0f);
pixel::CVector3 vertThree = pixel::CVector::vector3(300.0f, 0.0f, 0.0f);
pixel::CVector3 vertFour = pixel::CVector::vector3(300.0f, 0.0f, -300.0f);
What coordinate space are these verts in? Presumably object-space, which means if you are casting a ray from your camera's eye point (defined in world-space) that passes through a point on your far plane, and try to test for intersection against a triangle in object-space more often than not you will miss. This is because the origin, scale and rotation for each of these spaces may differ. You need to transform those points into world-space (your original code had a floor->modelMatrix that would work well for this purpose) before you try this test.
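For example, a sketch of that transformation, assuming the quad's model matrix is available as floor->modelMatrix and that multMat4Vec4 from the unproject function can be reused; the CVector4 construction here is made up, so adapt it to whatever constructor the code base actually provides:
// bring an object-space corner into world space before the intersection test
pixel::CVector4 corner;
corner.x = 0.0f; corner.y = 0.0f; corner.z = -300.0f; corner.w = 1.0f;   // vertOne
pixel::CVector4 cornerWorld = pixel::CVector::multMat4Vec4(floor->modelMatrix, corner);
pixel::CVector3 vertOne = pixel::CVector::vector3(cornerWorld.x, cornerWorld.y, cornerWorld.z);
// repeat for the other three corners, then call pixel::Ray::intersection with these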
I tracked down the problem and fixed the bugs. I had wrong matrix*matrix and matrix*vector multiplication operators.