- (void)moveCamera:(CGPoint)resultVector {
float centerX, centerY, centerZ;
float eyeX, eyeY, eyeZ;
[self.camera centerX:&centerX centerY:&centerY centerZ:&centerZ];
[self.camera eyeX:&eyeX eyeY:&eyeY eyeZ:&eyeZ];
float newX = (resultVector.x - actorOriginalPosition.x) + (actorOriginalPosition.x - screenSize.width/2);
float newY = (resultVector.y - actorOriginalPosition.y) + (actorOriginalPosition.y - screenSize.height/2);
[self.camera setCenterX:newX centerY:newY centerZ:-150.0f];
[self.camera setEyeX:newX eyeY:newY eyeZ:eyeZ];
}
Here resultVector is the actor's current position. In the update function:
CGPoint direction = [self getPoint:myBody->GetPosition()];
[self setPosition:direction];
- (CGPoint)getPoint:(b2Vec2)vec
{
CGSize screen = [[CCDirector sharedDirector] winSize];
float x = vec.x * PTM_RATIO;
float y = vec.y * PTM_RATIO;
x = MAX(x, screen.width/2);
y = MAX(y, screen.height/2);
float _x = area.width - (screen.width/2);
float _y = area.height - (screen.height/2);
x = MIN(x, _x);
y = MIN(y, _y);
CGPoint goodPoint = ccp(x,y);
CGPoint centerOfScreen = ccp(screen.width/2, screen.height/2);
CGPoint difference = ccpSub(centerOfScreen, goodPoint);
return difference;
}
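To make the clamping concrete, here is a quick worked example with hypothetical values (PTM_RATIO = 32, a 480x320 screen, and an area much larger than the screen): a body at (5, 5) metres maps to (160, 160) px, which is clamped up to (240, 160), so the returned offset is (0, 0) and the layer does not scroll; a body at (10, 5) metres maps to (320, 160), giving an offset of (-80, 0), which scrolls the layer left so the actor stays visually centred.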
I am having problems trying to make 3D objects clickable with the mouse. For intersection checking I use ray casting. This is the code I found and ported for my solution.
The picking itself:
bool RaySphereIntersect(Vector3, Vector3, float);
bool TestIntersection(Matrix projectionMatrix, Matrix viewMatrix, Matrix worldMatrix, Vector3 origin, float radius, int m_screenWidth, int m_screenHeight, int mouseX, int mouseY)
{
float pointX, pointY;
Matrix inverseViewMatrix, translateMatrix, inverseWorldMatrix;
Vector3 direction, rayOrigin, rayDirection;
bool intersect, result;
// Move the mouse cursor coordinates into the -1 to +1 range.
pointX = ((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f) * -1.0f;
// Adjust the points using the projection matrix to account for the aspect ratio of the viewport.
pointX = pointX / projectionMatrix._11;
pointY = pointY / projectionMatrix._22;
// Get the inverse of the view matrix.
inverseViewMatrix=XMMatrixInverse(NULL, viewMatrix);
// Calculate the direction of the picking ray in view space.
direction.x = (pointX * inverseViewMatrix._11) + (pointY * inverseViewMatrix._21) + inverseViewMatrix._31;
direction.y = (pointX * inverseViewMatrix._12) + (pointY * inverseViewMatrix._22) + inverseViewMatrix._32;
direction.z = (pointX * inverseViewMatrix._13) + (pointY * inverseViewMatrix._23) + inverseViewMatrix._33;
// Get the origin of the picking ray which is the position of the camera.
// Get the world matrix and translate to the location of the sphere.
// Now get the inverse of the translated world matrix.
inverseWorldMatrix= XMMatrixInverse(NULL, worldMatrix);
// Now transform the ray origin and the ray direction from view space to world space.
rayOrigin=XMVector3TransformCoord(origin, inverseWorldMatrix);
rayDirection=XMVector3TransformNormal(direction, inverseWorldMatrix);
// Normalize the ray direction.
rayDirection=XMVector3Normalize(rayDirection);
// Now perform the ray-sphere intersection test.
intersect = RaySphereIntersect(rayOrigin, rayDirection, radius);
return intersect;
}
bool RaySphereIntersect(Vector3 rayOrigin, Vector3 rayDirection, float radius)
{
float a, b, c, discriminant;
// Calculate the a, b, and c coefficients.
a = (rayDirection.x * rayDirection.x) + (rayDirection.y * rayDirection.y) + (rayDirection.z * rayDirection.z);
b = ((rayDirection.x * rayOrigin.x) + (rayDirection.y * rayOrigin.y) + (rayDirection.z * rayOrigin.z)) * 2.0f;
c = ((rayOrigin.x * rayOrigin.x) + (rayOrigin.y * rayOrigin.y) + (rayOrigin.z * rayOrigin.z)) - (radius * radius);
// Find the discriminant.
discriminant = (b * b) - (4 * a * c);
// if discriminant is negative the picking ray missed the sphere, otherwise it intersected the sphere.
if (discriminant < 0.0f)
return false;
else
return true;
}
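For reference, the a, b and c coefficients come from substituting the ray origin + t * direction into the sphere equation |point|^2 = radius^2, with the sphere treated as centred at the origin of its own model space (which is why the ray was transformed by the inverse world matrix first): a = direction · direction, b = 2 * (origin · direction), c = origin · origin - radius^2. The ray hits the sphere exactly when the discriminant b^2 - 4ac is non-negative.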
How I create the sphere:
D3DSphere(float x, float y, float z, float radius, float r, float g, float b, float a)
{
this->x = x;
this->y = y;
this->z = z;
this->radius = radius;
this->shape = GeometricPrimitive::CreateSphere(radius*2.0f);
this->world = Matrix::Identity;
this->world = XMMatrixMultiply(this->world, Matrix::CreateTranslation(x, y, z));
this->index = vsphere;
d3dsphere[vsphere] = this;
vsphere++;
}
How I call the raycaster:
void Game::LButtonUp(int x, int y)
{
Vector3 eye(camx, camy, camz);
Vector3 at(Vector3::Zero);
m_view = Matrix::CreateLookAt(eye, at, Vector3::UnitY);
for (int i = 0; i < vsphere; i++)
{
if (TestIntersection(m_projection, m_view, d3dsphere[i]->world, eye, d3dsphere[i]->radius, 800, 600, x, y))
{
MessageBoxW(NULL, L"LOL", L"It works", MB_OK);
break;
}
}
}
Nothing happens when I click, but if I rotate the camera so that it looks perpendicular to the XOY plane, the message box sometimes appears when I click near the sphere.
Update
The MessageBox now appears independently of the camera angle, and it seems that the intersection is detected correctly, but mirrored relative to the window center. For example, if the sphere is at the point (0, window.bottom-20), I get the MessageBox when I click at the point (0, 20).
Could the calculation of the picking ray direction be wrong because it was written for a left-handed system while I use a right-handed one?
Probably because of the right-handed coordinate system that is used by default in the DirectX Tool Kit, the following section of the caster
pointX = ((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f) * -1.0f;
should be changed to:
pointX = (((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f) * -1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f);
Important
That code will also behave incorrectly because it ignores depth, i.e. you may select a sphere that is behind the sphere you are clicking on. To solve that I changed the code:
float distance3(float x1, float y1, float z1, float x2, float y2, float z2)
{
float dx=x1-x2;
float dy=y1-y2;
float dz=z1-z2;
return sqrt(dx*dx+dy*dy+dz*dz);
}
void Game::LButtonUp(int x, int y)
{
Vector3 eye(camx, camy, camz);
Vector3 at(Vector3::Zero);
m_view = Matrix::CreateLookAt(eye, at, Vector3::UnitY);
int last_index=-1;
float last_distance = 99999.0f; // start with a value obviously larger than any distance that can occur in your scene
for (int i = 0; i < vsphere; i++)
{
if (TestIntersection(m_projection, m_view, d3dsphere[i]->world, eye, d3dsphere[i]->radius, 800, 600, x, y))
{
float distance=distance3(camx,camy,camz, d3dsphere[i]->x, d3dsphere[i]->y, d3dsphere[i]->z);
if(distance<last_distance)
{
last_distance=distance;
last_index=i;
}
}
}
if (last_index >= 0)
d3dsphere[last_index]; // picked sphere: the closest one that was hit (last_index stays -1 if nothing was hit)
}
I cannot understand the math behind this problem. I am trying to create an FPS camera where I can look around freely using my mouse input.
I am trying to rotate and position my lookat point with 180 degrees of freedom. I understand the easier solution is to glRotate the world to fit my perspective, but I do not want this approach. I am fairly unfamiliar with the trigonometry involved here and cannot figure out how to solve this problem the way I want to...
here is my attempt to do this so far...
Code to get the mouse coordinates relative to the center of the window, which are then processed in my camera object:
#define DEG2RAD(a) ((a) * (M_PI / 180.0f)) // convert to radians
static void glutPassiveMotionHandler(int x, int y) {
glf centerX = WinWidth / 2; glf centerY = WinHeight / 2; // window center point
f speed = 0.2f;
f oldX = mouseX; f oldY = mouseY;
mouseX = DEG2RAD(-((x - centerX)));//get distance from 0 and convert to radians
mouseY = DEG2RAD(-((y - centerY)));//get distance from 0 and convert to radians
f diffX = mouseX - oldX; f diffY = mouseY - oldY;//get difference from last frame to this frame
if (mouseX != 0 || mouseY != 0) {
mainCamera->Rotate(diffX, diffY);
}
}
Code to rotate the camera
void Camera::Rotate(f angleX, f angleY) {
Camera::refrence = Vector3D::NormalizeVector(Camera::refrence * cos(angleX)) + (Camera::upVector * sin(angleY));//rot up
Camera::refrence = Vector3D::NormalizeVector((Camera::refrence * cos(angleY)) - (Camera::rightVector * sin(angleX)));//rot side to side
};
Camera::refrence is our look-at point; processing the look-at point is handled as follows:
void Camera::LookAt(void) {
gluLookAt(
Camera::position.x, Camera::position.y, Camera::position.z,
Camera::refrence.x, Camera::refrence.y, Camera::refrence.z,
Camera::upVector.x, Camera::upVector.y, Camera::upVector.z
);
};
The camera is defined by a position point (position), a target point (refrence) and an up vector (upVector). If you want to change the orientation of the camera, then you have to rotate the direction vector from the position (position) to the target (refrence), rather than the target point itself, by a rotation matrix.
Note, since the two angles are meant to change an already rotated view, you have to use a rotation matrix that can rotate vectors pointing in an arbitrary direction.
Write a function which sets up a 3x3 rotation matrix around an arbitrary axis:
void RotateMat(float m[], float angle_radians, float x, float y, float z)
{
float c = cos(angle_radians);
float s = sin(angle_radians);
m[0] = x*x*(1.0f-c)+c; m[1] = x*y*(1.0f-c)-z*s; m[2] = x*z*(1.0f-c)+y*s;
m[3] = y*x*(1.0f-c)+z*s; m[4] = y*y*(1.0f-c)+c; m[5] = y*z*(1.0f-c)-x*s;
m[6] = z*x*(1.0f-c)-y*s; m[7] = z*y*(1.0f-c)+x*s; m[8] = z*z*(1.0f-c)+c;
}
Write a function which rotates a 3 dimensional vector by the matrix:
Vector3D Rotate(float m[], const Vector3D &v)
{
Vector3D rv;
rv.x = m[0] * v.x + m[3] * v.y + m[6] * v.z;
rv.y = m[1] * v.x + m[4] * v.y + m[7] * v.z;
rv.z = m[2] * v.x + m[5] * v.y + m[8] * v.z;
return rv;
}
Calculate the vector from the position to the target:
Vector3D los = Vector3D(refrence.x - position.x, refrence.y - position.y, refrence.z - position.z);
Rotate all the vectors around the z axis of the world by angleX:
float rotX[9];
RotateMat(rotX, angleX, 0.0f, 0.0f, 1.0f);
los = Rotate(rotX, los);
upVector = Rotate(rotX, upVector);
Rotate all the vectors around the current y axis of the view by angleY:
float rotY[9];
RotateMat(rotY, angleY, los.x, los.y, 0.0f);
los = Rotate(rotY, los);
upVector = Rotate(rotY, upVector);
Calculate the new target point:
refrence = Vector3D(position.x + los.x, position.y + los.y, position.z + los.z);
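To tie the steps together, here is a rough sketch of how this could look inside the question's Camera::Rotate, assuming the Vector3D type and the position, refrence and upVector members from the question (::Rotate refers to the free helper above; this is a sketch, not a tested drop-in):
void Camera::Rotate(float angleX, float angleY) {
    // Line of sight: vector from the camera position to the current target
    Vector3D los = Vector3D(refrence.x - position.x, refrence.y - position.y, refrence.z - position.z);
    // Rotate the line of sight and the up vector around the world z axis by angleX
    float rotX[9];
    RotateMat(rotX, angleX, 0.0f, 0.0f, 1.0f);
    los = ::Rotate(rotX, los);
    upVector = ::Rotate(rotX, upVector);
    // Rotate around the current y axis of the view by angleY, as described above
    float rotY[9];
    RotateMat(rotY, angleY, los.x, los.y, 0.0f);
    los = ::Rotate(rotY, los);
    upVector = ::Rotate(rotY, upVector);
    // Move the target point to match the rotated line of sight
    refrence = Vector3D(position.x + los.x, position.y + los.y, position.z + los.z);
}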
U_Cam_X_angle is the left/right rotation; U_Cam_Y_angle is the up/down rotation.
view_radius is the view distance (zoom) to U_look_point_x, U_look_point_y and U_look_point_z.
It is ALWAYS a negative number! This is because you are always looking in the positive direction; deeper into the screen is more positive.
This is all in radians.
The last three, eyeX, eyeY and eyeZ, are where the camera is in 3D space.
This code is in VB.net. Find a converter online for VB to C++ or do it manually.
Public Sub set_eyes()
Dim sin_x, sin_y, cos_x, cos_y As Single
sin_x = Sin(U_Cam_X_angle + angle_offset)
cos_x = Cos(U_Cam_X_angle + angle_offset)
cos_y = Cos(U_Cam_Y_angle)
sin_y = Sin(U_Cam_Y_angle)
cam_y = Sin(U_Cam_Y_angle) * view_radius
cam_x = (sin_x - (1 - cos_y) * sin_x) * view_radius
cam_z = (cos_x - (1 - cos_y) * cos_x) * view_radius
Glu.gluLookAt(cam_x + U_look_point_x, cam_y + U_look_point_y, cam_z + U_look_point_z, _
U_look_point_x, U_look_point_y, U_look_point_z, 0.0F, 1.0F, 0.0F)
eyeX = cam_x + U_look_point_x
eyeY = cam_y + U_look_point_y
eyeZ = cam_z + U_look_point_z
End Sub
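As a side note on the trigonometry above: since sin_x - (1 - cos_y) * sin_x simplifies to sin_x * cos_y, the eye ends up at cam_x = sin(X) * cos(Y) * view_radius, cam_y = sin(Y) * view_radius, cam_z = cos(X) * cos(Y) * view_radius relative to the look point, i.e. the camera orbits the look point on a sphere of radius |view_radius|, and gluLookAt then aims it back at the look point.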
When the resize event of the window is called, the objects are moved out of the viewport / screen.
The link below is a video that shows what is happening:
https://drive.google.com/file/d/1dBnOqBDUBNCQrwr7ChFlpS8vbBQ6wfKh/view?usp=sharing
I just found out that it only happens when using Qt windowing. It did not happen with GLFW... wow.
I use the following code:
void Renderer::resize(int width, int height) {
RendererSettings* settings = RendererSettings::getInstance();
settings->setSize(width, height);
glViewport(0, 0, width, height);
if (camera != nullptr)
{
float aspectRatio = float(width) / float(height);
camera->updateProjectionPerspectiveAspect(aspectRatio);
}
}
I do not change the camera anymore.
updateProjectionPerspectiveAspect does the same as glFrustum(FoV, aspect, near, far); only the aspect ratio changes, the other parameters are kept the same.
void Camera::setProjectionPerspective(float fieldOfView, float aspectRatio, float near, float far) {
this->fieldOfView = fieldOfView;
this->aspectRatio = aspectRatio;
this->nearFrustum = near;
this->farFrustum = far;
float xmin, xmax, ymin, ymax; // Dimensions of near clipping plane
float xFmin, xFmax, yFmin, yFmax; // Dimensions of far clipping plane
// Do the Math for the near clipping plane
ymax = near * tanf(float(fieldOfView * PI_DIV_360));
ymin = -ymax;
xmin = ymin * aspectRatio;
xmax = -xmin;
// Construct the projection matrix
projectionMatrix = Mat4f::identity();
projectionMatrix[0] = (2.0f * near) / (xmax - xmin);
projectionMatrix[5] = (2.0f * near) / (ymax - ymin);
projectionMatrix[8] = (xmax + xmin) / (xmax - xmin);
projectionMatrix[9] = (ymax + ymin) / (ymax - ymin);
projectionMatrix[10] = -((far + near) / (far - near));
projectionMatrix[11] = -1.0f;
projectionMatrix[14] = -((2.0f * far * near) / (far - near));
projectionMatrix[15] = 0.0f;
}
The camera parameter is not null, and this resize event is called several times during resizing. The width and height parameters are correct.
I think your projection matrix is wrong, mainly because you don't use the aspectRatio variable at all, but the way you do it looks correct..? (So it's just me guessing :P)
Here is how I did my projection matrix in C using an aspect ratio argument; maybe this helps:
mat4 set_perspective_matrix(GLfloat fov, GLfloat aspect, GLfloat nearPlane, GLfloat farPlane)
{
mat4 p;
GLfloat f = 1.0/ tan(fov * 3.1415926/360.0);
GLfloat c1 = -(farPlane + nearPlane) / (farPlane - nearPlane);
GLfloat c2 = -(2.0 * farPlane * nearPlane) / (farPlane - nearPlane);
p._[0] = f/aspect;
p._[1] = 0.0;
p._[2] = 0.0;
p._[3] = 0.0;
p._[4] = 0.0;
p._[5] = f;
p._[6] = 0.0;
p._[7] = 0.0;
p._[8] = 0.0;
p._[9] = 0.0;
p._[10] = c1;
p._[11] = c2;
p._[12] = 0.0;
p._[13] = 0.0;
p._[14] =-1.0;
p._[15] = 0.0;
return p;
}
Here is a good article describing the setup of a projection matrix: The Perspective Matrix
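As a rough usage sketch that is not part of the answer above (on_resize, g_projection and the 60 / 0.1 / 1000 values are hypothetical; only set_perspective_matrix and mat4 come from it), the matrix would then be rebuilt whenever the window is resized:
/* g_projection: hypothetical variable holding the current projection matrix */
static mat4 g_projection;
void on_resize(int width, int height)
{
    /* Rebuild the projection with the new aspect ratio; FoV and clip planes stay fixed */
    glViewport(0, 0, width, height);
    GLfloat aspect = (GLfloat)width / (GLfloat)(height > 0 ? height : 1);
    g_projection = set_perspective_matrix(60.0f, aspect, 0.1f, 1000.0f);
}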
The problem was in the Qt windowing. It was solved using the following code to resize:
void QtOpenGLRenderer::resizeEvent(QResizeEvent* event) {
QSize size = event->size();
if (event->oldSize().isEmpty())
{
initialScreenSize = size;
return;
}
size = parentWidget->size();
float deltaX = size.width() - initialScreenSize.width();
float deltaY = size.height() - initialScreenSize.height();
renderer->resize(size.width() - deltaX, size.height() - deltaY);
}
Can anyone tell me how to use
GL11.glReadPixels()
in LWJGL to get the z depth of the ray cast by the mouse?
I can get the x and y of the view before transforming it to a ray:
float x = Mouse.getX();
float y = Mouse.getY();
but I don't know how to use glReadPixels; when I use it, it doesn't make any difference:
both calculateMousePoint and calculateMouseRay give the same result.
public static float getZDepth(int x, int y)
{
ByteBuffer zdepth = allocBytes(SIZE_FLOAT);
GL11.glReadPixels(x, y, 1, 1, GL11.GL_DEPTH_COMPONENT, GL11.GL_FLOAT, zdepth);
return ( (float) (zdepth.getFloat(0)));
}
private Vector3f calculateMouseRay() {
float mouseX = Mouse.getX();
float mouseY = Mouse.getY();
Vector2f normalizedCoords = getNormalisedDeviceCoordinates(mouseX, mouseY);
Vector4f clipCoords = new Vector4f(normalizedCoords.x, normalizedCoords.y, -1.0f, 1.0f);
Vector4f eyeCoords = toEyeCoords(clipCoords);
Vector3f worldRay = toWorldCoords(eyeCoords);
return worldRay;
}
private Vector3f calculateMousePoint() {
float mouseX = Mouse.getX();
float mouseY = Mouse.getY();
float mouseZ = getZDepth((int)mouseX,(int) mouseY);
Vector2f normalizedCoords = getNormalisedDeviceCoordinates(mouseX, mouseY);
Vector4f clipCoords = new Vector4f(normalizedCoords.x, normalizedCoords.y, mouseZ, 1.0f);
Vector4f eyeCoords = toEyeCoords2(clipCoords);
Vector3f worldRay = toWorldCoords(eyeCoords);
return worldRay;
}
private Vector3f toWorldCoords(Vector4f eyeCoords) {
Matrix4f invertedView = Matrix4f.invert(viewMatrix, null);
Vector4f rayWorld = Matrix4f.transform(invertedView, eyeCoords, null);
Vector3f mouseRay = new Vector3f(rayWorld.x, rayWorld.y, rayWorld.z);
mouseRay.normalise();
return mouseRay;
}
private Vector4f toEyeCoords(Vector4f clipCoords) {
Matrix4f invertedProjection = Matrix4f.invert(projectionMatrix, null);
Vector4f eyeCoords = Matrix4f.transform(invertedProjection, clipCoords, null);
return new Vector4f(eyeCoords.x, eyeCoords.y, -1f, 0f);
}
private Vector4f toEyeCoords2(Vector4f clipCoords) {
Matrix4f invertedProjection = Matrix4f.invert(projectionMatrix, null);
Vector4f eyeCoords = Matrix4f.transform(invertedProjection, clipCoords, null);
return new Vector4f(eyeCoords.x, eyeCoords.y, eyeCoords.z, 0f);
}
private Vector2f getNormalisedDeviceCoordinates(float mouseX, float mouseY) {
float x = (2.0f * mouseX) / Display.getWidth() - 1f;
float y = (2.0f * mouseY) / Display.getHeight() - 1f;
return new Vector2f(x, y);
}
From the documentation, x, y and width, height stand for the area to read, type is the data type, and data receives the result.
Last, the most important thing here is the format parameter: you can select what you want to retrieve. For you, it will be GL_DEPTH_COMPONENT:
FloatBuffer zmouse = BufferUtils.createFloatBuffer(1);
GL11.glReadPixels(xmouse, ymouse, 1, 1, GL11.GL_DEPTH_COMPONENT, GL11.GL_FLOAT, zmouse);
float depth = zmouse.get(0);
You have the Z depth; now you have to convert it into the right space. Right now it is in clip space, and I think you want the camera-space one. So you have to multiply the "mouse" point by the inverse of the projection and view matrices, something like realPoint = inverse(projection * view * model) * (xmouse, ymouse, zmouse).
Finally, realPoint is the point in 3D space.
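As a side note: the depth value read back by glReadPixels is in the 0..1 window range, so x, y and z all have to be mapped into the -1..1 NDC range first, e.g. ndc = (2 * x / width - 1, 2 * y / height - 1, 2 * z - 1, 1), and after multiplying by inverse(projection * view) the result has to be divided by its w component to get the actual 3D position. That is exactly what the project helper below does.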
Using your code as an example, this should do the job:
public static float getZDepth(int x, int y)
{
ByteBuffer zdepth = allocBytes(SIZE_FLOAT);
GL11.glReadPixels(x, y, 1, 1, GL11.GL_DEPTH_COMPONENT, GL11.GL_FLOAT, zdepth);
return ( (float) (zdepth.getFloat(0)));
}
private Vector3f calculateMousePoint(Vector3f point) {
float x = Mouse.getX();
float y = Mouse.getY();
float z = getZDepth((int)x, (int)y);
return project(new Vector3f(x,y,z), new Vector4f(0,0,Display.getWidth(), Display.getHeight()));
}
private Vector3f calculateFarPoint(Vector3f point) {
float x = Mouse.getX();
float y = Mouse.getY();
return project(new Vector3f(x, y, 1.0f), new Vector4f(0, 0, Display.getWidth(), Display.getHeight()));
}
// Code translated from GLM_GTC_matrix_transform
//(https://glm.g-truc.net/0.9.2/api/a00245.html#gac38d611231b15799a0c06c54ff1ede43)
private Vector3f project(Vector3f point, Vector4f viewport)
{
Matrix4f projView = Matrix4f.mul(projectionMatrix, viewMatrix, null);
Matrix4f inverse = Matrix4f.invert(projView, null);
Vector4f tmp = new Vector4f(point.x, point.y, point.z, 1.0f);
// Map window coordinates to the -1..1 NDC range
tmp.x = ((tmp.x - viewport.x) / viewport.z) * 2.0f - 1.0f;
tmp.y = ((tmp.y - viewport.y) / viewport.w) * 2.0f - 1.0f;
tmp.z = tmp.z * 2.0f - 1.0f;
Vector4f obj = Matrix4f.transform(inverse, tmp, null);
// Perspective divide
obj.x /= obj.w;
obj.y /= obj.w;
obj.z /= obj.w;
return new Vector3f(obj.x, obj.y, obj.z);
}
I am trying to map a texture to a circle using GL_POLYGON using this code:
void drawCircleOutline(Circle c, int textureindex)
{
float angle, radian, x, y; // values needed by drawCircleOutline
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textureLib[textureindex]);
glBegin(GL_POLYGON);
for (angle=0.0; angle<360.0; angle+=2.0)
{
radian = angle * (pi/180.0f);
x = (float)cos(radian) * c.r + c.pos.x;
y = (float)sin(radian) * c.r + c.pos.y;
glTexCoord2f(x, y);
glVertex2f(x, y);
}
glEnd();
glDisable(GL_TEXTURE_2D);
}
it looks like this when running.
And should look like this:
Try the following inside the loop (xcos, ysin, tx and ty are new float locals). Texture coordinates run from 0 to 1 across the image, so the unit-circle cos/sin values are remapped with * 0.5 + 0.5 instead of reusing the world-space vertex positions as texture coordinates:
radian = angle * (pi/180.0f);
xcos = (float)cos(radian);
ysin = (float)sin(radian);
x = xcos * c.r + c.pos.x;
y = ysin * c.r + c.pos.y;
tx = xcos * 0.5 + 0.5;
ty = ysin * 0.5 + 0.5;
glTexCoord2f(tx, ty);
glVertex2f(x, y);