Can anyone tell me how to use
GL11.glReadPixels()
in LWJGL to get the Z depth of the ray cast from the mouse?
I can get the x and y of the view before transforming them into a ray:
float x = Mouse.getX();
float y = Mouse.getY();
but I don't know how to use glReadPixels: when I call it, the value it returns doesn't seem meaningful, and both calculateMousePoint and calculateMouseRay give the same result.
public static float getZDepth(int x, int y)
{
    ByteBuffer zdepth = allocBytes(SIZE_FLOAT);
    GL11.glReadPixels(x, y, 1, 1, GL11.GL_DEPTH_COMPONENT, GL11.GL_FLOAT, zdepth);
    return zdepth.getFloat(0);
}

private Vector3f calculateMouseRay() {
    float mouseX = Mouse.getX();
    float mouseY = Mouse.getY();
    Vector2f normalizedCoords = getNormalisedDeviceCoordinates(mouseX, mouseY);
    Vector4f clipCoords = new Vector4f(normalizedCoords.x, normalizedCoords.y, -1.0f, 1.0f);
    Vector4f eyeCoords = toEyeCoords(clipCoords);
    Vector3f worldRay = toWorldCoords(eyeCoords);
    return worldRay;
}

private Vector3f calculateMousePoint() {
    float mouseX = Mouse.getX();
    float mouseY = Mouse.getY();
    float mouseZ = getZDepth((int) mouseX, (int) mouseY);
    Vector2f normalizedCoords = getNormalisedDeviceCoordinates(mouseX, mouseY);
    Vector4f clipCoords = new Vector4f(normalizedCoords.x, normalizedCoords.y, mouseZ, 1.0f);
    Vector4f eyeCoords = toEyeCoords2(clipCoords);
    Vector3f worldRay = toWorldCoords(eyeCoords);
    return worldRay;
}

private Vector3f toWorldCoords(Vector4f eyeCoords) {
    Matrix4f invertedView = Matrix4f.invert(viewMatrix, null);
    Vector4f rayWorld = Matrix4f.transform(invertedView, eyeCoords, null);
    Vector3f mouseRay = new Vector3f(rayWorld.x, rayWorld.y, rayWorld.z);
    mouseRay.normalise();
    return mouseRay;
}

private Vector4f toEyeCoords(Vector4f clipCoords) {
    Matrix4f invertedProjection = Matrix4f.invert(projectionMatrix, null);
    Vector4f eyeCoords = Matrix4f.transform(invertedProjection, clipCoords, null);
    return new Vector4f(eyeCoords.x, eyeCoords.y, -1f, 0f);
}

private Vector4f toEyeCoords2(Vector4f clipCoords) {
    Matrix4f invertedProjection = Matrix4f.invert(projectionMatrix, null);
    Vector4f eyeCoords = Matrix4f.transform(invertedProjection, clipCoords, null);
    return new Vector4f(eyeCoords.x, eyeCoords.y, eyeCoords.z, 0f);
}

private Vector2f getNormalisedDeviceCoordinates(float mouseX, float mouseY) {
    float x = (2.0f * mouseX) / Display.getWidth() - 1f;
    float y = (2.0f * mouseY) / Display.getHeight() - 1f;
    return new Vector2f(x, y);
}
From the documentation: x, y and width, height define the pixel area to read, type is the data type, and data receives the result.
Lastly, the most important parameter here is format: it selects what you want to retrieve. For you, it will be GL_DEPTH_COMPONENT:
FloatBuffer zmouse = BufferUtils.createFloatBuffer(1);
GL11.glReadPixels(xmouse, ymouse, 1, 1, GL11.GL_DEPTH_COMPONENT, GL11.GL_FLOAT, zmouse);
float z = zmouse.get(0);
You now have the Z depth, but you still have to convert it into the right space. Right now it is in window/clip space, and you want the point back in world space. So you have to multiply the "mouse" point by the inverse of the projection and view matrices, something like realPoint = inverse(projection * view * model) * (xmouse, ymouse, zmouse).
Finally, realPoint is the point in 3D space.
Using your code as an example, this should do the job:
public static float getZDepth(int x, int y)
{
    ByteBuffer zdepth = allocBytes(SIZE_FLOAT);
    GL11.glReadPixels(x, y, 1, 1, GL11.GL_DEPTH_COMPONENT, GL11.GL_FLOAT, zdepth);
    return zdepth.getFloat(0);
}

private Vector3f calculateMousePoint() {
    float x = Mouse.getX();
    float y = Mouse.getY();
    float z = getZDepth((int) x, (int) y);
    return project(new Vector3f(x, y, z), new Vector4f(0, 0, Display.getWidth(), Display.getHeight()));
}

private Vector3f calculateFarPoint() {
    float x = Mouse.getX();
    float y = Mouse.getY();
    return project(new Vector3f(x, y, 1.0f), new Vector4f(0, 0, Display.getWidth(), Display.getHeight()));
}

// Code translated from GLM_GTC_matrix_transform (glm::unProject)
// (https://glm.g-truc.net/0.9.2/api/a00245.html#gac38d611231b15799a0c06c54ff1ede43)
private Vector3f project(Vector3f point, Vector4f viewport)
{
    // Invert (projection * view) to map clip space back to world space.
    Matrix4f projView = Matrix4f.mul(projectionMatrix, viewMatrix, null);
    Matrix4f inverse = Matrix4f.invert(projView, null);
    Vector4f tmp = new Vector4f(point.x, point.y, point.z, 1.0f);
    // Window coordinates -> normalized device coordinates (-1..1); w stays 1.
    tmp.x = (tmp.x - viewport.x) / viewport.z;
    tmp.y = (tmp.y - viewport.y) / viewport.w;
    tmp.x = tmp.x * 2.0f - 1.0f;
    tmp.y = tmp.y * 2.0f - 1.0f;
    tmp.z = tmp.z * 2.0f - 1.0f;
    Vector4f obj = Matrix4f.transform(inverse, tmp, null);
    // Perspective divide.
    obj.x /= obj.w;
    obj.y /= obj.w;
    obj.z /= obj.w;
    return new Vector3f(obj.x, obj.y, obj.z);
}
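Since the snippet above is a translation of GLM's routine, it may also help to see the original used directly. A minimal C++ sketch (the helper name pickRay and its parameter list are mine, for illustration): unproject the mouse at the read depth and at the far plane, then form a ray between the two points:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp> // glm::unProject

// Sketch: unproject the mouse position at the read depth and at the far
// plane (depth = 1.0), then return the normalized direction between them.
glm::vec3 pickRay(float mouseX, float mouseY, float depth,
                  const glm::mat4& view, const glm::mat4& projection,
                  float width, float height)
{
    glm::vec4 viewport(0.0f, 0.0f, width, height);
    glm::vec3 nearPoint = glm::unProject(glm::vec3(mouseX, mouseY, depth),
                                         view, projection, viewport);
    glm::vec3 farPoint  = glm::unProject(glm::vec3(mouseX, mouseY, 1.0f),
                                         view, projection, viewport);
    return glm::normalize(farPoint - nearPoint);
}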
Related
I'm currently working on my own small game engine (I am learning OpenGL). I made a camera mechanism that can rotate vectors with Euler angles, and now I'm working on rotations with quaternions. I'm stuck because my quaternion rotations behave very strangely (flipping objects, not rotating the camera as it should). Please help me find out what is wrong with my algorithm. Could you suggest some fixes to my camera code? Below is my source code; some notes on it: the target I want to look at is at coordinates (0.0f, 0.0f, 0.0f), and the position from which I look at the target should be glm::vec3 position = glm::vec3(0.0f, 0.0f, 3.0f)
My camera class:
class Camera {
private:
    bool eulerMode = false;
    float m_mouseSensitivity;
    float m_velocity;
    glm::vec3 m_rightAxis{};
    glm::vec3 m_upAxis;
    glm::vec3 m_position;
    glm::vec3 m_target{};
    glm::vec3 m_r{};
    glm::vec3 m_direction{};

    static glm::vec3 rotateVector(float angle, glm::vec3 rotationAxis, glm::vec3 vectorToRotate);
    static glm::quat quaternion(float angle, glm::vec3 vec);

public:
    static Camera *s_context;
    float m_yaw;
    float m_pitch;
    float m_mouseLastX;
    float m_mouseLastY;
    bool m_firstMouse;
    glm::vec3 m_frontAxis;

    Camera(float speed,
           int width,
           int height,
           glm::vec3 position = glm::vec3(0.0f, 0.0f, 3.0f),
           glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f),
           glm::vec3 target = glm::vec3(0.0f, 0.0f, 0.0f)
    );

    glm::mat4 getLookAtMatrix();
    static void quaternionRotate(GLFWwindow *window, double x, double y);
};

Camera::Camera(
    float speed,
    int width,
    int height,
    glm::vec3 position,
    glm::vec3 up,
    glm::vec3 target
)
    : m_pitch(0.0f), m_yaw(-90.0f), m_mouseLastX((float) width / 2),
      m_mouseLastY((float) height / 2), m_mouseSensitivity(0.1f), m_upAxis(up), m_position(position),
      m_frontAxis(glm::vec3(0.0f, 0.0f, -1.0f)), m_firstMouse(true) {
    m_velocity = speed;
    m_direction = glm::normalize(position - target);
    m_rightAxis = glm::normalize(glm::cross(up, m_direction));
    m_upAxis = glm::cross(m_direction, m_rightAxis);
    s_context = this;
    glfwSetWindowUserPointer(g_Window->getOpenGLWindow(), this);
    if (eulerMode) {
        glfwSetCursorPosCallback(g_Window->getOpenGLWindow(), eulerRotate);
    } else {
        glfwSetCursorPosCallback(g_Window->getOpenGLWindow(), quaternionRotate);
    }
}

glm::mat4 Camera::getLookAtMatrix() {
    glm::mat4 view = glm::lookAt(m_position, m_r, m_upAxis);
    return view;
}
// for the sake of brevity, I skipped some class methods that are unnecessary for quaternion rotations
void Camera::quaternionRotate(GLFWwindow *window, double x, double y) {
    if (s_context->m_firstMouse) {
        s_context->m_mouseLastX = (float) x;
        s_context->m_mouseLastY = (float) y;
        s_context->m_firstMouse = false;
    }
    auto xoffset = (float) (x - s_context->m_mouseLastX);
    auto yoffset = (float) (s_context->m_mouseLastY - y);
    s_context->m_mouseLastX = (float) x;
    s_context->m_mouseLastY = (float) y;

    float sensitivity = 0.1f;
    xoffset *= sensitivity;
    yoffset *= sensitivity;
    s_context->m_yaw += xoffset;
    s_context->m_pitch += yoffset;

    glm::vec3 yAxis = glm::vec3(0, 1, 0);
    // Rotate the view vector by the horizontal angle around the vertical axis
    glm::vec3 view = s_context->m_direction;
    view = glm::normalize(rotateVector(s_context->m_yaw, yAxis, view));
    // Rotate the view vector by the vertical angle around the horizontal axis
    glm::vec3 xAxis = glm::normalize(glm::cross(yAxis, view));
    view = glm::normalize(rotateVector(s_context->m_pitch, xAxis, view));
    s_context->m_r = view;
    s_context->m_upAxis = glm::normalize(glm::cross(s_context->m_r, xAxis));
}

glm::vec3 Camera::rotateVector(float angle, const glm::vec3 rotationAxis, const glm::vec3 vectorToRotate) {
    glm::quat rotationQ = quaternion(angle, rotationAxis);
    glm::quat conjugateQ = glm::conjugate(rotationQ);
    glm::quat result = rotationQ * vectorToRotate * conjugateQ;
    return {result.x, result.y, result.z};
}

glm::quat Camera::quaternion(float angle, const glm::vec3 vec) {
    float HalfAngleInRadians = glm::radians(angle / 2);
    float SineHalfAngle = sinf(HalfAngleInRadians);
    float CosHalfAngle = cosf(HalfAngleInRadians);
    float xC = vec.x * SineHalfAngle;
    float yC = vec.y * SineHalfAngle;
    float zC = vec.z * SineHalfAngle;
    float wC = CosHalfAngle;
    return {wC, xC, yC, zC};
}
I need to implement an arcball camera. I got something similar working, but it behaves very erratically: the angle changes sharply, and when turning right/left the camera pitches up/down strongly.
Here is my source code; can you tell me where I went wrong?
bool get_arcball_vec(double x, double y, glm::vec3& a)
{
    glm::vec3 vec = glm::vec3((2.0 * x) / window.getWidth() - 1.0, 1.0 - (2.0 * y) / window.getHeight(), 0.0);
    if (glm::length(vec) >= 1.0)
    {
        vec = glm::normalize(vec);
    }
    else
    {
        vec.z = sqrt(1.0 - pow(vec.x, 2.0) - pow(vec.y, 2.0));
    }
    a = vec;
    return true;
}
...
void onMouseMove(double x, double y) {
    if (rightMouseButtonPressed) {
        glm::vec3 a, b;
        cur_mx = x;
        cur_my = y;
        if (cur_mx != last_mx || cur_my != last_my)
            if (get_arcball_vec(last_mx, last_my, a) && get_arcball_vec(cur_mx, cur_my, b))
                viewport.getCamera().orbit(a, b);
        last_mx = cur_mx;
        last_my = cur_my;
        ...
void Camera::orbit(glm::vec3 a, glm::vec3 b)
{
    forward = calcForward();
    right = calcRight();
    double alpha = acos(glm::min(1.0f, glm::dot(b, a)));
    glm::vec3 axis = glm::cross(a, b);

    glm::mat4 rotationComponent = glm::mat4(1.0f);
    rotationComponent[0] = glm::vec4(right, 0.0f);
    rotationComponent[1] = glm::vec4(up, 0.0f);
    rotationComponent[2] = glm::vec4(forward, 0.0f);
    glm::mat4 toWorldCameraSpace = glm::transpose(rotationComponent);
    axis = toWorldCameraSpace * glm::vec4(axis, 1.0);

    glm::mat4 orbitMatrix = glm::rotate(glm::mat4(1.0f), (float) alpha, axis);
    eye = glm::vec4(target, 1.0) + orbitMatrix * glm::vec4(eye - target, 1.0f);
    up = orbitMatrix * glm::vec4(up, 1.0f);
}
I use this code to map the 2D mouse position to the sphere:
Vector3 GetArcBallVector(const Vector2f & mousePos) {
    float radiusSquared = 1.0; // squared radius of the sphere

    // compute mouse position from the centre of the screen to the interval [-half, +half]
    Vector3 pt = Vector3(
        mousePos.x - halfScreenW,
        halfScreenH - mousePos.y,
        0.0f
    );

    // if the squared length is smaller than the squared sphere radius,
    // the point is inside the sphere
    float lengthSqr = pt.x * pt.x + pt.y * pt.y;
    if (lengthSqr < radiusSquared) {
        // inside
        pt.z = std::sqrtf(radiusSquared - lengthSqr);
    }
    else {
        pt.z = 0.0f;
    }

    pt.z *= -1;
    return pt;
}
To calculate the rotation, I use the last (startPt) and current (endPt) mapped positions and do:
Quaternion actRot = Quaternion::Identity();
Vector3 axis = Vector3::Cross(endPt, startPt);
if (axis.LengthSquared() > MathUtils::EPSILON) {
    float angleCos = Vector3::Dot(endPt, startPt);
    actRot = Quaternion(axis.x, axis.y, axis.z, angleCos);
}
I prefer to use quaternions over matrices since they are easy to multiply (for accumulated rotation) and interpolate (for some smoothing).
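To illustrate that accumulation and smoothing, here is a minimal GLM sketch; the names accumulated, applyDrag and the 0.25f blend factor are my own assumptions for illustration, not part of the code above:
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp> // glm::quat, glm::slerp, glm::mat4_cast

// Orientation accumulated over all drags so far; glm::quat is (w, x, y, z).
glm::quat accumulated(1.0f, 0.0f, 0.0f, 0.0f); // identity

void applyDrag(const glm::vec3& startPt, const glm::vec3& endPt)
{
    glm::vec3 axis = glm::cross(endPt, startPt);
    if (glm::dot(axis, axis) > 1e-6f) {
        // Same construction as above: vector part from the cross product,
        // scalar part from the dot product of the two mapped points.
        glm::quat drag(glm::dot(endPt, startPt), axis.x, axis.y, axis.z);
        // Multiplying quaternions accumulates the rotation.
        accumulated = glm::normalize(drag * accumulated);
    }
}

// Smoothing: interpolate part of the way toward a target orientation,
// then convert to a matrix for rendering.
glm::mat4 smoothedModel(const glm::quat& target)
{
    glm::quat blended = glm::slerp(accumulated, target, 0.25f);
    return glm::mat4_cast(blended);
}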
I am facing problems trying to make 3D objects clickable with the mouse. For intersection checking I use ray casting. This is the code I found, ported to my solution:
Exact picking
bool RaySphereIntersect(Vector3, Vector3, float);

bool TestIntersection(Matrix projectionMatrix, Matrix viewMatrix, Matrix worldMatrix, Vector3 origin, float radius, int m_screenWidth, int m_screenHeight, int mouseX, int mouseY)
{
    float pointX, pointY;
    Matrix inverseViewMatrix, translateMatrix, inverseWorldMatrix;
    Vector3 direction, rayOrigin, rayDirection;
    bool intersect, result;

    // Move the mouse cursor coordinates into the -1 to +1 range.
    pointX = ((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f;
    pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f) * -1.0f;

    // Adjust the points using the projection matrix to account for the aspect ratio of the viewport.
    pointX = pointX / projectionMatrix._11;
    pointY = pointY / projectionMatrix._22;

    // Get the inverse of the view matrix.
    inverseViewMatrix = XMMatrixInverse(NULL, viewMatrix);

    // Calculate the direction of the picking ray in view space.
    direction.x = (pointX * inverseViewMatrix._11) + (pointY * inverseViewMatrix._21) + inverseViewMatrix._31;
    direction.y = (pointX * inverseViewMatrix._12) + (pointY * inverseViewMatrix._22) + inverseViewMatrix._32;
    direction.z = (pointX * inverseViewMatrix._13) + (pointY * inverseViewMatrix._23) + inverseViewMatrix._33;

    // Get the origin of the picking ray which is the position of the camera.
    // Get the world matrix and translate to the location of the sphere.
    // Now get the inverse of the translated world matrix.
    inverseWorldMatrix = XMMatrixInverse(NULL, worldMatrix);

    // Now transform the ray origin and the ray direction from view space to world space.
    rayOrigin = XMVector3TransformCoord(origin, inverseWorldMatrix);
    rayDirection = XMVector3TransformNormal(direction, inverseWorldMatrix);

    // Normalize the ray direction.
    rayDirection = XMVector3Normalize(rayDirection);

    // Now perform the ray-sphere intersection test.
    intersect = RaySphereIntersect(rayOrigin, rayDirection, radius);
    return intersect;
}

bool RaySphereIntersect(Vector3 rayOrigin, Vector3 rayDirection, float radius)
{
    float a, b, c, discriminant;

    // Calculate the a, b, and c coefficients.
    a = (rayDirection.x * rayDirection.x) + (rayDirection.y * rayDirection.y) + (rayDirection.z * rayDirection.z);
    b = ((rayDirection.x * rayOrigin.x) + (rayDirection.y * rayOrigin.y) + (rayDirection.z * rayOrigin.z)) * 2.0f;
    c = ((rayOrigin.x * rayOrigin.x) + (rayOrigin.y * rayOrigin.y) + (rayOrigin.z * rayOrigin.z)) - (radius * radius);

    // Find the discriminant.
    discriminant = (b * b) - (4 * a * c);

    // If the discriminant is negative the picking ray missed the sphere; otherwise it intersected the sphere.
    return discriminant >= 0.0f;
}
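For reference, those coefficients come from substituting the ray p(t) = rayOrigin + t * rayDirection into the sphere equation |p|^2 = radius^2 (the sphere sits at the origin here because the ray was moved into the sphere's local space via inverseWorldMatrix). Expanding gives (d·d)t^2 + 2(o·d)t + (o·o - radius^2) = 0, so a = d·d, b = 2(o·d), c = o·o - radius^2, and a real root t exists exactly when the discriminant b^2 - 4ac is non-negative.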
How I create the sphere:
D3DSphere(float x, float y, float z, float radius, float r, float g, float b, float a)
{
    this->x = x;
    this->y = y;
    this->z = z;
    this->radius = radius;
    this->shape = GeometricPrimitive::CreateSphere(radius * 2.0f);
    this->world = Matrix::Identity;
    this->world = XMMatrixMultiply(this->world, Matrix::CreateTranslation(x, y, z));
    this->index = vsphere;
    d3dsphere[vsphere] = this;
    vsphere++;
}
How I call the raycaster:
void Game::LButtonUp(int x, int y)
{
    Vector3 eye(camx, camy, camz);
    Vector3 at(Vector3::Zero);
    m_view = Matrix::CreateLookAt(eye, at, Vector3::UnitY);
    for (int i = 0; i < vsphere; i++)
    {
        if (TestIntersection(m_projection, m_view, d3dsphere[i]->world, eye, d3dsphere[i]->radius, 800, 600, x, y))
        {
            MessageBoxW(NULL, L"LOL", L"It works", MB_OK);
            break;
        }
    }
}
Nothing happens when I click, but if I rotate the camera perpendicular to XOY, clicking near the sphere sometimes makes the message box appear.
Update
The MessageBox appears independently of the camera angle, and it seems to detect the intersection correctly, but mirrored relative to the window center. For example, if the sphere is at the point (0, window.bottom - 20), I get the MessageBox when I click at the point (0, 20).
Could the calculation of the picking-ray direction be wrong because it was written for a left-handed system and I use a right-handed one?
Probably because of the right-handed coordinate system used by default in the DirectX Tool Kit, this section of the caster
pointX = ((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f) * -1.0f;
Should be changed to
pointX = (((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f) * -1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f);
Important
That code will also work incorrectly because it ignores depth, i.e. you may select a sphere that is behind the sphere you are clicking on. To solve that I changed the code:
float distance3(float x1, float y1, float z1, float x2, float y2, float z2)
{
    float dx = x1 - x2;
    float dy = y1 - y2;
    float dz = z1 - z2;
    return sqrt(dx*dx + dy*dy + dz*dz);
}

void Game::LButtonUp(int x, int y)
{
    Vector3 eye(camx, camy, camz);
    Vector3 at(Vector3::Zero);
    m_view = Matrix::CreateLookAt(eye, at, Vector3::UnitY);
    int last_index = -1;
    float last_distance = 99999.0f; // set a value obviously larger than any distance in your scene
    for (int i = 0; i < vsphere; i++)
    {
        if (TestIntersection(m_projection, m_view, d3dsphere[i]->world, eye, d3dsphere[i]->radius, 800, 600, x, y))
        {
            // Keep the hit closest to the camera.
            float distance = distance3(camx, camy, camz, d3dsphere[i]->x, d3dsphere[i]->y, d3dsphere[i]->z);
            if (distance < last_distance)
            {
                last_distance = distance;
                last_index = i;
            }
        }
    }
    if (last_index >= 0)
    {
        d3dsphere[last_index]; // picked sphere; handle it here
    }
}
My problem is a bit involved. I'm trying to draw a sprite using DirectX 11, and to handle scale and rotation I use a transformation matrix. I extracted this code from SFML, an open-source library, and translation works fine. However, rotation and scale aren't working as expected, as I show next.
When I rotate around the center, all is correct.
When I move the sprite and then rotate it, the sprite rotates around an "unknown" point.
The same happens when I scale the sprite: it scales from an "unknown" point.
If I rotate the sprite, the translation axes stay the same but with the sprite rotated, so I can't move it properly.
I leave you here a little video of exactly what's going on. [VIDEO]
I think the problem is related to how I transform the sprite, but I can't be sure. I leave you here the code parts I think are involved in this error, but you can also take a look at the GitHub project for a deeper look. [GITHUB PROJECT]
Transformable.cpp
const Transform& Transformable::GetTransform() const
{
    if (_transformNeedUpdate)
    {
        float angle = -_rotation * 3.141592654f / 180.f;
        float cosine = static_cast<float>(std::cos(angle));
        float sine = static_cast<float>(std::sin(angle));
        float sxc = _scale.x * cosine;
        float syc = _scale.y * cosine;
        float sxs = _scale.x * sine;
        float sys = _scale.y * sine;
        float tx = -_origin.x * sxc - _origin.y * sys + _position.x;
        float ty = _origin.x * sxs - _origin.y * syc + _position.y;

        _transform = Transform(sxc, sys, tx,
                               -sxs, syc, ty,
                               0.f, 0.f, 1.f);
        _transformNeedUpdate = false;
    }
    return _transform;
}
Transform.cpp
Transform::Transform(
    float a00, float a01, float a02,
    float a10, float a11, float a12,
    float a20, float a21, float a22
)
{
    _matrix[0] = a00; _matrix[4] = a01; _matrix[8] = 0.f;  _matrix[12] = a02;
    _matrix[1] = a10; _matrix[5] = a11; _matrix[9] = 0.f;  _matrix[13] = a12;
    _matrix[2] = 0.f; _matrix[6] = 0.f; _matrix[10] = 1.f; _matrix[14] = 0.f;
    _matrix[3] = a20; _matrix[7] = a21; _matrix[11] = 0.f; _matrix[15] = a22;
}

D3DXVECTOR2 Transform::TransformPoint(float x, float y) const
{
    return D3DXVECTOR2(_matrix[0] * x + _matrix[4] * y + _matrix[12],
                       _matrix[1] * x + _matrix[5] * y + _matrix[13]);
}

D3DXVECTOR2 operator *(const Transform& left, const D3DXVECTOR2& right)
{
    return left.TransformPoint(right.x, right.y);
}
Bitmap.cpp (where I set up the vertices to be drawn)
HRESULT Bitmap::UpdateBuffers(ID3D11DeviceContext* deviceContext)
{
    if (_transform == _previousTransform && _bounds == _previousBounds)
    {
        return S_OK;
    }

    VertexType* vertices;
    D3D11_MAPPED_SUBRESOURCE mappedResource;
    VertexType* verticesPtr;
    HRESULT result;

    _previousTransform = _transform;
    _previousBounds = _bounds;

    vertices = new VertexType[_vertexCount];
    if (!vertices)
    {
        return CO_E_ERRORINAPP;
    }

    float left = _bounds.left();
    float right = left + _bounds.width();
    float top = _bounds.top();
    float bottom = top + _bounds.height();

    D3DXVECTOR2 topLeft = { left, top };
    D3DXVECTOR2 bottomRight = { right, bottom };
    D3DXVECTOR2 topRight = { right, top };
    D3DXVECTOR2 bottomLeft = { left, bottom };

    topLeft = _transform * topLeft;
    bottomRight = _transform * bottomRight;
    topRight = _transform * topRight;
    bottomLeft = _transform * bottomLeft;

    vertices[0].position = D3DXVECTOR3(topLeft.x, topLeft.y, 0.0f);
    vertices[0].texture = D3DXVECTOR2(0.0f, 0.0f);
    vertices[1].position = D3DXVECTOR3(bottomRight.x, bottomRight.y, 0.0f);
    vertices[1].texture = D3DXVECTOR2(1.0f, 1.0f);
    vertices[2].position = D3DXVECTOR3(bottomLeft.x, bottomLeft.y, 0.0f);
    vertices[2].texture = D3DXVECTOR2(0.0f, 1.0f);
    vertices[3].position = D3DXVECTOR3(topLeft.x, topLeft.y, 0.0f);
    vertices[3].texture = D3DXVECTOR2(0.0f, 0.0f);
    vertices[4].position = D3DXVECTOR3(topRight.x, topRight.y, 0.0f);
    vertices[4].texture = D3DXVECTOR2(1.0f, 0.0f);
    vertices[5].position = D3DXVECTOR3(bottomRight.x, bottomRight.y, 0.0f);
    vertices[5].texture = D3DXVECTOR2(1.0f, 1.0f);

    result = deviceContext->Map(_vertexBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
    if (FAILED(result))
    {
        return CO_E_ERRORINAPP;
    }

    verticesPtr = (VertexType*)mappedResource.pData;
    memcpy(verticesPtr, (void*)vertices, (sizeof(VertexType) * _vertexCount));
    deviceContext->Unmap(_vertexBuffer, 0);

    delete[] vertices;
    vertices = 0;

    return S_OK;
}
The coordinate system is as shown in the image below; the center of the screen is the (0, 0) position.
The code flow
When the position, scale, rotation or origin of the actor changes, a new Transform is generated. Then I transform the local bounds of the actor with the generated transform to get the correct vertex positions, and I set them into the vertex buffer.
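For what it's worth, the single 3x3 matrix built in GetTransform() is equivalent to composing four separate steps. Here is a rough GLM sketch of that composition (the function name spriteTransform is mine, and SFML's conventions are assumed, so treat it as illustration rather than a drop-in):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Reading right to left: move the origin point to (0,0), scale, rotate,
// then translate to the final position. A sprite that rotates or scales
// around an unexpected point usually means one of these steps is out of
// order or the origin offset is missing.
glm::mat4 spriteTransform(glm::vec2 position, float rotationDeg,
                          glm::vec2 scale, glm::vec2 origin)
{
    glm::mat4 m(1.0f);
    m = glm::translate(m, glm::vec3(position, 0.0f));
    m = glm::rotate(m, glm::radians(-rotationDeg), glm::vec3(0.0f, 0.0f, 1.0f));
    m = glm::scale(m, glm::vec3(scale, 1.0f));
    m = glm::translate(m, glm::vec3(-origin, 0.0f));
    return m;
}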
Using MonoTouch and OpenTK, I am trying to get the screen coordinate of one 3D point. I have my world-view-projection matrix set up, and OpenGL makes sense of it and projects my 3D model perfectly, but how do I use the same matrix to project just one point from 3D to 2D?
I thought I could simply use:
Vector3.Transform(ref input3Dpos, ref matWorldViewProjection, out projected2Dpos);
and then have the projected screen coordinate in projected2Dpos. But the resulting vector does not seem to represent the proper projected screen coordinate, and I do not know how to calculate it from there.
I found I need to divide by Vector4.W; however, I am still getting the wrong values. I am using this method now:
private static bool GluProject(OpenTK.Vector3 objPos, OpenTK.Matrix4 matWorldViewProjection, int[] viewport, out OpenTK.Vector3 screenPos)
{
    OpenTK.Vector4 _in;
    _in.X = objPos.X;
    _in.Y = objPos.Y;
    _in.Z = objPos.Z;
    _in.W = 1f;

    Vector4 _out = OpenTK.Vector4.Transform(_in, matWorldViewProjection);
    if (_out.W <= 0.0)
    {
        screenPos = OpenTK.Vector3.Zero;
        return false;
    }

    _out.X /= _out.W;
    _out.Y /= _out.W;
    _out.Z /= _out.W;

    /* Map x, y and z to range 0-1 */
    _out.X = _out.X * 0.5f + 0.5f;
    _out.Y = -_out.Y * 0.5f + 0.5f;
    _out.Z = _out.Z * 0.5f + 0.5f;

    /* Map x,y to viewport */
    _out.X = _out.X * viewport[2] + viewport[0];
    _out.Y = _out.Y * viewport[3] + viewport[1];

    screenPos.X = _out.X;
    screenPos.Y = _out.Y;
    screenPos.Z = _out.Z;
    return true;
}
I cannot see any errors though... :S
In the first question you're missing the last step: mapping from NDC (normalized device coordinates) to viewport coordinates. That's what the lines
/* Map x,y to viewport */
_out.X = _out.X * viewport[2] + viewport[0];
_out.Y = _out.Y * viewport[3] + viewport[1];
in your GluProject do.
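To make that mapping concrete, here is a tiny sketch (in C++ only for consistency with the other snippets; the viewport is (x, y, width, height) as in the code above):
// NDC in [-1, 1] to window coordinates.
// Example: viewport (0, 0, 800, 600) maps NDC (0, 0) to the center (400, 300)
// and NDC (-1, -1) to the viewport origin (0, 0).
float toWindowX(float ndcX, const int viewport[4]) {
    return (ndcX * 0.5f + 0.5f) * viewport[2] + viewport[0];
}
float toWindowY(float ndcY, const int viewport[4]) {
    return (ndcY * 0.5f + 0.5f) * viewport[3] + viewport[1];
}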
You have two options: you can calculate it yourself, or use Glu.Project. I prefer the first.
Number 1:
private Vector2 Convert(
    Vector3 pos,
    Matrix4 viewMatrix,
    Matrix4 projectionMatrix,
    int screenWidth,
    int screenHeight)
{
    // Transform with an explicit w = 1 so the perspective divide uses the real w.
    Vector4 clip = Vector4.Transform(new Vector4(pos, 1f), viewMatrix * projectionMatrix);
    clip.X /= clip.W;
    clip.Y /= clip.W;

    // NDC (-1..1) to screen coordinates.
    clip.X = (clip.X + 1) * screenWidth / 2;
    clip.Y = (clip.Y + 1) * screenHeight / 2;
    return new Vector2(clip.X, clip.Y);
}
Number 2:
public Vector2 form3Dto2D(Vector3 our3DPoint)
{
    Vector3 our2DPoint;

    float[] modelviewMatrix = new float[16];
    float[] projectionMatrix = new float[16];
    int[] viewport = new int[4];

    GL.GetFloat(GetPName.ModelviewMatrix, modelviewMatrix);
    GL.GetFloat(GetPName.ProjectionMatrix, projectionMatrix);
    GL.GetInteger(GetPName.Viewport, viewport);

    OpenTK.Graphics.Glu.Project(our3DPoint, convertFloatsToDoubles(modelviewMatrix),
        convertFloatsToDoubles(projectionMatrix), viewport, out our2DPoint);

    return new Vector2(our2DPoint.X, our2DPoint.Y);
}
public static double[] convertFloatsToDoubles(float[] input)
{
    if (input == null)
    {
        return null; // Or throw an exception - your choice
    }

    double[] output = new double[input.Length];
    for (int i = 0; i < input.Length; i++)
    {
        output[i] = input[i];
    }
    return output;
}