I have a subdivided icosahedron with LOD, and now I am trying to add a dynamic material.
The problem is that I need the normals for that.
I use Unreal Engine 4, and when I use the built-in function to calculate normals I get strange artifacts in the area between different LOD levels.
So I wrote my own calculation with this code:
TArray<FVector> normals;
normals.Init(FVector(-1.f, -1.f, -1.f), geoData.GeoData.Num());

int32 triangleCount = geoData.Triangles.Num() / 3;
for (int32 i = 0; i < triangleCount; i++)
{
    int32 normalTriangleIndex = i * 3;
    int32 triangleIndexA = geoData.Triangles[normalTriangleIndex];
    int32 triangleIndexB = geoData.Triangles[normalTriangleIndex + 1];
    int32 triangleIndexC = geoData.Triangles[normalTriangleIndex + 2];

    FVector pointA = geoData.GeoData[triangleIndexA];
    FVector pointB = geoData.GeoData[triangleIndexB];
    FVector pointC = geoData.GeoData[triangleIndexC];

    FVector sideAB = pointB - pointA;
    FVector sideBC = pointC - pointB;

    // Face normal from the cross product of two triangle edges.
    FVector nNormal = FVector::CrossProduct(sideAB, sideBC);
    nNormal = nNormal.GetSafeNormal();

    // Every vertex of the triangle gets the (flipped) face normal;
    // shared vertices are simply overwritten by the last triangle that touches them.
    normals[triangleIndexA] = -nNormal;
    normals[triangleIndexB] = -nNormal;
    normals[triangleIndexC] = -nNormal;
}
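If the goal is smooth shading across shared vertices, a common alternative to overwriting each shared entry (as the loop above does) is to accumulate the face normals per vertex and normalize once at the end. A minimal sketch, reusing the geoData layout from the snippet above:

// Accumulate area-weighted face normals per vertex, then normalize.
TArray<FVector> smoothNormals;
smoothNormals.Init(FVector::ZeroVector, geoData.GeoData.Num());

const int32 triCount = geoData.Triangles.Num() / 3;
for (int32 i = 0; i < triCount; i++)
{
    const int32 base = i * 3;
    const int32 ia = geoData.Triangles[base];
    const int32 ib = geoData.Triangles[base + 1];
    const int32 ic = geoData.Triangles[base + 2];

    const FVector& a = geoData.GeoData[ia];
    const FVector& b = geoData.GeoData[ib];
    const FVector& c = geoData.GeoData[ic];

    // The unnormalized cross product is proportional to the triangle area,
    // so larger triangles contribute more to a shared vertex normal.
    const FVector faceNormal = FVector::CrossProduct(b - a, c - b);

    smoothNormals[ia] += faceNormal;
    smoothNormals[ib] += faceNormal;
    smoothNormals[ic] += faceNormal;
}

for (FVector& n : smoothNormals)
{
    n = -n.GetSafeNormal(); // same sign convention as the flat version above
}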
The calculation itself seems to be correct: when I skip the morphing calculation in my shader, those artifacts disappear.
Maybe the triangles are too close to each other while they are being morphed, and that is what causes the artifacts?
This is how the morphing looks. (Source)
I solved it by skipping the first morph value.
Now morphing starts from the 2nd picture, as you can see on the screenshot.
float morphFac(float dist, int lev)
{
    float low = input_distanceLUT[lev - 1];
    float high = input_distanceLUT[lev];

    float delta = high - low;
    float a = (dist - low) / delta;

    float morphRange = 0.5f;
    return 1.0f - clamp(a / morphRange, 0.1f, 1.0f);
}
It is just a tiny change in the shader: clamp(a / morphRange, 0.1f, 1.0f); the lower bound 0.1f was 0.0f earlier.
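To make the effect of that change concrete: for a distance right at the start of the band (a = 0) the function now returns 1.0 - 0.1 = 0.9 instead of 1.0, so the morph factor never reaches the fully morphed value of 1.0, while for a >= morphRange it still returns 0.0 exactly as before.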
I'm trying to use Bullet physics to cast a ray and hit a game object in the scene so I can select it. I am using the camera matrix to build the ray, pick a point in space, and then look through a list of game objects for one at the same location.
On mouse press I have the following code; it seems to be off and only picks the items sometimes:
glm::vec4 lRayStart_NDC(
    ((float)lastX / (float)RECT_WIDTH - 0.5f) * 2.0f,
    ((float)lastY / (float)RECT_HEIGHT - 0.5f) * 2.0f,
    -1.0,
    1.0f
);
glm::vec4 lRayEnd_NDC(
    ((float)lastX / (float)RECT_WIDTH - 0.5f) * 2.0f,
    ((float)lastY / (float)RECT_HEIGHT - 0.5f) * 2.0f,
    0.0,
    1.0f
);
projection = glm::perspective(glm::radians(SceneManagement::getInstance()->MainCamera->GetVOW()), (float)RECT_WIDTH / (float)RECT_HEIGHT, 0.1f, 100.0f);
glm::mat4 InverseProjectionMatrix = glm::inverse(projection);
view = SceneManagement::getInstance()->MainCamera->GetViewMatrix();
glm::mat4 InverseViewMatrix = glm::inverse(view);
glm::vec4 lRayStart_camera = InverseProjectionMatrix * lRayStart_NDC;
lRayStart_camera /= lRayStart_camera.w;
glm::vec4 lRayStart_world = InverseViewMatrix * lRayStart_camera;
lRayStart_world /= lRayStart_world.w;
glm::vec4 lRayEnd_camera = InverseProjectionMatrix * lRayEnd_NDC;
lRayEnd_camera /= lRayEnd_camera.w;
glm::vec4 lRayEnd_world = InverseViewMatrix * lRayEnd_camera;
lRayEnd_world /= lRayEnd_world.w;
glm::vec3 lRayDir_world(lRayEnd_world - lRayStart_world);
lRayDir_world = glm::normalize(lRayDir_world);
glm::vec3 out_end = SceneManagement::getInstance()->MainCamera->GetCamPosition() + SceneManagement::getInstance()->MainCamera->GetCamFront() * 1000.0f;
btCollisionWorld::ClosestRayResultCallback RayCallback(
    btVector3(SceneManagement::getInstance()->MainCamera->GetCamPosition().x, SceneManagement::getInstance()->MainCamera->GetCamPosition().y, SceneManagement::getInstance()->MainCamera->GetCamPosition().z),
    btVector3(out_end.x, out_end.y, out_end.z)
);
PhysicsManager::getInstance()->dynamicsWorld->rayTest(
    btVector3(SceneManagement::getInstance()->MainCamera->GetCamPosition().x, SceneManagement::getInstance()->MainCamera->GetCamPosition().y, SceneManagement::getInstance()->MainCamera->GetCamPosition().z),
    btVector3(out_end.x, out_end.y, out_end.z),
    RayCallback
);
if (RayCallback.hasHit())
{
    btTransform position = RayCallback.m_collisionObject->getInterpolationWorldTransform();
    printf("Collision \n");
    for (int i = 0; i < SceneManagement::getInstance()->gObjects.size(); i++)
    {
        if (SceneManagement::getInstance()->gObjects.at(i)->transform.Position.x == position.getOrigin().getX() &&
            SceneManagement::getInstance()->gObjects.at(i)->transform.Position.y == position.getOrigin().getY() &&
            SceneManagement::getInstance()->gObjects.at(i)->transform.Position.z == position.getOrigin().getZ())
        {
            int select = i;
            SceneManagement::getInstance()->SelectedGameObject = SceneManagement::getInstance()->gObjects.at(select);
            SceneManagement::getInstance()->SelectedGameObject->DisplayInspectorUI();
            return;
        }
    }
}
This check
    SceneManagement::getInstance()->gObjects.at(i)->transform.Position.x == position.getOrigin().getX() &&
    SceneManagement::getInstance()->gObjects.at(i)->transform.Position.y == position.getOrigin().getY() &&
    SceneManagement::getInstance()->gObjects.at(i)->transform.Position.z == position.getOrigin().getZ()
fails due to numerical precision issues.
You should check the "equality" of vectors only up to some precision.
Please use a distance function and the following check instead of your if() condition:
const double EPSILON = 1e-4; // experiment with this value
auto objPos = SceneManagement::getInstance()->gObjects.at(i)->transform.Position;
bool isWithinRange = distance3D(objPos, position.getOrigin()) < EPSILON;
if (isWithinRange)
{
    int select = i;
    ...
}
The distance3D function is simply the Euclidean distance in 3D. The glm::distance function should do; just convert both vectors to glm::vec3 first.
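One possible way to write that comparison, sketched with glm and Bullet's accessors (the toGlm helper is only illustrative; the gObjects and transform.Position members are taken from the question):

#include <glm/glm.hpp>              // glm::vec3, glm::distance
#include <LinearMath/btVector3.h>   // btVector3

// Convert Bullet's btVector3 into glm::vec3 so glm::distance can be used.
static glm::vec3 toGlm(const btVector3& v)
{
    return glm::vec3(v.getX(), v.getY(), v.getZ());
}

// ... inside the loop over gObjects:
const float EPSILON = 1e-4f; // tune this for the scale of your world
auto& obj = SceneManagement::getInstance()->gObjects.at(i);
glm::vec3 objPos(obj->transform.Position.x, obj->transform.Position.y, obj->transform.Position.z);
glm::vec3 hitPos = toGlm(position.getOrigin());

if (glm::distance(objPos, hitPos) < EPSILON)
{
    // treat gObjects.at(i) as the picked object
}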
I'm trying to implement frustum culling in my voxel engine. Basically I'm rendering chunks, and I want to cull every chunk that is outside the camera frustum. I tried a lot of different approaches and code that I found on the web, but I still can't get it to work. The algorithm is in two parts:
• First I extract the frustum planes from the projection-view matrix.
• Then I check for each chunk whether it is inside or colliding with the frustum.
The behavior is generally the same: when looking from the origin towards the positive direction it seems to work, but when looking towards the negative direction it doesn't, and when I move away from the origin it starts breaking and producing nonsense. The culling is also weird when looking up and down.
Here is my frustum plane extraction:
public static Plane[] frustumPlanes(Matrix4f mat, boolean normalize)
{
    Plane[] p = new Plane[6];
    p[0] = normalizePlane(mat.m30 + mat.m00, mat.m31 + mat.m01, mat.m32 + mat.m02, mat.m33 + mat.m03); // left
    p[1] = normalizePlane(mat.m30 - mat.m00, mat.m31 - mat.m01, mat.m32 - mat.m02, mat.m33 - mat.m03); // right
    p[2] = normalizePlane(mat.m30 - mat.m10, mat.m31 - mat.m11, mat.m32 - mat.m12, mat.m33 - mat.m13); // top
    p[3] = normalizePlane(mat.m30 + mat.m10, mat.m31 + mat.m11, mat.m32 + mat.m12, mat.m33 + mat.m13); // bottom
    p[4] = normalizePlane(mat.m30 + mat.m20, mat.m31 + mat.m21, mat.m32 + mat.m22, mat.m33 + mat.m23); // near
    p[5] = normalizePlane(mat.m30 - mat.m20, mat.m31 - mat.m21, mat.m32 - mat.m22, mat.m33 - mat.m23); // far
    return p;
}

public static Plane normalizePlane(float A, float B, float C, float D) {
    float nf = 1.0f / (float)Math.sqrt(A * A + B * B + C * C);
    return new Plane(new Vector3f(nf * A, nf * B, nf * C), nf * D);
}
mat is the projection-view matrix. Here is the projection matrix:
private void createProjectionMatrix() {
    float aspectRatio = (float) DisplayManager.WIDTH / (float) DisplayManager.HEIGHT;
    float y_scale = (float) (1f / Math.tan(Math.toRadians(FOV / 2f)));
    float x_scale = y_scale / aspectRatio;
    float frustum_length = FAR_PLANE - NEAR_PLANE;

    projectionMatrix = new Matrix4f();
    projectionMatrix.m00 = x_scale;
    projectionMatrix.m11 = y_scale;
    projectionMatrix.m22 = -((FAR_PLANE + NEAR_PLANE) / frustum_length);
    projectionMatrix.m23 = -1;
    projectionMatrix.m32 = -((2 * NEAR_PLANE * FAR_PLANE) / frustum_length);
    projectionMatrix.m33 = 0;
}
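For reference (assuming LWJGL 2's Matrix4f fields mCR denote column C, row R, which is how its translate/rotate helpers use them), the code above fills in the standard OpenGL perspective matrix:

$$
P = \begin{pmatrix}
\frac{1}{a\,\tan(\mathrm{FOV}/2)} & 0 & 0 & 0\\
0 & \frac{1}{\tan(\mathrm{FOV}/2)} & 0 & 0\\
0 & 0 & -\frac{F+N}{F-N} & -\frac{2NF}{F-N}\\
0 & 0 & -1 & 0
\end{pmatrix}
$$

where a is the aspect ratio, N is NEAR_PLANE and F is FAR_PLANE.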
Here is the view matrix:
public static Matrix4f createViewMatrix(Camera camera) {
    Matrix4f viewMatrix = new Matrix4f();
    viewMatrix.setIdentity();
    Matrix4f.rotate((float) Math.toRadians(camera.getRotation().x), new Vector3f(1, 0, 0), viewMatrix, viewMatrix);
    Matrix4f.rotate((float) Math.toRadians(camera.getRotation().y), new Vector3f(0, 1, 0), viewMatrix, viewMatrix);
    Matrix4f.rotate((float) Math.toRadians(camera.getRotation().z), new Vector3f(0, 0, 1), viewMatrix, viewMatrix);
    Vector3f cameraPos = camera.getPosition();
    Vector3f negativeCameraPos = new Vector3f(-cameraPos.x, -cameraPos.y, -cameraPos.z);
    Matrix4f.translate(negativeCameraPos, viewMatrix, viewMatrix);
    return viewMatrix;
}
Here is the collision detection code (AABB vs. plane):
public static int boxToPlaneCollision(Plane plane, Vector3f[] minMax)
{
    int result = 2; // Inside
    // planes have unit-length normal, offset = -dot(normal, point on plane)
    int nx = plane.normal.x > 0 ? 1 : 0;
    int ny = plane.normal.y > 0 ? 1 : 0;
    int nz = plane.normal.z > 0 ? 1 : 0;
    // getMinMax(): 0 = return min coordinate. 1 = return max.
    float dot = (plane.normal.x * minMax[nx].x) + (plane.normal.y * minMax[nx].y) + (plane.normal.z * minMax[nx].z);
    if (dot < -plane.offset)
        return 0; // Outside
    float dot2 = (plane.normal.x * minMax[1 - nx].x) + (plane.normal.y * minMax[1 - nx].y) + (plane.normal.z * minMax[1 - nx].z);
    if (dot2 <= -plane.offset)
        result = 1; // Intersect
    return result;
}
And finally here is where everything is called:
public boolean chunkInsideFrustum(Vector3f chunkPos) {
    Vector3f chunkPosMax = new Vector3f(chunkPos.x + Terrain.CHUNK_SIZE, Terrain.CHUNK_HEIGHT, chunkPos.z + Terrain.CHUNK_SIZE);
    for (int i = 0; i < 6; i++) {
        if (Collider.boxToPlaneCollision(frustumPlanes[i], new Vector3f[] { chunkPos, chunkPosMax }) == 0)
            return false;
    }
    return true;
}
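(This is the usual conservative test: a chunk is rejected only when it lies entirely on the outside of at least one plane, so a box near a frustum corner can occasionally survive culling even though it is not visible, which is harmless here.)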
I'm using OpenGL with LWJGL 2 (Java).
My questions are:
Where is the problem? In the frustum plane extraction code, or in the collision detection?
and
I have seen people calculating the frustum from the projection and modelview matrices; what about this technique, is it better?
Thank you very much for your help!
EDIT:
For the second question, I saw here Extracting View Frustum Planes (Gribb & Hartmann method) that someone posted this:
The missing part:
comboMatrix = projection_matrix * Matrix4_Transpose(modelview_matrix)
He then did exactly the same algorithm I did to extract the planes, but what is modelview_matrix? What model should I use?
My problem is a bit involved. I'm trying to draw a sprite using DirectX 11, and to handle the scale and rotation I use a transformation matrix. I extracted this code from SFML, an open-source library, and the translation works fine. However, the rotation and scale aren't working as expected, as I show next.
When I rotate around the center everything is correct.
When I move the sprite and then rotate, the sprite rotates around an "unknown" point.
The same happens when I scale the sprite: it scales itself from an "unknown" point.
If I rotate the sprite, the translation axes stay the same but with the sprite rotated, so I can't move it properly.
I leave you here a little video of exactly what's going on. [VIDEO]
I think the problem is related to how I transform the sprite, but I can't be sure. I leave you here the code parts I think are involved in this error, but you can also take a look at the GitHub project for a deeper look. [GITHUB PROJECT]
Transformable.cpp
const Transform& Transformable::GetTransform() const
{
    if (_transformNeedUpdate)
    {
        float angle = -_rotation * 3.141592654f / 180.f;
        float cosine = static_cast<float>(std::cos(angle));
        float sine = static_cast<float>(std::sin(angle));
        float sxc = _scale.x * cosine;
        float syc = _scale.y * cosine;
        float sxs = _scale.x * sine;
        float sys = _scale.y * sine;
        float tx = -_origin.x * sxc - _origin.y * sys + _position.x;
        float ty = _origin.x * sxs - _origin.y * syc + _position.y;

        _transform = Transform(sxc, sys, tx,
                               -sxs, syc, ty,
                               0.f, 0.f, 1.f);

        _transformNeedUpdate = false;
    }
    return _transform;
}
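For reference, the matrix assembled above is the usual SFML-style composition

transform = translate(position) * rotate(rotation) * scale(scale) * translate(-origin)

so rotation and scaling are always applied around _origin: if _origin stays at (0, 0) while the sprite sits somewhere else, the sprite will appear to rotate and scale around that point instead of around its own center.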
Transform.cpp
Transform::Transform(
    float a00, float a01, float a02,
    float a10, float a11, float a12,
    float a20, float a21, float a22
)
{
    _matrix[0] = a00; _matrix[4] = a01; _matrix[8] = 0.f;  _matrix[12] = a02;
    _matrix[1] = a10; _matrix[5] = a11; _matrix[9] = 0.f;  _matrix[13] = a12;
    _matrix[2] = 0.f; _matrix[6] = 0.f; _matrix[10] = 1.f; _matrix[14] = 0.f;
    _matrix[3] = a20; _matrix[7] = a21; _matrix[11] = 0.f; _matrix[15] = a22;
}

D3DXVECTOR2 Transform::TransformPoint(float x, float y) const
{
    return D3DXVECTOR2(_matrix[0] * x + _matrix[4] * y + _matrix[12],
                       _matrix[1] * x + _matrix[5] * y + _matrix[13]);
}

D3DXVECTOR2 operator *(const Transform& left, const D3DXVECTOR2& right)
{
    return left.TransformPoint(right.x, right.y);
}
Bitmap.cpp (Where I setup the vertex to be drawn)
HRESULT Bitmap::UpdateBuffers(ID3D11DeviceContext* deviceContext)
{
    if (_transform == _previousTransform && _bounds == _previousBounds)
    {
        return S_OK;
    }

    VertexType* vertices;
    D3D11_MAPPED_SUBRESOURCE mappedResource;
    VertexType* verticesPtr;
    HRESULT result;

    _previousTransform = _transform;
    _previousBounds = _bounds;

    vertices = new VertexType[_vertexCount];
    if (!vertices)
    {
        return CO_E_ERRORINAPP;
    }

    float left = _bounds.left();
    float right = left + _bounds.width();
    float top = _bounds.top();
    float bottom = top + _bounds.height();

    D3DXVECTOR2 topLeft = { left, top };
    D3DXVECTOR2 bottomRight = { right, bottom };
    D3DXVECTOR2 topRight = { right, top };
    D3DXVECTOR2 bottomLeft = { left, bottom };

    topLeft = _transform * topLeft;
    bottomRight = _transform * bottomRight;
    topRight = _transform * topRight;
    bottomLeft = _transform * bottomLeft;

    vertices[0].position = D3DXVECTOR3(topLeft.x, topLeft.y, 0.0f);
    vertices[0].texture = D3DXVECTOR2(0.0f, 0.0f);
    vertices[1].position = D3DXVECTOR3(bottomRight.x, bottomRight.y, 0.0f);
    vertices[1].texture = D3DXVECTOR2(1.0f, 1.0f);
    vertices[2].position = D3DXVECTOR3(bottomLeft.x, bottomLeft.y, 0.0f);
    vertices[2].texture = D3DXVECTOR2(0.0f, 1.0f);
    vertices[3].position = D3DXVECTOR3(topLeft.x, topLeft.y, 0.0f);
    vertices[3].texture = D3DXVECTOR2(0.0f, 0.0f);
    vertices[4].position = D3DXVECTOR3(topRight.x, topRight.y, 0.0f);
    vertices[4].texture = D3DXVECTOR2(1.0f, 0.0f);
    vertices[5].position = D3DXVECTOR3(bottomRight.x, bottomRight.y, 0.0f);
    vertices[5].texture = D3DXVECTOR2(1.0f, 1.0f);

    result = deviceContext->Map(_vertexBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
    if (FAILED(result))
    {
        return CO_E_ERRORINAPP;
    }

    verticesPtr = (VertexType*)mappedResource.pData;
    memcpy(verticesPtr, (void*)vertices, (sizeof(VertexType) * _vertexCount));
    deviceContext->Unmap(_vertexBuffer, 0);

    delete[] vertices;
    vertices = 0;

    return S_OK;
}
The coordinate system is as shown in the image below; the center of the screen is the (0,0) position.
The code flow
When the position, scale, rotation or origin of the actor changes, a new Transform is generated. Then I transform the local bounds of the actor with the generated transform to get the correct positions of the vertices, and I set them into the vertex buffer.
I'm trying to set up a Google Maps style zoom-to-cursor control for my OpenGL camera. I'm using a method similar to the one suggested here. Basically, I get the position of the cursor and calculate the width/height of my perspective view at that depth using some trigonometry. I then change the field of view and calculate how much I need to translate in order to keep the point under the cursor in the same apparent position on the screen. That part works pretty well.
The issue is that I want to limit the fov to less than 90 degrees. When it ends up greater than 90, I cut it in half and then translate everything away from the camera so that the resulting scene looks the same as with the larger fov. The equation to find that necessary translation isn't working, which is strange because it comes from pretty simple algebra. I can't find my mistake. Here's the relevant code.
void Visual::scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
    glm::mat4 modelview = view * model;
    glm::vec4 viewport = { 0.0, 0.0, width, height };

    float winX = cursorPrevX;
    float winY = viewport[3] - cursorPrevY;
    float winZ;
    glReadPixels(winX, winY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &winZ);

    glm::vec3 screenCoords = { winX, winY, winZ };
    glm::vec3 cursorPosition = glm::unProject(screenCoords, modelview, projection, viewport);

    if (isinf(cursorPosition[2]) || isnan(cursorPosition[2])) {
        cursorPosition[2] = 0.0;
    }

    float zoomFactor = 1.1;
    // = zooming in
    if (yoffset > 0.0)
        zoomFactor = 1 / 1.1;

    // the width and height of the perspective view, at the depth of the cursor position
    glm::vec2 fovXY = camera.getFovXY(cursorPosition[2] - zTranslate, width / height);
    camera.setZoomFromFov(fovXY.y * zoomFactor, cursorPosition[2] - zTranslate);

    // don't want fov to be greater than 90, so cut it in half and move the world farther away from the camera to compensate
    // not working...
    if (camera.Zoom > 90.0 && zTranslate * 2 > MAX_DEPTH) {
        float prevZoom = camera.Zoom;
        camera.Zoom *= .5;
        // need increased distance between camera and world origin, so that view does not appear to change when fov is reduced
        zTranslate = cursorPosition[2] - tan(glm::radians(prevZoom)) / tan(glm::radians(camera.Zoom) * (cursorPosition[2] - zTranslate));
    }
    else if (camera.Zoom > 90.0) {
        camera.Zoom = 90.0;
    }

    glm::vec2 newFovXY = camera.getFovXY(cursorPosition[2] - zTranslate, width / height);

    // translate so that position under the cursor does not appear to move.
    xTranslate += (newFovXY.x - fovXY.x) * (winX / width - .5);
    yTranslate += (newFovXY.y - fovXY.y) * (winY / height - .5);
    updateView = true;
}
The definition of my view matrix, called every iteration of the main loop:
void Visual::setView() {
    view = glm::mat4();
    view = glm::translate(view, { xTranslate, yTranslate, zTranslate });
    view = glm::rotate(view, glm::radians(camera.inclination), glm::vec3(1.f, 0.f, 0.f));
    view = glm::rotate(view, glm::radians(camera.azimuth), glm::vec3(0.f, 0.f, 1.f));

    camera.Right = glm::column(view, 0).xyz();
    camera.Up = glm::column(view, 1).xyz();
    camera.Front = -glm::column(view, 2).xyz(); // minus because OpenGL camera looks towards negative Z.
    camera.Position = glm::column(view, 3).xyz();

    updateView = false;
}
Field of view helper functions.
glm::vec2 getFovXY(float depth, float aspectRatio) {
    float fovY = tan(glm::radians(Zoom / 2)) * depth;
    float fovX = fovY * aspectRatio;
    return glm::vec2{ 2 * fovX, 2 * fovY };
}

// you have a desired fov, and you want to set the zoom to achieve that.
// factor of 1/2 inside the atan because we actually need the half-fov. Keep full-fov as input for consistency
void setZoomFromFov(float fovY, float depth) {
    Zoom = glm::degrees(2 * atan(fovY / (2 * depth)));
}
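As a quick sanity check of the pair above (numbers rounded): with Zoom = 60 degrees and depth = 10, getFovXY reports a visible height of 2 * 10 * tan(30°) ≈ 11.55, and feeding that height back through setZoomFromFov gives 2 * atan(11.55 / 20) ≈ 60 degrees again, so the two helpers are consistent inverses of each other.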
The equations I'm using can be found from the diagram here. Since I want to have the same field of view dimensions before and after the angle is changed, I start with
fovY = tan(theta1) * d1 = tan(theta2) * d2
d2 = (tan(theta1) / tan(theta2)) * d1
d1 = distance between camera and cursor position, before fov change = cursorPosition[2] - zTranslate
d2 = distance after
theta1 = fov angle before
theta2 = fov angle after = theta1 * .5
Appreciate the help.
Using MonoTouch and OpenTK I am trying to get the screen coordinate of one 3D point. I have my world-view-projection matrix set up, and OpenGL makes sense of it and projects my 3D model perfectly, but how do I use the same matrix to project just one point from 3D to 2D?
I thought I could simply use:
Vector3.Transform(ref input3Dpos, ref matWorldViewProjection, out projected2Dpos);
and then have the projected screen coordinate in projected2Dpos. But the resulting Vector4 does not seem to represent the proper projected screen coordinate, and I do not know how to calculate it from there.
I found out I need to divide by Vector4.W; however, I am still getting the wrong values. I am using this method now:
private static bool GluProject(OpenTK.Vector3 objPos, OpenTK.Matrix4 matWorldViewProjection, int[] viewport, out OpenTK.Vector3 screenPos)
{
    OpenTK.Vector4 _in;
    _in.X = objPos.X;
    _in.Y = objPos.Y;
    _in.Z = objPos.Z;
    _in.W = 1f;

    Vector4 _out = OpenTK.Vector4.Transform(_in, matWorldViewProjection);

    if (_out.W <= 0.0)
    {
        screenPos = OpenTK.Vector3.Zero;
        return false;
    }

    _out.X /= _out.W;
    _out.Y /= _out.W;
    _out.Z /= _out.W;

    /* Map x, y and z to range 0-1 */
    _out.X = _out.X * 0.5f + 0.5f;
    _out.Y = -_out.Y * 0.5f + 0.5f;
    _out.Z = _out.Z * 0.5f + 0.5f;

    /* Map x,y to viewport */
    _out.X = _out.X * viewport[2] + viewport[0];
    _out.Y = _out.Y * viewport[3] + viewport[1];

    screenPos.X = _out.X;
    screenPos.Y = _out.Y;
    screenPos.Z = _out.Z;

    return true;
}
I cannot see any errors though... :S
In the first part of the question you're missing the last step: mapping from NDC (normalized device coordinates) to viewport coordinates. That's what the lines
    /* Map x,y to viewport */
    _out.X = _out.X * viewport[2] + viewport[0];
    _out.Y = _out.Y * viewport[3] + viewport[1];
in your GluProject do.
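In other words, after the divide by W the coordinates are in NDC, roughly in the range [-1, 1] on each axis, and the viewport mapping is

x_win = (x_ndc * 0.5 + 0.5) * viewport[2] + viewport[0]
y_win = (y_ndc * 0.5 + 0.5) * viewport[3] + viewport[1]

(your GluProject additionally negates Y before the mapping, so Y grows downwards as in typical window coordinates).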
You have two options. You can calculate it yourself, or use the Glu.Project function. I prefer the first.
Number 1:
private Vector2 Convert(
    Vector3 pos,
    Matrix4 viewMatrix,
    Matrix4 projectionMatrix,
    int screenWidth,
    int screenHeight)
{
    pos = Vector3.Transform(pos, viewMatrix);
    pos = Vector3.Transform(pos, projectionMatrix);
    pos.X /= pos.Z;
    pos.Y /= pos.Z;
    pos.X = (pos.X + 1) * screenWidth / 2;
    pos.Y = (pos.Y + 1) * screenHeight / 2;
    return new Vector2(pos.X, pos.Y);
}
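This would be called with the same view and projection matrices used for rendering, for example Convert(worldPos, viewMatrix, projectionMatrix, screenWidth, screenHeight), where worldPos is the 3D point in world coordinates (the names here are only illustrative).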
Number 2:
public Vector2 form3Dto2D(Vector3 our3DPoint)
{
    Vector3 our2DPoint;
    float[] modelviewMatrix = new float[16];
    float[] projectionMatrix = new float[16];
    int[] viewport = new int[4];

    GL.GetFloat(GetPName.ModelviewMatrix, modelviewMatrix);
    GL.GetFloat(GetPName.ProjectionMatrix, projectionMatrix);
    GL.GetInteger(GetPName.Viewport, viewport);

    OpenTK.Graphics.Glu.Project(our3DPoint, convertFloatsToDoubles(modelviewMatrix),
        convertFloatsToDoubles(projectionMatrix), viewport, out our2DPoint);

    return new Vector2(our2DPoint.X, our2DPoint.Y);
}
public static double[] convertFloatsToDoubles(float[] input)
{
    if (input == null)
    {
        return null; // Or throw an exception - your choice
    }
    double[] output = new double[input.Length];
    for (int i = 0; i < input.Length; i++)
    {
        output[i] = input[i];
    }
    return output;
}