I have a camera class that uses the DirectXMath API:
// Simple transform + projection camera built on DirectXMath.
// 16-byte aligned because XMVECTOR/XMMATRIX members require SIMD alignment.
// NOTE(review): alignment only covers stack/static instances; heap-allocating
// this class needs an aligned operator new as well -- confirm how it is used.
__declspec(align(16)) class Camera
{
public:
    XMVECTOR Translation;       // world-space position
    XMMATRIX Rotation;          // orientation, rebuilt from RotAngles in Update()
    XMVECTOR Scale;             // per-axis scale
    XMMATRIX Transform;         // scale * rotation * translation (world matrix)
    XMFLOAT3 RotAngles;         // pitch/yaw/roll, radians (consumed by Update())
    XMMATRIX ProjectionMatrix;  // left-handed perspective projection
    float Width;
    float Height;
    float NearZ;
    float FarZ;
    float AspectRatio;
    float FieldOfView;          // vertical FOV in radians

    Camera()
    {
        Translation = XMVectorZero();
        Rotation = XMMatrixIdentity();
        Scale = XMVectorSplatOne();
        Transform = XMMatrixIdentity();
        // BUG FIX: RotAngles was never initialized; XMFLOAT3's default
        // constructor leaves garbage, which Update() then fed straight into
        // XMMatrixRotationRollPitchYaw -- a classic source of "geometry torn
        // apart" the moment Update() runs.
        RotAngles = XMFLOAT3(0.0f, 0.0f, 0.0f);
        Width = 800.0f;
        Height = 600.0f;
        NearZ = 0.1f;
        FarZ = 100.0f;
        // BUG FIX: "800 / 600" was integer division and evaluated to 1,
        // producing a squashed projection. Divide the float members instead.
        AspectRatio = Width / Height;
        FieldOfView = XM_PIDIV4;
        ProjectionMatrix = XMMatrixPerspectiveFovLH(FieldOfView, AspectRatio, NearZ, FarZ);
    }

    // Rebuilds Rotation from the Euler angles and recomposes the world
    // transform as S * R * T (row-vector convention used by DirectXMath).
    void Update()
    {
        Rotation = XMMatrixRotationRollPitchYaw(RotAngles.x, RotAngles.y, RotAngles.z);
        XMMATRIX scaleM = XMMatrixScalingFromVector(Scale);
        XMMATRIX translationM = XMMatrixTranslationFromVector(Translation);
        Transform = scaleM * Rotation * translationM;
    }

    // View matrix looking along the transform's local Z row, with its local Y
    // row as up. Requires Update() to have been called after any change.
    XMMATRIX GetViewMatrix()
    {
        XMVECTOR Eye = Translation;
        XMVECTOR At = Translation + Transform.r[2];  // forward = row 2
        XMVECTOR Up = Transform.r[1];                // up = row 1
        return XMMatrixLookAtLH(Eye, At, Up);
    }

    // Transposed because DirectXMath is row-major while HLSL cbuffers default
    // to column-major packing; the transpose makes mul(v, M) in the shader see
    // the intended matrix.
    XMMATRIX GetViewProjectionMatrix()
    {
        return XMMatrixTranspose(GetViewMatrix() * ProjectionMatrix);
    }
};
When I store the result of GetViewProjectionMatrix() in an XMFLOAT4X4 and upload it to the constant buffer, the geometry gets torn apart or doesn't show up at all when I move/rotate the camera with the keyboard. I have isolated the deforming/disappearing geometry to the camera, but I have no idea what the problem is. I mean the projection matrix can't be wrong — it's just one function call — so it's most likely the view matrix. Could someone tell me where the issue is? I tried different combinations of multiplication orders, transposing both matrices, transposing only one — everything. It never works properly.
In case anyone sees this question again:
It seems that the OP did not transpose the ViewProjection matrix they generated. Note that DirectXMath works in row-major order while HLSL defaults to column-major. See the documentation at: https://msdn.microsoft.com/en-us/library/windows/desktop/bb509634(v=vs.85).aspx
Related
I'm using DirectX 11. I'm trying to draw a Cube mesh to the screen but the bottom half is getting cut off. If I move the camera up/down the bottom half is still cut off, which leads me to think that it's not a viewport/rasterizer issue, but I'm not sure. The pictures are of the cube looking down and then looking up. You can see the cube is getting cut off regardless of the camera position. I think it might be an issue with my projection matrices.
I've attached the RenderDoc capture here, and you can see that the VS input is correct, but when viewing the VS output with solid shading, the same thing happens. https://drive.google.com/file/d/1sh7tj0hPYwD936BEQCL0wtH8ZzXMiEno/view?usp=sharing
This is how I'm calculating my matrices:
// Builds a camera basis matrix from position/target/up using the row-vector
// convention: rows 0..2 are Right/Up/Forward, row 3 is the translation.
// NOTE(review): as written this is the camera's WORLD transform, not its
// inverse. A conventional look-at VIEW matrix stores the negated projections
// (-dot(Right,Position), -dot(UpV,Position), -dot(Forward,Position)) in the
// translation row. Verify against how the shader consumes it (it is
// transposed there) -- this asymmetry is a likely cause of the clipping bug.
mat4 LookAtMatrix(float3 Position, float3 Target, float3 Up) {
float3 Forward = Normalise(Target - Position);
// Cross(Up, Forward) ordering yields a left-handed basis here -- confirm it
// matches the handedness the projection matrix below assumes.
float3 Right = Cross(Normalise(Up), Forward);
float3 UpV = Cross(Forward, Right);
mat4 Out;
Out.v[0] = float4(Right, 0);
Out.v[1] = float4(UpV, 0);
Out.v[2] = float4(Forward, 0);
Out.v[3] = float4(Position, 1);
return Out;
}
// Perspective projection from vertical FOV (degrees), aspect ratio and the
// near/far planes.
// NOTE(review): the depth coefficients (Far+Near)/(Near-Far) and
// 2*Far*Near/(Near-Far), together with the -1 in row 2's w column, are the
// OpenGL-style right-handed form that maps depth to [-1, 1]. Direct3D clips
// z to [0, 1], so roughly half the depth range ends up outside the clip
// volume -- a plausible cause of geometry being cut off. For D3D use
// Far/(Far-Near) and -Near*Far/(Far-Near) (left-handed) -- confirm against
// the renderer's conventions before changing.
mat4 ProjectionMatrix(f32 FOV, f32 Aspect, f32 Near, f32 Far) {
mat4 Out;
f32 YScale = 1.0f / tan((FOV * Deg2Rad) / 2.0f);
f32 XScale = YScale / Aspect;
f32 NmF = Near - Far;
Out.v[0] = float4(XScale, 0, 0, 0);
Out.v[1] = float4(0, YScale, 0, 0);
Out.v[2] = float4(0, 0, (Far + Near) / NmF, -1.0f);
Out.v[3] = float4(0, 0, 2 * Far * Near / NmF, 0);
return Out;
}
And this is how I'm calling these functions (the issue happens regardless of whether I use rotation or not):
// Map the constant buffer (discarding the previous contents) and write this
// frame's world / view / projection matrices.
D3D11_MAPPED_SUBRESOURCE Resource;
HRESULT Result = DeviceContext->Map(ConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &Resource);
if(FAILED(Result)) FatalError("DeviceContext->Map failed");
matrix_buffer *Buffer = (matrix_buffer *)Resource.pData;
// Spin the model about Y at 50 degrees per second, wrapped into [0, 360).
static float yR = 0.0f;
yR += 50.0f * DeltaTime;
while(yR > 360.0f) yR -= 360.0f;
while(yR < 0.0f) yR += 360.0f;
quat R = QuatFromAngles(0.0f, yR, 0.0f);
// WASD/QE fly camera; frame-rate independent via DeltaTime.
const float Speed = 100.0f;
static float3 Position = float3(0, 0, -300);
if(WDown) Position.z += Speed * DeltaTime;
if(ADown) Position.x += Speed * DeltaTime;
if(SDown) Position.z -= Speed * DeltaTime;
if(DDown) Position.x -= Speed * DeltaTime;
if(QDown) Position.y -= Speed * DeltaTime;
if(EDown) Position.y += Speed * DeltaTime;
Buffer->WorldMatrix = RotationMatrix(R, float3(0, 0, 0));
Buffer->ViewMatrix = LookAtMatrix(Position, Position+float3(0, 0, 1), float3(0, 1, 0));
// BUG FIX: "1366/768" was integer division and evaluated to 1, so the
// projection was built with a square aspect ratio. Use float division.
Buffer->ProjectionMatrix = ProjectionMatrix(45.0f, 1366.0f / 768.0f, 0.1f, 1000.0f);
DeviceContext->Unmap(ConstantBuffer, 0);
And this is my vertex shader code:
// Vertex shader input, matching the input layout.
struct vertex_data {
float3 Position : POSITION;
float2 UV : TEXCOORD;
float4 Colour : COLOR;
float3 Normal : NORMAL;
};
// Interpolants passed to the pixel shader.
struct pixel_data {
float4 Position : SV_POSITION;
float2 UV : TEXCOORD;
float4 Colour : COLOR;
float3 Normal : NORMAL;
};
// NOTE(review): HLSL cbuffers default to column_major packing, while the CPU
// side writes row-major matrices; the per-vertex transpose() below is
// compensating for that. Prefer transposing once on the CPU, or declaring the
// matrices row_major (or #pragma pack_matrix(row_major)), instead of three
// transposes per vertex -- confirm against the CPU-side layout first.
cbuffer MatrixBuffer {
float4x4 WorldMatrix;
float4x4 ViewMatrix;
float4x4 ProjectionMatrix;
};
// Transforms the vertex through world -> view -> projection (row-vector
// convention: mul(v, M)) and passes the remaining attributes through.
pixel_data VertexMain(vertex_data Input) {
pixel_data Output;
float4 V = float4(Input.Position, 1);
Output.Position = mul(V, transpose(WorldMatrix));
Output.Position = mul(Output.Position, transpose(ViewMatrix));
Output.Position = mul(Output.Position, transpose(ProjectionMatrix));
Output.UV = Input.UV;
Output.Colour = Input.Colour;
// Normal is passed through unchanged -- not rotated into world space.
// NOTE(review): lighting in the pixel shader would need mul with the world
// matrix's rotation part; confirm whether that is intended.
Output.Normal = Input.Normal;
return Output;
}
Here is my code for setting up the viewport (Width/Height are 1366/768 - the size of the window):
D3D11_VIEWPORT Viewport;
Viewport.Width = (float)Width;
Viewport.Height = (float)Height;
Viewport.MinDepth = 0.0f;
Viewport.MaxDepth = 1.0f;
Viewport.TopLeftX = 0.0f;
Viewport.TopLeftY = 0.0f;
DeviceContext->RSSetViewports(1, &Viewport);
I've seen similar issues caused by:
Transposed matrices (are you using row major or column major matrices? Do you need a #pragma pack_matrix? It looks like you've finnicked with transposing quite a bit - avoid doing that, as you will make mistakes that are difficult to reason about)
Otherwise messed up matrix multiplication order. If you bob the camera up/down/left/right or arcball it around & rotate the model, does it actually work? Make sure you incorporate camera rotations with camera translations and object rotations / translations, otherwise you might incorrectly think your code works. What if you zoom near or far?
I recommend when debugging these issues that you first try running your shader transformations in CPU code:
Take a simple model-space coordinate (e.g. 0,0,0).
Pass it through your world matrix, and check if it looks right.
Pass it through your view matrix, verify it.
Then your proj matrix.
Even that simple test can be quite revealing. Basically, if you think your vertex shader is wrong, that's fortunately usually the easiest shader to validate in software! If this passes, try a few other vertices, like the vertices if your box. If that succeeds in software, then now you know it somehow has to do with how you're passing vertex data to the GPU (e.g. row-major vs column-major). If not, then you've built a simple CPU-side repro, great.
(Also, I'm not sure what your pixel shader is, but to rule it out and isolate the vertex shader, consider making the pixel shader just return a solid white)
I create a cube as usual from 8 vertex points that outline a cube, and use indices to draw each individual triangle. However, when I create my camera matrix and rotate it using GLM's lookAt function, the rotation happens in screen space rather than in world space.
// 60-degree vertical FOV perspective projection; near/far at 0.1/100.
glm::mat4 Projection = glm::mat4(1);
Projection = glm::perspective(glm::radians(60.0f), (float)window_width / (float)window_hight, 0.1f, 100.0f);
// Orbit the camera on a circle of the given radius around the origin,
// driven by elapsed time.
const float radius = 10.0f;
float camX = sin(glfwGetTime()) * radius;
float camZ = cos(glfwGetTime()) * radius;
glm::mat4 View = glm::mat4(1);
View = glm::lookAt(
glm::vec3(camX, 0, camZ),
glm::vec3(0, 0, 0),
glm::vec3(0, 1, 0)
);
// GLM is column-major: compose as Projection * View * Model and multiply
// with the vector on the RIGHT in the shader (M * v).
glm::mat4 Model = glm::mat4(1);
glm::mat4 mvp = Projection * View * Model;
Then in glsl:
// BUG FIX: the uniform declaration was missing its terminating semicolon.
uniform mat4 camera_mat4;
void main()
{
    // BUG FIX: GLM matrices are column-major, so the matrix must be on the
    // LEFT of the vector (M * v). The original `vec4(...) * camera_mat4`
    // treats the position as a row vector, i.e. multiplies by the transpose,
    // which is exactly the "rotates screen coordinates, not the world"
    // symptom described above.
    vec4 pos = camera_mat4 * vec4(vertexPosition_modelspace, 1.0);
    gl_Position.xyzw = pos;
}
Example: GLM rotating screen coordinates not cube
I'm trying to implement a bullet, so I have this free-movement first-person camera. I got this camera from learnopengl.com; this is the code:
// Default camera values
const float YAW = -90.0f;        // -90 deg yaw makes the initial Front point down -Z
const float PITCH = 0.0f;
const float SPEED = 2.5f;        // movement speed, world units per second
const float SENSITIVITY = 0.1f;  // mouse-delta-to-degrees scale factor
const float ZOOM = 45.0f;        // vertical field of view, degrees
// An abstract camera class that processes input and calculates the corresponding Euler Angles, Vectors and Matrices for use in OpenGL
class Camera
{
public:
// Camera Attributes
glm::vec3 Position;
glm::vec3 Front;    // normalized view direction, derived from Yaw/Pitch
glm::vec3 Up;       // camera-local up, derived in updateCameraVectors()
glm::vec3 Right;    // camera-local right, derived in updateCameraVectors()
glm::vec3 WorldUp;  // fixed world up used to re-derive Right/Up
// Euler Angles (stored in degrees; converted to radians when used)
float Yaw;
float Pitch;
// Camera options
float MovementSpeed;
float MouseSensitivity;
float Zoom;
// Constructor with vectors
Camera(glm::vec3 position = glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f), float yaw = YAW, float pitch = PITCH) : Front(glm::vec3(0.0f, 0.0f, -1.0f)), MovementSpeed(SPEED), MouseSensitivity(SENSITIVITY), Zoom(ZOOM)
{
Position = position;
WorldUp = up;
Yaw = yaw;
Pitch = pitch;
updateCameraVectors();
}
// Constructor with scalar values
Camera(float posX, float posY, float posZ, float upX, float upY, float upZ, float yaw, float pitch) : Front(glm::vec3(0.0f, 0.0f, -1.0f)), MovementSpeed(SPEED), MouseSensitivity(SENSITIVITY), Zoom(ZOOM)
{
Position = glm::vec3(posX, posY, posZ);
WorldUp = glm::vec3(upX, upY, upZ);
Yaw = yaw;
Pitch = pitch;
updateCameraVectors();
}
// Returns the view matrix calculated using Euler Angles and the LookAt Matrix
glm::mat4 GetViewMatrix()
{
return glm::lookAt(Position, Position + Front, Up);
}
// Processes input received from any keyboard-like input system. Accepts input parameter in the form of camera defined ENUM (to abstract it from windowing systems)
// deltaTime makes movement frame-rate independent.
void ProcessKeyboard(Camera_Movement direction, float deltaTime)
{
float velocity = MovementSpeed * deltaTime;
if (direction == FORWARD)
Position += Front * velocity;
if (direction == BACKWARD)
Position -= Front * velocity;
if (direction == LEFT)
Position -= Right * velocity;
if (direction == RIGHT)
Position += Right * velocity;
}
// Processes input received from a mouse input system. Expects the offset value in both the x and y direction.
void ProcessMouseMovement(float xoffset, float yoffset, GLboolean constrainPitch = true)
{
xoffset *= MouseSensitivity;
yoffset *= MouseSensitivity;
Yaw += xoffset;
Pitch += yoffset;
// Make sure that when pitch is out of bounds, screen doesn't get flipped
// (clamping just short of +/-90 deg avoids gimbal flip at the poles)
if (constrainPitch)
{
if (Pitch > 89.0f)
Pitch = 89.0f;
if (Pitch < -89.0f)
Pitch = -89.0f;
}
// Update Front, Right and Up Vectors using the updated Euler angles
updateCameraVectors();
}
// Processes input received from a mouse scroll-wheel event. Only requires input on the vertical wheel-axis
// Zoom is kept within [1, 45] degrees.
void ProcessMouseScroll(float yoffset)
{
if (Zoom >= 1.0f && Zoom <= 45.0f)
Zoom -= yoffset;
if (Zoom <= 1.0f)
Zoom = 1.0f;
if (Zoom >= 45.0f)
Zoom = 45.0f;
}
private:
// Calculates the front vector from the Camera's (updated) Euler Angles
void updateCameraVectors()
{
// Calculate the new Front vector (spherical-to-Cartesian from yaw/pitch)
glm::vec3 front;
front.x = cos(glm::radians(Yaw)) * cos(glm::radians(Pitch));
front.y = sin(glm::radians(Pitch));
front.z = sin(glm::radians(Yaw)) * cos(glm::radians(Pitch));
Front = glm::normalize(front);
// Also re-calculate the Right and Up vector
Right = glm::normalize(glm::cross(Front, WorldUp)); // Normalize the vectors, because their length gets closer to 0 the more you look up or down which results in slower movement.
Up = glm::normalize(glm::cross(Right, Front));
}
};
So now I want to create a bullet that starts from
model = glm::translate(model, camara.Position+7.0f*camara.Front);
The issue is that as I try to move the camera the object rotates with it which I know why but I don't know how to fix it, I have tried something like this:
model = glm::rotate(model, glm::radians(camara.Pitch), glm::vec3(1.0f, 0.0f, 0.0f));
model = glm::rotate(model, -glm::radians(camara.Yaw), glm::vec3(0.0f, 1.0f, 0.0f));
trying to sync the rotations but it's not working.
I want to store the position because then I want the bullets to go straight no matter where I move. Thank you.
This is how I always want it to look:
This is how it rotates as I move:
I have a question in regards to using quaternions for the rotation of my graphics object.
I have a Transform class which has the following constructor with default parameters:
// Bundles a position, orientation and scale; all parameters default to the
// identity transform (zero translation, identity quaternion, unit scale).
Transform(const glm::vec3& pos = glm::vec3(0.0), const glm::quat& rot = glm::quat(1.0, 0.0, 0.0, 0.0),
const glm::vec3& scale = glm::vec3(1.0))
    : m_pos(pos)
    , m_rot(rot)
    , m_scale(scale)
{
}
In my Transform class calculate the MVP as follows:
// Composes the model matrix as T * R * S: with GLM's column-vector
// convention this applies scale first, then rotation, then translation.
glm::mat4 Transform::GetModelMatrix() const
{
    const glm::mat4 translationM = glm::translate(glm::mat4(1.0), m_pos);
    const glm::mat4 rotationM = glm::mat4_cast(m_rot);
    const glm::mat4 scaleM = glm::scale(glm::mat4(1.0), m_scale);
    return translationM * rotationM * scaleM;
}
The issue I'm facing is that when I use const glm::quat& rot = glm::quat(1.0, 0.0, 0.0, 0.0) my object appears normal on screen. The following image shows it:
However if I try to use for example const glm::quat& rot = glm::quat(glm::radians(90.0f), 0.0, 1.0, 0.0) (rotating on y axis by 90 degrees) my object appears as if it has been scaled. The following image shows it:
I can't figure out what is causing it to become like this when I try to rotate it. Am I missing something important?
If it's of any relevance, the following is how I calculate my view matrix:
// View matrix: look from the camera position toward a point one unit along
// the forward direction, with m_up as the up reference.
glm::mat4 Camera::GetView() const
{
    return glm::lookAt(m_pos, m_pos + m_forward, m_up);
}
AFAIK you can init a glm::quat using:
// glm::quat's vec3 constructor interprets the vector as Euler angles in
// RADIANS (pitch, yaw, roll), hence the degToRad conversion.
glm::vec3 angles(degToRad(rotx), degToRad(roty), degToRad(rotz));
glm::quat rotation(angles);
Where rotx, roty, rotz are the rotation angles around x, y and z axis and degToRad converts angles to radians. Therefore for your case:
// 90-degree rotation about Y, built via the Euler-angle (radians) constructor
// rather than the (w, x, y, z) component constructor.
glm::vec3 angles(degToRad(0), degToRad(90), degToRad(0));
glm::quat rotation(angles);
Regards
Edit: okay, I've written the code totally intuitive now and this is the result:
http://i.imgur.com/x5arJE9.jpg
The Cube is at 0,0,0
As you can see, the camera position is negative on the z axis, suggesting that I'm viewing along the positive z axis, which does not match up. (fw is negative)
Also the cube colors suggest that I'm on the positive z axis, looking in the negative direction. Also the positive x-axis is to the right (in modelspace)
The angles are calculated like this:
// Local +X axis rotated into world space by the current orientation.
public virtual Vector3 Right
{
get
{
return Vector3.Transform(Vector3.UnitX, Rotation);
}
}
// View direction: -Z rotated by the current orientation (the unrotated
// camera looks down the negative Z axis, matching the OpenGL convention).
public virtual Vector3 Forward
{
get
{
return Vector3.Transform(-Vector3.UnitZ, Rotation);
}
}
// Local +Y axis rotated into world space by the current orientation.
public virtual Vector3 Up
{
get
{
return Vector3.Transform(Vector3.UnitY, Rotation);
}
}
Rotation is a Quaternion.
This is how the view and model matrices are creates:
// Composes translation * rotation (OpenTK row-vector convention: rotation
// applied first, then translation).
// NOTE(review): this is the object's WORLD transform. When used for the
// camera, a view matrix should be the INVERSE of the camera's world
// transform; using it directly is consistent with the inverted-axis symptoms
// described below -- confirm how callers use it.
public virtual Matrix4 GetMatrix()
{
Matrix4 translation = Matrix4.CreateTranslation(Position);
Matrix4 rotation = Matrix4.CreateFromQuaternion(Rotation);
return translation * rotation;
}
Projection:
// Rebuilds the perspective projection from the window size; Fov is in
// degrees and converted to radians for CreatePerspectiveFieldOfView.
// The (float) cast keeps the aspect-ratio division in floating point.
private void SetupProjection()
{
if(GameObject != null)
{
AspectRatio = GameObject.App.Window.Width / (float)GameObject.App.Window.Height;
projectionMatrix = Matrix4.CreatePerspectiveFieldOfView((float)((Math.PI * Fov) / 180), AspectRatio, ZNear, ZFar);
}
}
Matrix multiplication:
// MVP composed in model -> view -> projection order, which is correct for
// OpenTK's row-vector (row-major) convention.
// NOTE(review): Transform.GetMatrix() is used as the view matrix here but is
// not inverted -- see the note on GetMatrix(); verify this is intentional.
public Matrix4 GetModelViewProjectionMatrix(Transform model)
{
return model.GetMatrix()* Transform.GetMatrix() * projectionMatrix;
}
Shader:
[Shader vertex]
#version 150 core
// Per-vertex attributes
in vec3 pos;
in vec4 color;
// Uniforms supplied by the application; _time is unused in this shader --
// TODO confirm it is still needed.
uniform float _time;
uniform mat4 _modelViewProjection;
out vec4 vColor;
void main() {
// Column-vector convention: matrix on the left. The MVP is pre-composed on
// the CPU in model*view*projection (row-major) order, which lands here as
// the transposed product this multiplication expects.
gl_Position = _modelViewProjection * vec4(pos, 1);
vColor = color;
}
OpenTK matrices are transposed, thus the multiplication order.
Any idea why the axis / locations are all messed up ?
End of edit. Original Post:
Have a look at this image: http://i.imgur.com/Cjjr8jz.jpg
As you can see, while the forward vector ( of the camera ) is positive in the z-Axis and the red cube is on the negative x axis,
// Four colored corner points of a quad at depth z, interleaved as
// position (3 floats) + color (3 floats); the geometry shader expands each
// point into a cube.
float[] points = {
// position (3) Color (3)
-s, s, z, 1.0f, 0.0f, 0.0f, // Red point
s, s, z, 0.0f, 1.0f, 0.0f, // Green point
s, -s, z, 0.0f, 0.0f, 1.0f, // Blue point
-s, -s, z, 1.0f, 1.0f, 0.0f, // Yellow point
};
(cubes are created in the geometry shader around those points)
the camera x position seems to be inverted. In other words, if I increase the camera position along its local x axis, it will move to the left, and vice versa.
I pass the transformation matrix like this:
// Look up the shader's MVP uniform location and upload the composed matrix.
if (DefaultAttributeLocations.TryGetValue("modelViewProjectionMatrix", out loc))
{
if (loc >= 0)
{
// NOTE(review): despite the name, this is view * projection only (no model
// matrix), and Camera.GetMatrix() is the camera's world transform, not its
// inverse -- both are suspects for the mirrored-X behavior described above.
Matrix4 mvMatrix = Camera.GetMatrix() * projectionMatrix;
GL.UniformMatrix4(loc, false, ref mvMatrix);
}
}
The GetMatrix() method looks like this:
// Composes translation * rotation (OpenTK row-vector convention).
// NOTE(review): for the camera this yields the world transform; a view
// matrix would be its inverse (e.g. Matrix4.Invert of this result) --
// confirm intended usage.
public virtual Matrix4 GetMatrix()
{
Matrix4 translation = Matrix4.CreateTranslation(Position);
Matrix4 rotation = Matrix4.CreateFromQuaternion(Rotation);
return translation * rotation;
}
And the projection matrix:
// Rebuilds the perspective projection from the window size; Fov is in
// degrees and converted to radians inline. The (float) cast keeps the
// aspect-ratio division in floating point.
private void SetupProjection()
{
AspectRatio = Window.Width / (float)Window.Height;
projectionMatrix = Matrix4.CreatePerspectiveFieldOfView((float)((Math.PI * Fov)/180), AspectRatio, ZNear, ZFar);
}
I don't see what I'm doing wrong :/
It's a little hard to tell from the code, but I believe this is because in OpenGL, the default forward vector of the camera is negative along the Z axis - yours is positive, which means you're looking at the model from the back. That would be why the X coordinate seems inverted.
Although this question is a few years old, I'd still like to give my input.
The reason you're experiencing this bug is that OpenTK's matrices are row-major. All this really means is that you have to do all matrix math in the reverse order. For example, the transformation matrix will be multiplied like so:
/// <summary>
/// Builds a scale -> rotate -> translate world matrix. OpenTK uses the
/// row-vector convention, so the factors are multiplied in application order.
/// </summary>
public static Matrix4 CreateTransformationMatrix(Vector3 position, Quaternion rotation, Vector3 scale)
{
    Matrix4 result = Matrix4.CreateScale(scale);
    result *= Matrix4.CreateFromQuaternion(rotation);
    result *= Matrix4.CreateTranslation(position);
    return result;
}
This goes for any matrix, so if you're using Vector3's instead of Quaternion's for your rotation it would look like this:
/// <summary>
/// Euler-angle overload: scale, then rotate Z -> Y -> X, then translate,
/// multiplied in application order per OpenTK's row-vector convention.
/// </summary>
public static Matrix4 CreateTransformationMatrix(Vector3 position, Vector3 rotation, Vector3 scale)
{
    Matrix4 result = Matrix4.CreateScale(scale);
    result *= Matrix4.CreateRotationZ(rotation.Z);
    result *= Matrix4.CreateRotationY(rotation.Y);
    result *= Matrix4.CreateRotationX(rotation.X);
    result *= Matrix4.CreateTranslation(position);
    return result;
}
Note that your vertex shader will still be multiplied like this:
void main()
{
    // Column-vector convention: the combined matrix applies model, then
    // view, then projection to the vertex position.
    mat4 mvp = projection * view * transform;
    gl_Position = mvp * vec4(position, 1.0f);
}
I hope this helps!