For a simple 2d game I'm making I'm trying to rotate sprites around the z axis using matrices. I'm clearly doing something wrong as when I attempt to rotate my sprite it looks like it's being rotated around the screen origin (bottom, left) and not the sprite origin. I'm confused as my quad is at the origin already so I didn't think I need to translate -> rotate and translate back. Here's a code snippet and a small video or the erroneous transformation
void MatrixMultiply(
MATRIX &mOut,
const MATRIX &mA,
const MATRIX &mB);
/*!***************************************************************************
#Function TransTransformArray
#Output pTransformedVertex Destination for transformed vectors
#Input pV Input vector array
#Input nNumberOfVertices Number of vectors to transform
#Input pMatrix Matrix to transform the vectors of input vector (e.g. use 1 for position, 0 for normal)
#Description Transform all vertices in pVertex by pMatrix and store them in
pTransformedVertex
- pTransformedVertex is the pointer that will receive transformed vertices.
- pVertex is the pointer to untransformed object vertices.
- nNumberOfVertices is the number of vertices of the object.
- pMatrix is the matrix used to transform the object.
*****************************************************************************/
void TransTransformArray(
VECTOR3 * const pTransformedVertex,
const VECTOR3 * const pV,
const int nNumberOfVertices,
const MATRIX * const pMatrix);
RenderQuad CreateRenderQuad(
const Texture2D & texture,
float x,
float y,
float scaleX,
float scaleY,
float rotateRadians,
int zIndex,
const Color & color,
const Quad2 & textureCoord,
const char * name
) {
MATRIX mT;
MATRIX mS;
MATRIX concat;
MATRIX mR;
MatrixTranslation(mT, x, y, 0.0f);
MatrixRotationZ(mR, rotateRadians);
MatrixScaling(mS, scaleX, scaleY, 1.0f);
VECTOR3 quad[] = {
{-0.5f, 0.5f, 0.f}, //tl
{0.5f, 0.5f, 0.f}, //tr
{-0.5, -0.5f, 0.0f}, //bl
{0.5f, -0.5f, 0.0f}, //br
};
MatrixMultiply(concat, mR, mT);
MatrixMultiply(concat, concat, mS);
// apply to all the points in the quad
TransTransformArray(quad, quad, 4, &concat);
== Update:
here's the structs and render code:
I'm using the matrix class from the oolongengine code.google.com/p/oolongengine/source/browse/trunk/Oolong%20Engine2/Math/Matrix.cpp
I transform all the quads then later render them using OpenGL. Here are my data structs and render code:
typedef struct _RenderData {
VECTOR3 vertex;
RenderColor3D color;
RenderTextureCoord textureCoord;
float zIndex;
GLuint textureId;
} RenderData;
typedef struct _RenderQuad {
//! top left
RenderData tl;
//! top right
RenderData tr;
//! bottom left
RenderData bl;
//! bottom right
RenderData br;
float zIndex;
Texture2D * texture; // render quad draws a source rect from here
ESpriteBlendMode blendMode;
} RenderQuad ;
/// Draw
class QuadBatch {
GLushort * m_indices;
const Texture2D * m_texture;
GLuint m_vbos[2];
RenderData * m_vertices;
};
QuadBatch::Draw () {
int offset = (int)&m_vertices[startIndex];
// vertex
int diff = offsetof( RenderData, vertex);
glVertexPointer(3, GL_FLOAT, kRenderDataSize, (void*) (offset + diff) );
// color
diff = offsetof( RenderData, color);
glColorPointer(4, GL_FLOAT, kRenderDataSize, (void*)(offset + diff));
// tex coords
diff = offsetof( RenderData, textureCoord);
glTexCoordPointer(2, GL_FLOAT, kRenderDataSize, (void*)(offset + diff));
// each quad has 6 indices
glDrawElements(GL_TRIANGLES, vertexCount * elementMultiplier, GL_UNSIGNED_SHORT, m_indices);
'Rotation', by definition, is around the origin (0,0,0). If you want a different axis of rotation, you have to apply a Translation component. Say you want to apply a rotation R around an axis a. The transformation to apply to an arbitrary vector x is:
x --> a + R(x - a) = Rx + (a - Ra)
(This might take some staring to digest). So, after applying your rotation - which, as you observed, rotates around the origin - you have to add the constant vector (a - Ra).
[Edit:] This answer is language and platform agnostic - the math is the same wherever you look. Specific libraries contain different structures and API to apply transformations. Both DirectX and OpenGL, for example, maintain 4x4 matrix transforms, to unify rotations and translations into a single matrix multiplication (via an apparatus called homogeneous coordinates).
Related
I'm trying to visualize normals of triangles.
I have created a triangle to use as the visual representation of the normal but I'm having trouble aligning it to the normal.
I have tried using glm::lookAt but the triangle ends up in some weird position and rotation after that. I am able to move the triangle in the right place with glm::translate though.
Here is my code to create the triangle which is used for the visualization:
// xyz rgb
float vertex_data[] =
{
0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f,
0.25f, 0.0f, 0.025f, 0.0f, 1.0f, 1.0f,
0.25f, 0.0f, -0.025f, 0.0f, 1.0f, 1.0f,
};
unsigned int index_data[] = {0, 1, 2};
glGenVertexArrays(1, &nrmGizmoVAO);
glGenBuffers(1, &nrmGizmoVBO);
glGenBuffers(1, &nrmGizmoEBO);
glBindVertexArray(nrmGizmoVAO);
glBindBuffer(GL_ARRAY_BUFFER, nmrGizmoVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_data), vertex_data, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, nrmGizmoEBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(index_data), index_data, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
glBindVertexArray(0);
and here is the code to draw the visualizations:
for(unsigned int i = 0; i < worldTriangles->size(); i++)
{
Triangle *tri = &worldTriangles->at(i);
glm::vec3 wp = tri->worldPosition;
glm::vec3 nrm = tri->normal;
nrmGizmoMatrix = glm::mat4(1.0f);
//nrmGizmoMatrix = glm::translate(nrmGizmoMatrix, wp);
nrmGizmoMatrix = glm::lookAt(wp, wp + nrm, glm::vec3(0.0f, 1.0f, 0.0f));
gizmoShader.setMatrix(projectionMatrix, viewMatrix, nrmGizmoMatrix);
glBindVertexArray(nrmGizmoVAO);
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
When using only glm::translate, the triangles appear in right positions but all point in the same direction. How can I rotate them so that they point in the direction of the normal vector?
Your code doesn't work because lookAt is intended to be used as the view matrix, thus it returns the transform from world space to local (camera) space. In your case you want the reverse -- from local (triangle) to world space. Taking an inverse of lookAt should solve that.
However, I'd take a step back and look at (haha) the bigger picture. What I notice about your approach:
It's very inefficient -- you issue a separate call with a different model matrix for every single normal.
You don't even need the entire model matrix. A triangle is a 2-d shape, so all you need is two basis vectors.
I'd instead generate all the vertices for the normals in a single array, and then use glDrawArrays to draw that. For the actual calculation, observe that we have one degree of freedom when it comes to aligning the triangle along the normal. Your lookAt code resolves that DoF rather arbitrary. A better way to resolve that is to constrain it by requiring that it faces towards the camera, thus maximizing the visible area. The calculation is straightforward:
// inputs: vertices output array, normal position, normal direction, camera position
void emit_normal(std::vector<vec3> &v, const vec3 &p, const vec3 &n, const vec3 &c) {
static const float length = 0.25f, width = 0.025f;
vec3 t = normalize(cross(n, c - p)); // tangent
v.push_back(p);
v.push_back(p + length*n + width*t);
v.push_back(p + length*n - width*t);
}
// ... in your code, generate normals through:
std::vector<vec3> normals;
for(unsigned int i = 0; i < worldTriangles->size(); i++) {
Triangle *tri = &worldTriangles->at(i);
emit_normal(normals, tri->worldPosition, tri->normal, camera_position);
}
// ... create VAO for normals ...
glDrawArrays(GL_TRIANGLES, 0, normals.size());
Note, however, that this would make the normal mesh camera-dependent -- which is desirable when rendering normals with triangles. Most CAD software draws normals with lines instead, which is much simpler and avoids many problems:
void emit_normal(std::vector<vec3> &v, const vec3 &p, const vec3 &n) {
static const float length = 0.25f;
v.push_back(p);
v.push_back(p + length*n);
}
// ... in your code, generate normals through:
std::vector<vec3> normals;
for(unsigned int i = 0; i < worldTriangles->size(); i++) {
Triangle *tri = &worldTriangles->at(i);
emit_normal(normals, tri->worldPosition, tri->normal);
}
// ... create VAO for normals ...
glDrawArrays(GL_LINES, 0, normals.size());
I have a sphere and also the axis through its origin. I want to translate the sphere on its axis up and down. In it's starting position that's no problem, as the axis start parallel to the global y axis., but as soon as I rotate the sphere and therefore also the sphere axis around the z-axis it gets complicated.
My first thought was to normalize the axis and simply use that as the translation vector in the translation matrix. Then multiply the translation matrix with the normalized axis and the sphere should be pushed one unit on the axis.
Here is the code I already got:
class Object
{
public:
inline Object()
: vao(0),
positionBuffer(0),
colorBuffer(0),
indexBuffer(0),
elements(0),
vertices(0)
{}
inline ~Object() { // GL context must exist on destruction
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(1, &indexBuffer);
glDeleteBuffers(1, &colorBuffer);
glDeleteBuffers(1, &positionBuffer);
}
GLuint vao; // vertex-array-object ID
GLuint positionBuffer; // ID of vertex-buffer: position
GLuint colorBuffer; // ID of vertex-buffer: color
GLuint indexBuffer; // ID of index-buffer
GLuint elements; // Number of Elements
vector<glm::vec3> vertices;
glm::vec3 mp;
glm::mat4x4 model; // model matrix
};
glm::vec3 axis = glm::normalize(glm::vec3{
sphereax.vertices[0].x - sphereax.vertices[1].x,
sphereax.vertices[0].y - sphereax.vertices[1].y,
sphereax.vertices[0].z - sphereax.vertices[1].z}
);
translateObject(earth, axis);
void translateObject(Object &obj, glm::vec3 &translation)
{
glm::mat4x4 trans_mat = glm::translate(glm::mat4(1.0f), translation);
for (int i = 0; i < obj.vertices.size(); i++)
{
obj.vertices[i] = glm::vec3(glm::vec4(obj.vertices[i], 1.0f) * trans_mat);
}
obj.mp = glm::vec3(glm::vec4(obj.mp, 1.0f) * trans_mat);
}
The translation matrix in translateObject(); seems to be right, yet the multiplication of one of the points and the transformation matrix shows no effect.
I suggest to do the following:
Rotate the sphere around the its rotation axis
Translate the sphere by its distance form the center along the x axis
Rotate the sphere around the center of the world
glm::vec3 rotation_axis = ...; // sphere axis
float axis_rot_angle = ...; // current axis rotation angle in radians
float distance_to_center = ...; // distance from the center of the world
float world_rot_angle = ...; // current world rotation angle in radians
glm::mat4 sphere_rot = glm::rotation(glm::mat4(1.0f), axis_rot_angle, rotation_axis);
glm::mat4 sphere_trans = glm::translate(glm::mat4(1.0f), glm::vec3(distance_to_center, 0.0f, 0.0f));
glm::mat4 world_rot = glm::rotation(glm::mat4(1.0f), world_rot_angle, glm::vec3(0.0f, 0.0f, 1.0f));
glm::mat4 model_mat = world_rot * sphere_trans * sphere_rot;
I'm trying to implement a simple paint program and now I have a problem with zoom, I can't understand how to do it? I tried to adapt the code from here to myself, but it did not work, I just get a black screen. What my problem?
Not using glut or glew!
Here my camera code:
.h
class Camera2d
{
public:
Camera2d(const glm::vec3& pos = glm::vec3(0.f, 0.f, 0.f),
const glm::vec3& up = glm::vec3(0.f, 1.f, 0.f));
//~Camera2d();
void setZoom(const float& zoom);
float getZoom() const noexcept;
glm::mat4 getViewMatrix() const noexcept;
void mouseScrollCallback(const float& yOffset);
protected:
void update();
private:
// Camera zoom
float m_zoom;
// Euler Angles
float m_yaw;
float m_pitch;
public:
// Camera Attributes
glm::vec3 position;
glm::vec3 worldUp;
glm::vec3 front;
glm::vec3 up;
glm::vec3 right;
};
.cpp
Camera2d::Camera2d(
const glm::vec3& pos /* = glm::vec3(0.f, 0.f, 0.f) */,
const glm::vec3& up /* = glm::vec3(0.f, 1.f, 0.f) */
)
: m_zoom(45.f)
, m_yaw(-90.f)
, m_pitch(0.f)
, position(pos)
, worldUp(up)
, front(glm::vec3(0.f, 0.f, -1.f))
{
this->update();
}
void Camera2d::setZoom(const float& zoom)
{
this->m_zoom = zoom;
}
float Camera2d::getZoom() const noexcept
{
return this->m_zoom;
}
glm::mat4 Camera2d::getViewMatrix() const noexcept
{
return glm::lookAt(this->position, this->position + this->front, this->up);
}
void Camera2d::mouseScrollCallback(const float& yOffset)
{
if (m_zoom >= 1.f && m_zoom <= 45.f)
m_zoom -= yOffset;
else if (m_zoom <= 1.f)
m_zoom = 1.f;
else if (m_zoom >= 45.f)
m_zoom = 45.f;
}
void Camera2d::update()
{
// Calculate the new Front vector
glm::vec3 _front;
_front.x = cos(glm::radians(this->m_yaw)) * cos(glm::radians(this->m_pitch));
_front.y = sin(glm::radians(this->m_pitch));
_front.z = cos(glm::radians(this->m_pitch)) * sin(glm::radians(this->m_yaw));
this->front = glm::normalize(_front);
// Also re-calculate the Right and Up vector
this->right = glm::normalize(glm::cross(this->front, this->worldUp)); // Normalize the vectors, because their length gets closer to 0 the more you look up or down which results in slower movement.
this->up = glm::normalize(glm::cross(this->right, this->front));
}
and in main i try smth like this in render loop
// pass projection matrix to shader
glm::mat4 projection = glm::perspective(glm::radians(camera.getZoom()),
static_cast<float>(WIDTH) / static_cast<float>(HEIGHT),
0.1f,
10000.f);
shaderProg.setMat4("projecton", projection);
// camera view transformation
glm::mat4 view = camera.getViewMatrix();
shaderProg.setMat4("view", view);
here i have just 1 model its my white bg-texture
glm::mat4 model = glm::translate(model, glm::vec3(0.f, 0.f, 0.f));
model = glm::rotate(model, glm::radians(0.f), glm::vec3(1.0f, 0.3f, 0.5f));
shaderProg.setMat4("model", model);
All code on github: here
You're working in 2D, forget about the camera, forget about projection, forget about following OpenGL tutorials, they're aimed at 3D graphics.
What you need is just a rectangle to fill your screen. Start with the vertices at the the corners of the screen, starting from the top-left corner and moving counterclockwise: (-1,1,0) (-1,-1,0) (1,-1,0) (1,1,0). Forget about Z, you're working in 2D.
You draw on a texture, and the texture coordinates are (0,1) (0,0) (1,0) (1,1), same order as above. Zooming is now just a matter of scaling the rectangle. You have a matrix to determine the scale and one to determine the position. Forget about rotations, front vectors and all that stuff. In the vertex shader you scale and then translate the vertices as usual, in this order. Done.
To interact for example you can have mouse wheel up increasing the scale factor and mouse wheel down decreasing it. Hold click and move the mouse to change the (x,y) position. Again forget about Z. Throw those values into the vertex shader and do the usual transformations.
I know that I can get a colour gradient representation on a line like this:
glBegin (GL_LINES);
glColor3f (1, 0, 0);
glVertex2f (v0_x, v0_y);
glColor3f (0, 0, 1);
glVertex2f (v1_x, v1_y);
glEnd ();
Result:
Question:
Is it possible to extend this for more points? Example: I have two further points v2 and v3. All points are connected (v0v1,v1v2,v2v3). Is there any way to get a colour gradient (red to blue) while drawing these lines so that v0 would be coloured red and v3 would be coloured blue?
You need to calculate colours for this points with linear interpolation.
If distance between all your vertices is the same:
static void lerp3(float *o, const float *a, const float *b, float t) {
float it = 1.0f - t;
o[0] = it*a[0]+t*b[0];
o[1] = it*a[1]+t*b[1];
o[2] = it*a[2]+t*b[2];
}
/* ... */
float v0_colour[3] = {1.0f, 0.0f, 0.0f};
float v1_colour[3], v2_colour[3];
float v3_colour[3] = {0.0f, 0.0f, 1.0f};
float t1 = 1.0f / 3;
float t2 = 1.0f / 3 + t1;
lerp3(v1_colour, v0_colour, v3_colour, t1);
lerp3(v2_colour, v0_colour, v3_colour, t2);
Then just use v1_colour and v2_colour to colour your extra vertices. If distance is varying, recalculate t1 and t2 accordingly - e.g. by dividing sum vector lengths of this points.
I can't figure out how to get glDrawElements to not connect everything it draws...
//Draw Reds
glEnableVertexAttribArray(vLoc);
glEnableVertexAttribArray(cLoc);
glBindBuffer(GL_ARRAY_BUFFER,positionBufferRed);
glVertexAttribPointer(vLoc,3,GL_FLOAT,GL_FALSE,0,0);
glBindBuffer(GL_ARRAY_BUFFER,redBuffer);
glVertexAttribPointer(cLoc,3,GL_FLOAT,GL_FALSE,0,0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,elementBufferRed);
glDrawElements(GL_TRIANGLES,nElements*3,GL_UNSIGNED_INT,0);
glDisableVertexAttribArray(vLoc);
glDisableVertexAttribArray(cLoc);
//Draw Blues
glEnableVertexAttribArray(vLoc);
glEnableVertexAttribArray(cLoc);
glBindBuffer(GL_ARRAY_BUFFER,positionBufferBlue);
glVertexAttribPointer(vLoc,3,GL_FLOAT,GL_FALSE,0,0);
glBindBuffer(GL_ARRAY_BUFFER,blueBuffer);
glVertexAttribPointer(cLoc,3,GL_FLOAT,GL_FALSE,0,0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,elementBufferBlue);
glDrawElements(GL_TRIANGLES,nElements*3,GL_UNSIGNED_INT,0);
glDisableVertexAttribArray(vLoc);
glDisableVertexAttribArray(cLoc);
This is what the result looks like:
http://img338.imageshack.us/img338/2440/cows.png
Should be two separate cows but instead they're connected with black lines. Any advice will be appreciated!
My guess is that the number of elements you're trying to draw is wrong (too big). So the GPU tries to get triangles that don't exist in the buffer, and accidentally access the vertices of the next mesh, but not the color (black).
Try with glDrawElements(GL_TRIANGLES,nElements,GL_UNSIGNED_INT,0);
If it doesn't work, try with a handcoded single triangle.
Here's an example :
GLsizei const TonemapperElementCount = 3;
GLsizeiptr const TonemapperElementSize = TonemapperElementCount * sizeof(glm::uint32);
glm::uint32 const TonemapperElementData[TonemapperElementCount] =
{
0, 1, 2,
};
GLsizei const TonemapperVertexCount = 3;
GLsizeiptr const TonemapperPositionSize = TonemapperVertexCount * sizeof(glm::vec4);
glm::vec4 const TonemapperPositionData[TonemapperVertexCount] =
{ // A full-screen triangle in normalized screen space.
glm::vec4( -1.0f, -1.0f,0,1),
glm::vec4( 3.0f, -1.0f ,0,1),
glm::vec4( -1.0f, 3.0f ,0,1),
};