Draw Isometric figure based on matrix - python-2.7

How can I generate an isometric figure from a matrix of numbers?
I need ideas.
Example:
Matrix:
[[3,2],
[1,1]]
Each number is a height in cubes: 3 means the first element of the first row is 3 cubes tall, and 2 means the second element of the first row is 2 cubes tall.
Thanks

This is a very nice question.
I think matplotlib does not directly provide such a function, but we can of course simulate a cube by its six surfaces. To get those surfaces we can use a piece of code provided here.
We then need to plot a cube at each position defined by the matrix.
In order to make the plot look isometric, we use a workaround: plotting invisible points at the corners of the cubic bounding box of the mplot3d axes.
Finally we need to make the axes invisible.
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt

def cuboid_data(center, size=(1,1,1)):
    # code taken from
    # https://stackoverflow.com/questions/30715083/python-plotting-a-wireframe-3d-cuboid?noredirect=1&lq=1
    # suppose axis direction: x: to left; y: to inside; z: to upper
    # get the (left, outside, bottom) point
    o = [a - b / 2 for a, b in zip(center, size)]
    # get the length, width, and height
    l, w, h = size
    x = [[o[0], o[0] + l, o[0] + l, o[0], o[0]],  # x coordinate of points in bottom surface
         [o[0], o[0] + l, o[0] + l, o[0], o[0]],  # x coordinate of points in upper surface
         [o[0], o[0] + l, o[0] + l, o[0], o[0]],  # x coordinate of points in outside surface
         [o[0], o[0] + l, o[0] + l, o[0], o[0]]]  # x coordinate of points in inside surface
    y = [[o[1], o[1], o[1] + w, o[1] + w, o[1]],  # y coordinate of points in bottom surface
         [o[1], o[1], o[1] + w, o[1] + w, o[1]],  # y coordinate of points in upper surface
         [o[1], o[1], o[1], o[1], o[1]],          # y coordinate of points in outside surface
         [o[1] + w, o[1] + w, o[1] + w, o[1] + w, o[1] + w]]  # y coordinate of points in inside surface
    z = [[o[2], o[2], o[2], o[2], o[2]],                      # z coordinate of points in bottom surface
         [o[2] + h, o[2] + h, o[2] + h, o[2] + h, o[2] + h],  # z coordinate of points in upper surface
         [o[2], o[2], o[2] + h, o[2] + h, o[2]],              # z coordinate of points in outside surface
         [o[2], o[2], o[2] + h, o[2] + h, o[2]]]              # z coordinate of points in inside surface
    return x, y, z

def plotCubeAt(pos=(0,0), N=0, ax=None):
    # Plotting N cube elements at position pos
    if ax is not None:
        if N > 0:
            for n in range(N):
                X, Y, Z = cuboid_data((pos[0], pos[1], n))
                ax.plot_surface(X, Y, Z, color='b', rstride=1, cstride=1, alpha=1)

def plotIsoMatrix(ax, matrix):
    # plot a matrix
    # where matrix[i,j] cubes are added at position (i,j)
    for i in range(matrix.shape[0]):
        for j in range(matrix.shape[1]):
            plotCubeAt(pos=(i,j), N=matrix[i,j], ax=ax)
    l = max(matrix.shape[0], matrix.shape[1], matrix.max())
    bb = np.array([(0,0,0), (0,l,0), (l,0,0), (l,l,0), (0,0,l), (0,l,l), (l,0,l), (l,l,l)])
    ax.plot(bb[:,0], bb[:,1], bb[:,2], "w", alpha=0.0)

if __name__ == '__main__':
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_aspect('equal')
    matrix = np.array([[3,2],[1,1]])
    plotIsoMatrix(ax, matrix)
    ax.set_axis_off()
    plt.show()
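This is not part of the original answer, but note that the code above targets the matplotlib of the question's era. On newer matplotlib releases, fig.gca() no longer accepts a projection argument and set_aspect('equal') has historically not been supported on 3D axes. A rough sketch of an equivalent main block, assuming matplotlib 3.3 or later and reusing plotIsoMatrix from above:

if __name__ == '__main__':
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')   # replaces fig.gca(projection='3d')
    ax.set_box_aspect((1, 1, 1))            # replaces ax.set_aspect('equal') on 3D axes
    matrix = np.array([[3, 2], [1, 1]])
    plotIsoMatrix(ax, matrix)
    ax.set_axis_off()
    plt.show()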

How to find the center and radius of an any-dimensional sphere given dims+1 points

Given a vector of N-dimensional points (the vector will be of size N+1):
Is there a generalized algorithm to find the center and radius of the N-D sphere that passes through every single one of those points?
The same question has been asked on the mathematics stackexchange and has received a constructive answer:
Does a set of n+1 points that affinely span R^n lie on a unique (n-1)-sphere?
Here is an implementation in python/numpy of the algorithm described at that answer.
import numpy as np

def find_sphere_through_points(points):
    n_points, n_dim = points.shape
    if (n_points != n_dim + 1):
        raise ValueError('Number of points must be equal to 1 + dimension')
    a = np.concatenate((points, np.ones((n_points, 1))), axis=1)
    b = (points**2).sum(axis=1)
    x = np.linalg.solve(a, b)
    center = x[:-1] / 2
    radius = np.sqrt(x[-1] + np.sum(center**2))
    return center, radius
To test this method, we can generate random points on the surface of a sphere, using the method described in this related question:
Generate a random sample of points distributed on the surface of a unit sphere
import numpy as np

def sample_spherical(npoints, ndim=3, center=None):
    vec = np.random.randn(npoints, ndim)
    vec /= np.linalg.norm(vec, axis=1).reshape(npoints, 1)
    if center is None:
        return vec
    else:
        return vec + center

n = 5
center = np.random.rand(n)
points = sample_spherical(n+1, ndim=n, center=center)
guessed_center, guessed_radius = find_sphere_through_points(points)
print('True center:\n ', center)
print('Calc center:\n ', guessed_center)
print('True radius:\n ', 1.0)
print('Calc radius:\n ', guessed_radius)
# True center:
#   [0.18150032 0.94979547 0.07719378 0.26561175 0.37509931]
# Calc center:
#   [0.18150032 0.94979547 0.07719378 0.26561175 0.37509931]
# True radius:
#   1.0
# Calc radius:
#   0.9999999999999997
The center (X, Y) of a circle through three points and its radius R are found as follows:
(X - X0)² + (Y - Y0)² = R²
(X - X1)² + (Y - Y1)² = R²
(X - X2)² + (Y - Y2)² = R²
Then subtracting pair-wise to eliminate R,
(2X - X0 - X1)(X1 - X0) + (2Y - Y0 - Y1)(Y1 - Y0) = 0
(2X - X0 - X2)(X2 - X0) + (2Y - Y0 - Y2)(Y2 - Y0) = 0
This is a system of two linear equations in two unknowns, giving the coordinates of the center (in fact we construct the intersection of two bisectors). The radius follows from the first equation.
This immediately generalizes to D dimensions.
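For concreteness, here is a small numpy sketch of the 2D case (the function name is my own; it just solves the 2x2 linear system described above):

import numpy as np

def circle_from_three_points(p0, p1, p2):
    # Pairwise subtraction of the squared-distance equations gives a linear system:
    #   2*(p1 - p0) . C = |p1|^2 - |p0|^2
    #   2*(p2 - p0) . C = |p2|^2 - |p0|^2
    A = 2.0 * np.array([p1 - p0, p2 - p0])
    b = np.array([p1 @ p1 - p0 @ p0, p2 @ p2 - p0 @ p0])
    center = np.linalg.solve(A, b)
    radius = np.linalg.norm(p0 - center)
    return center, radius

c, r = circle_from_three_points(np.array([1.0, 0.0]),
                                np.array([0.0, 1.0]),
                                np.array([-1.0, 0.0]))
print(c, r)   # -> [0. 0.] 1.0

The same construction works in D dimensions with D+1 points, where the pairwise subtraction yields a DxD linear system.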

How to draw an implicit 3D surface with sympy?

I am trying to draw x**2 + y**3 = z**2 using sympy, but I don't know how to draw it.
I used plot3d(x**2 + y**3 - z**2) but it doesn't work at all, so does someone know how to do this?
Sadly, SymPy doesn't support 3D implicit plots. However, Sage does, though I remember it was a very heavy module to install.
Alternatively, if you are using Jupyter Notebook you can use K3D-Jupyter which is capable of plotting implicit surfaces. Let's see how to plot your surface:
import k3d
import numpy as np
r = 5
zmin, zmax = -r, r
xmin, xmax = -r, r
ymin, ymax = -r, r
Nx, Ny, Nz = 100, 100, 100
x = np.linspace(xmin, xmax, Nx, dtype=np.float32)
y = np.linspace(ymin, ymax, Ny, dtype=np.float32)
z = np.linspace(zmin, zmax, Nz, dtype=np.float32)
x, y, z = np.meshgrid(x, y, z, indexing='ij')
p = x**2 + y**3 - z**2
plot = k3d.plot()
plt_iso = k3d.marching_cubes(p, compression_level=9,
                             xmin=xmin, xmax=xmax,
                             ymin=ymin, ymax=ymax,
                             zmin=zmin, zmax=zmax,
                             level=0.0, flat_shading=False)
plot += plt_iso
plot.display()
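This is not part of the original answer, but if you are not working in a Jupyter notebook, the same marching-cubes idea can be sketched with scikit-image and matplotlib (assuming a reasonably recent scikit-image that provides skimage.measure.marching_cubes):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from skimage import measure

r = 5
N = 100
x, y, z = np.meshgrid(np.linspace(-r, r, N),
                      np.linspace(-r, r, N),
                      np.linspace(-r, r, N), indexing='ij')
p = x**2 + y**3 - z**2

# Extract the zero level set as a triangle mesh; the vertices come back in
# index coordinates, so rescale them to the [-r, r] range.
verts, faces, normals, values = measure.marching_cubes(p, level=0.0)
verts = verts * (2.0 * r / (N - 1)) - r

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.add_collection3d(Poly3DCollection(verts[faces], alpha=0.5))
ax.set_xlim(-r, r); ax.set_ylim(-r, r); ax.set_zlim(-r, r)
plt.show()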

How do I estimate a cube corner from a projected square?

I've got the coordinates of a square's corners projected onto an image. I want to know where I'd find the corners of a cube if they were projected onto the image.
More specifically, assume a square with sides of length 1. It's at the origin with (x, y, z) coordinates (0, 0, 0), (1, 0, 0), (0, 1, 0) and (1, 1, 0). I know the corresponding projected locations (u, v) for those square corners. How do I find the (u, v) for the (x, y, z) point (0, 0, 1)?
I'm really just looking for equations that will give u001 and v001 in terms of the scalar values:
u000 v000
u100 v100
u010 v010
u110 v110
I think the transformation matrix in homogeneous coordinates from (x, y, z, 1) to (u, v, w) will look like:
a b c d
e f g h
i j k 1
But I can't figure out how to find c, g, or k without a z point.
Doing some algebra:
d = u_00
h = v_00
a = u_10 * (i+1) - u_00
b = u_01 * (j+1) - u_00
e = v_10 * (i+1) - v_00
f = v_01 * (j+1) - v_00

How to fit a 2D ellipse to given points

I would like to fit a 2D array by an elliptic function: (x / a)² + (y / b)² = 1 (and so get a and b).
And then, be able to replot it on my graph.
I found many examples on the internet, but none with this simple Cartesian equation. I have probably searched badly! I think a basic solution to this problem could help many people.
Here is an example of the data:
Sadly, I cannot share the values... So let's assume that I have X, Y arrays defining the coordinates of each of those points.
This can be solved directly using least squares. You can frame this as minimizing the sum of squares of the quantity (alpha * x_i^2 + beta * y_i^2 - 1), where alpha is 1/a^2 and beta is 1/b^2. You have all the x_i's in X and the y_i's in Y, so you can find the minimizer of ||Ax - b||^2, where A is an Nx2 matrix (i.e. [X^2, Y^2]), x is the column vector [alpha; beta], and b is a column vector of all ones.
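For the simple axis-aligned form (x / a)² + (y / b)² = 1 asked about, that setup boils down to a few lines. Here is a minimal sketch, with synthetic data standing in for the question's X, Y arrays (true values a=5, b=3):

import numpy as np

# Synthetic stand-in for the question's X, Y arrays
t = np.linspace(0, 2 * np.pi, 200)
X = 5 * np.cos(t) + 0.1 * np.random.normal(size=t.size)
Y = 3 * np.sin(t) + 0.1 * np.random.normal(size=t.size)

# Least squares for alpha*x^2 + beta*y^2 = 1, where alpha = 1/a^2, beta = 1/b^2
A = np.column_stack([X**2, Y**2])          # N x 2 design matrix
rhs = np.ones(X.size)                      # right-hand side of ones
alpha, beta = np.linalg.lstsq(A, rhs, rcond=None)[0]
a, b = 1 / np.sqrt(alpha), 1 / np.sqrt(beta)
print('a ~ {:.3f}, b ~ {:.3f}'.format(a, b))   # close to 5 and 3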
The following code solves the more general problem for an ellipse of the form Ax^2 + Bxy + Cy^2 + Dx + Ey = 1, though the idea is exactly the same. The print statement gives 0.0776x^2 + 0.0315xy + 0.125y^2 + 0.00457x + 0.00314y = 1, and the fitted ellipse is drawn by the plotting code at the end.
import numpy as np
import matplotlib.pyplot as plt
alpha = 5
beta = 3
N = 500
DIM = 2
np.random.seed(2)
# Generate random points on the unit circle by sampling uniform angles
theta = np.random.uniform(0, 2*np.pi, (N,1))
eps_noise = 0.2 * np.random.normal(size=[N,1])
circle = np.hstack([np.cos(theta), np.sin(theta)])
# Stretch and rotate circle to an ellipse with random linear tranformation
B = np.random.randint(-3, 3, (DIM, DIM))
noisy_ellipse = circle.dot(B) + eps_noise
# Extract x coords and y coords of the ellipse as column vectors
X = noisy_ellipse[:,0:1]
Y = noisy_ellipse[:,1:]
# Formulate and solve the least squares problem ||Ax - b ||^2
A = np.hstack([X**2, X * Y, Y**2, X, Y])
b = np.ones_like(X)
x = np.linalg.lstsq(A, b)[0].squeeze()
# Print the equation of the ellipse in standard form
print('The ellipse is given by {0:.3}x^2 + {1:.3}xy+{2:.3}y^2+{3:.3}x+{4:.3}y = 1'.format(x[0], x[1],x[2],x[3],x[4]))
# Plot the noisy data
plt.scatter(X, Y, label='Data Points')
# Plot the original ellipse from which the data was generated
phi = np.linspace(0, 2*np.pi, 1000).reshape((1000,1))
c = np.hstack([np.cos(phi), np.sin(phi)])
ground_truth_ellipse = c.dot(B)
plt.plot(ground_truth_ellipse[:,0], ground_truth_ellipse[:,1], 'k--', label='Generating Ellipse')
# Plot the least squares ellipse
x_coord = np.linspace(-5,5,300)
y_coord = np.linspace(-5,5,300)
X_coord, Y_coord = np.meshgrid(x_coord, y_coord)
Z_coord = x[0] * X_coord ** 2 + x[1] * X_coord * Y_coord + x[2] * Y_coord**2 + x[3] * X_coord + x[4] * Y_coord
plt.contour(X_coord, Y_coord, Z_coord, levels=[1], colors=('r'), linewidths=2)
plt.legend()
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
Following the suggestion by ErroriSalvo, here is the complete process of fitting an ellipse using the SVD. The arrays x, y are coordinates of the given points, let's say there are N points. Then U, S, V are obtained from the SVD of the centered coordinate array of shape (2, N). So, U is a 2 by 2 orthogonal matrix (rotation), S is a vector of length 2 (singular values), and V, which we do not need, is an N by N orthogonal matrix.
The linear map transforming the unit circle to the ellipse of best fit is
sqrt(2/N) * U * diag(S)
where diag(S) is the diagonal matrix with singular values on the diagonal. To see why the factor of sqrt(2/N) is needed, imagine that the points x, y are taken uniformly from the unit circle. Then sum(x**2) + sum(y**2) is N, and so the coordinate matrix consists of two orthogonal rows of length sqrt(N/2), hence its norm (the largest singular value) is sqrt(N/2). We need to bring this down to 1 to have the unit circle.
import numpy as np
import matplotlib.pyplot as plt

N = 300
t = np.linspace(0, 2*np.pi, N)
x = 5*np.cos(t) + 0.2*np.random.normal(size=N) + 1
y = 4*np.sin(t+0.5) + 0.2*np.random.normal(size=N)
plt.plot(x, y, '.') # given points
xmean, ymean = x.mean(), y.mean()
x -= xmean
y -= ymean
U, S, V = np.linalg.svd(np.stack((x, y)))
tt = np.linspace(0, 2*np.pi, 1000)
circle = np.stack((np.cos(tt), np.sin(tt))) # unit circle
transform = np.sqrt(2/N) * U.dot(np.diag(S)) # transformation matrix
fit = transform.dot(circle) + np.array([[xmean], [ymean]])
plt.plot(fit[0, :], fit[1, :], 'r')
plt.show()
But if you assume that there is no rotation, then np.sqrt(2/N) * S is all you need; these are a and b in the equation of the ellipse.
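As a small illustration of that shortcut (not from the original answer; synthetic axis-aligned data, so the no-rotation assumption actually holds):

import numpy as np

# Axis-aligned ellipse with semi-axes 5 and 4, plus a little noise
N = 300
t = np.linspace(0, 2 * np.pi, N)
x = 5 * np.cos(t) + 0.1 * np.random.normal(size=N)
y = 4 * np.sin(t) + 0.1 * np.random.normal(size=N)

# Center and take the SVD as above; without rotation, sqrt(2/N)*S gives a and b
U, S, V = np.linalg.svd(np.stack((x - x.mean(), y - y.mean())))
a, b = np.sqrt(2.0 / N) * S
print('a ~ {:.2f}, b ~ {:.2f}'.format(a, b))   # close to 5 and 4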
You could try a Singular Value Decomposition of the data matrix.
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.svd.html
First center the data by subtracting mean values of X,Y from each column respectively.
X = X - np.mean(X)
Y = Y - np.mean(Y)
D = np.vstack((X, Y))
Then, apply the SVD and extract
- the singular values (members of s) -> axis lengths
- the singular vectors (columns of U) -> axis orientations
U, s, V = np.linalg.svd(D, full_matrices=True)
This should be a least-squares fit.
Of course, things can get more complicated than this; please see
https://www.emis.de/journals/BBMS/Bulletin/sup962/gander.pdf

OpenGL: Keeping arcball camera up-vector aligned with y-axis

I'm essentially trying to mimic the way the camera rotates in Maya. The arcball in Maya is always aligned with the y-axis. So no matter where the up-vector is pointing, it's still rotated or registered with its up-vector along the y-axis.
I've been able to implement this arcball in OpenGL using C++ and Qt, but I can't figure out how to keep its up-vector aligned. I've been able to keep it aligned at times with the code below:
void ArcCamera::setPos (Vector3 np)
{
    Vector3 up(0, 1, 0);
    Position = np;
    ViewDir = (ViewPoint - Position); ViewDir.normalize();
    RightVector = ViewDir ^ up; RightVector.normalize();
    UpVector = RightVector ^ ViewDir; UpVector.normalize();
}
This works up until the position is at 90-degrees, then the right vector changes and everything is inverted.
So instead I've been maintaining the total rotation (in quaternions) and rotating the original positions (up, right, pos) by it. This works best to keep everything coherent, but now I simply can't align the up-vector to the y-axis. Below is the function for the rotation.
void CCamera::setRot (QQuaternion q)
{
    tot = tot * q;
    Position = tot.rotatedVector(PositionOriginal);
    UpVector = tot.rotatedVector(UpVectorOriginal);
    UpVector.normalize();
    RightVector = tot.rotatedVector(RightVectorOriginal);
    RightVector.normalize();
}
The QQuaternion q is generated from the axis-angle pair derived from the mouse drag. I'm confident this is done correctly. The rotation itself is fine; it just doesn't keep the orientation aligned.
I've noticed in my chosen implementation that dragging in the corners provides a rotation around my view direction, and I can always realign the up-vector to straighten out to the world's y-axis direction. So if I could figure out how much to roll, I could probably do two rotations each time to make sure it's all straight. However, I'm not sure how to go about this.
The reason this isn't working is because Maya's camera manipulation in the viewport does not use an arcball interface. What you want to do is Maya's tumble command. The best resource I've found for explaining this is this document from Professor Orr's Computer Graphics class.
Moving the mouse left and right corresponds to the azimuth angle, and specifies a rotation around the world space Y axis. Moving the mouse up and down corresponds to the elevation angle, and specifies a rotation around the view space X axis. The goal is to generate the new world-to-view matrix, then extract the new camera orientation and eye position from that matrix, based on however you've parameterized your camera.
Start with the current world-to-view matrix. Next, we need to define the pivot point in world space. Any pivot point will work to begin with, and it can be simplest to use the world origin.
Recall that pure rotation matrices generate rotations centered around the origin. This means that to rotate around an arbitrary pivot point, you first translate to the origin, perform the rotation, and translate back. Remember also that transformation composition happens from right to left, so the negative translation to get to the origin goes on the far right:
translate(pivotPosition) * rotate(angleX, angleY, angleZ) * translate(-pivotPosition)
We can use this to calculate the azimuth rotation component, which is a rotation around the world Y axis:
azimuthRotation = translate(pivotPosition) * rotateY(angleY) * translate(-pivotPosition)
We have to do a little additional work for the elevation rotation component, because it happens in view space, around the view space X axis:
elevationRotation = translate(worldToViewMatrix * pivotPosition) * rotateX(angleX) * translate(worldToViewMatrix * -pivotPosition)
We can then get the new view matrix with:
newWorldToViewMatrix = elevationRotation * worldToViewMatrix * azimuthRotation
Now that we have the new worldToView matrix, we're left with having to extract the new world space position and orientation from the view matrix. To do this, we want the viewToWorld matrix, which is the inverse of the worldToView matrix.
newOrientation = transpose(mat3(newWorldToViewMatrix))
newPosition = -((newOrientation * newWorldToViewMatrix).column(3))
At this point, we have the elements separated. If your camera is parameterized so that you're only storing a quaternion for your orientation, you just need to do the rotation matrix -> quaternion conversion. Of course, Maya is going to convert to Euler angles for display in the channel box, which will be dependent on the camera's rotation order (note that the math for tumbling doesn't change when the rotation order changes, just the way that the rotation matrix -> Euler angles conversion is done).
Here's a sample implementation in Python:
#!/usr/bin/env python
import numpy as np
from math import *

def translate(amount):
    'Make a translation matrix, to move by `amount`'
    t = np.matrix(np.eye(4))
    t[3] = amount.T
    t[3, 3] = 1
    return t.T

def rotateX(amount):
    'Make a rotation matrix, that rotates around the X axis by `amount` rads'
    c = cos(amount)
    s = sin(amount)
    return np.matrix([
        [1, 0, 0, 0],
        [0, c,-s, 0],
        [0, s, c, 0],
        [0, 0, 0, 1],
    ])

def rotateY(amount):
    'Make a rotation matrix, that rotates around the Y axis by `amount` rads'
    c = cos(amount)
    s = sin(amount)
    return np.matrix([
        [c, 0, s, 0],
        [0, 1, 0, 0],
        [-s, 0, c, 0],
        [0, 0, 0, 1],
    ])

def rotateZ(amount):
    'Make a rotation matrix, that rotates around the Z axis by `amount` rads'
    c = cos(amount)
    s = sin(amount)
    return np.matrix([
        [c,-s, 0, 0],
        [s, c, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])

def rotate(x, y, z, pivot):
    'Make a XYZ rotation matrix, with `pivot` as the center of the rotation'
    m = rotateX(x) * rotateY(y) * rotateZ(z)
    I = np.matrix(np.eye(4))
    t = (I - m) * pivot
    m[0, 3] = t[0, 0]
    m[1, 3] = t[1, 0]
    m[2, 3] = t[2, 0]
    return m

def eulerAnglesZYX(matrix):
    'Extract the Euler angles from an ZYX rotation matrix'
    x = atan2(-matrix[1, 2], matrix[2, 2])
    cy = sqrt(1 - matrix[0, 2]**2)
    y = atan2(matrix[0, 2], cy)
    sx = sin(x)
    cx = cos(x)
    sz = cx * matrix[1, 0] + sx * matrix[2, 0]
    cz = cx * matrix[1, 1] + sx * matrix[2, 1]
    z = atan2(sz, cz)
    return np.array((x, y, z),)

def eulerAnglesXYZ(matrix):
    'Extract the Euler angles from an XYZ rotation matrix'
    z = atan2(matrix[1, 0], matrix[0, 0])
    cy = sqrt(1 - matrix[2, 0]**2)
    y = atan2(-matrix[2, 0], cy)
    sz = sin(z)
    cz = cos(z)
    sx = sz * matrix[0, 2] - cz * matrix[1, 2]
    cx = cz * matrix[1, 1] - sz * matrix[0, 1]
    x = atan2(sx, cx)
    return np.array((x, y, z),)

class Camera(object):
    def __init__(self, worldPos, rx, ry, rz, coi):
        # Initialize the camera orientation. In this case the original
        # orientation is built from XYZ Euler angles. orientation is the top
        # 3x3 XYZ rotation matrix for the view-to-world matrix, and can more
        # easily be thought of as the world space orientation.
        self.orientation = \
            (rotateZ(rz) * rotateY(ry) * rotateX(rx))
        # position is a point in world space for the camera.
        self.position = worldPos
        # Construct the world-to-view matrix, which is the inverse of the
        # view-to-world matrix.
        self.view = self.orientation.T * translate(-self.position)
        # coi is the "center of interest". It defines a point that is coi
        # units in front of the camera, which is the pivot for the tumble
        # operation.
        self.coi = coi

    def tumble(self, azimuth, elevation):
        '''Tumble the camera around the center of interest.

        Azimuth is the number of radians to rotate around the world-space Y axis.
        Elevation is the number of radians to rotate around the view-space X axis.
        '''
        # Find the world space pivot point. This is the view position in world
        # space minus the view direction vector scaled by the center of
        # interest distance.
        pivotPos = self.position - (self.coi * self.orientation.T[2]).T
        # Construct the azimuth and elevation transformation matrices
        azimuthMatrix = rotate(0, -azimuth, 0, pivotPos)
        elevationMatrix = rotate(elevation, 0, 0, self.view * pivotPos)
        # Get the new view matrix
        self.view = elevationMatrix * self.view * azimuthMatrix
        # Extract the orientation from the new view matrix
        self.orientation = np.matrix(self.view).T
        self.orientation.T[3] = [0, 0, 0, 1]
        # Now extract the new view position
        negEye = self.orientation * self.view
        self.position = -(negEye.T[3]).T
        self.position[3, 0] = 1

np.set_printoptions(precision=3)

pos = np.matrix([[5.321, 5.866, 4.383, 1]]).T
orientation = radians(-60), radians(40), 0
coi = 1

camera = Camera(pos, *orientation, coi=coi)

print 'Initial attributes:'
print np.round(np.degrees(eulerAnglesXYZ(camera.orientation)), 3)
print np.round(camera.position, 3)

print 'Attributes after tumbling:'
camera.tumble(azimuth=radians(-40), elevation=radians(-60))
print np.round(np.degrees(eulerAnglesXYZ(camera.orientation)), 3)
print np.round(camera.position, 3)
Keep track of your view and right vectors from the beginning and update them with the rotation matrix. Then calculate your up vector.
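A minimal numpy sketch of that bookkeeping, not taken from the answers above (R stands for whatever rotation matrix the mouse drag produces):

import numpy as np

# Stored once at startup and only ever rotated, never rebuilt from a fixed world up vector
view_dir = np.array([0.0, 0.0, -1.0])
right = np.array([1.0, 0.0, 0.0])

def update_basis(R, view_dir, right):
    # Rotate the stored vectors, then derive up = right x view_dir
    # (same ordering as UpVector = RightVector ^ ViewDir in the C++ snippet above)
    view_dir = R @ view_dir
    right = R @ right
    up = np.cross(right, view_dir)
    up /= np.linalg.norm(up)
    return view_dir, right, up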