I’m working on the following task:
I have 6 fisheye cameras and would like to produce a 360 degree stitched image.
After carrying out the calibration procedure with findChessboardCorners and calibrateCamera, I obtained the intrinsic and extrinsic matrices.
Starting from the 6 fisheye images, I obtained the 6 planar images with the fisheye.initUndistortRectifyMap function.
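For reference, here is a minimal sketch of that undistortion step, assuming K and D are the intrinsic matrix and fisheye distortion coefficients obtained from the calibration (the file name and identity rectification are placeholders, not from the original post):
import cv2
import numpy as np
# Placeholders: in practice K and D come from the calibration described above
K = np.eye(3)
D = np.zeros((4, 1))
img = cv2.imread('fisheye_0.png')  # hypothetical file name
h, w = img.shape[:2]
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (w, h), cv2.CV_16SC2)
planar = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)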
The two planar images from above are reported below.
Now I should do the stitching to get a 360 degree image.
I tried to do this using the cv2.createStitcher function, but it doesn't always work; moreover, I would like access to the homography matrix so that I can determine the static matrices of the system.
So I tried to calculate the homography matrix myself: using the SIFT algorithm, I identified the common keypoints between two images and kept the keypoints that match best.
I then stitched the two images using the warpPerspective function.
I believe the procedure is correct up to the calculation of the keypoints, but I do not understand why the final result is not good.
In fact, in the attempted stitch the second image is completely deformed / changed in perspective, and part of the right image is lost.
Here is the code:
import cv2
import numpy as np

def cvshow(name, img):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def sift_kp(image):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp, des = sift.detectAndCompute(image, None)
    kp_image = cv2.drawKeypoints(gray_image, kp, None)
    return kp_image, kp, des

def get_good_match(des1, des2):
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)  # des1 is the template image, des2 is the matching image
    matches = sorted(matches, key=lambda x: x[0].distance / x[1].distance)
    # Lowe's ratio test: keep a match only if it is clearly better than its runner-up
    good = []
    for m, n in matches:
        if m.distance < 0.55 * n.distance:
            good.append(m)
    return good
def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):
    # Initialize the visualization image: place images A and B side by side
    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
    vis[0:hA, 0:wA] = imageA
    vis[0:hB, wA:] = imageB
    # Traverse the matches and draw the matching pairs
    for ((trainIdx, queryIdx), s) in zip(matches, status):
        # When the point pair matched successfully, draw it on the visualization
        if s == 1:
            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
    # Return the visualization result
    return vis
# Panorama stitching
def siftimg_rightlignment(img_right, img_left):
    _, kp1, des1 = sift_kp(img_right)
    _, kp2, des2 = sift_kp(img_left)
    goodMatch = get_good_match(des1, des2)
    # When more than 4 filtered matching pairs remain: compute the perspective transformation matrix
    if len(goodMatch) > 4:
        # Get the point coordinates of the matching pairs
        ptsA = np.float32([kp1[m.queryIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
        ptsB = np.float32([kp2[m.trainIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
        ransacReprojThreshold = 4
        # findHomography uses RANSAC to select the best point pairs and then computes H, a 3x3 matrix
        H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
        print(H)
        #H = np.array([[-3.95002617e-01,-7.49813070e-02, 4.43642683e+02], [-4.06655962e-01,5.27365057e-01, 1.20636875e+02],[-1.60149798e-03, -3.69708507e-05, 1.00000000e+00]])
        # Warp the right image into the left image's perspective; result is the transformed image
        result = cv2.warpPerspective(img_right, H, (img_right.shape[1] + img_left.shape[1], img_right.shape[0]))
        cvshow('result_medium', result)
        # Paste the left image onto the left end of the result image
        result[0:img_left.shape[0], 0:img_left.shape[1]] = img_left
        return result
# Feature matching + panoramic stitching
import numpy as np
import cv2

# Read the images to stitch (note the placement of the left and right images);
# the right image is the one that gets warped
img_left = cv2.imread(r'\planar\0.png')
img_right = cv2.imread(r'\planar\5.png')
img_right = cv2.resize(img_right, None, fx=0.5, fy=0.3)
# Ensure that the two images are the same size
img_left = cv2.resize(img_left, (img_right.shape[1], img_right.shape[0]))
kpimg_right, kp1, des1 = sift_kp(img_right)
kpimg_left, kp2, des2 = sift_kp(img_left)
# Display the original image and the image after keypoint detection side by side
cvshow('img_left', np.hstack((img_left, kpimg_left)))
cvshow('img_right', np.hstack((img_right, kpimg_right)))
goodMatch = get_good_match(des1, des2)
all_goodmatch_img = cv2.drawMatches(img_right, kp1, img_left, kp2, goodMatch, None, flags=2)
# goodmatch_img shows only the first matches, goodMatch[:10]
goodmatch_img = cv2.drawMatches(img_right, kp1, img_left, kp2, goodMatch[:10], None, flags=2)
cvshow('Keypoint Matches1', all_goodmatch_img)
cvshow('Keypoint Matches2', goodmatch_img)
# Stitch the images into a panorama
result = siftimg_rightlignment(img_right, img_left)
cvshow('result', result)
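One hedged observation about the warpPerspective step (not part of the original post): if H maps part of img_right to negative coordinates, warpPerspective clips those pixels, which can look exactly like the described loss of the right image. A common workaround is to compose H with a translation so the whole warp stays in frame (tx and ty are hypothetical offsets; H is the homography computed above):
tx, ty = 100, 100  # hypothetical offsets, chosen to keep the warped image in frame
T = np.array([[1, 0, tx],
              [0, 1, ty],
              [0, 0, 1]], dtype=np.float64)
shifted = cv2.warpPerspective(img_right, T.dot(H),
                              (img_right.shape[1] + img_left.shape[1] + tx,
                               img_right.shape[0] + ty))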
I am trying to drop the values that are less than 1 and greater than -1 (i.e., in the open interval (-1, 1)) in my custom activation, like below.
def ScoreActivationFromSigmoid(x, target_min=1, target_max=9):
    condition = K.tf.logical_and(K.tf.less(x, 1), K.tf.greater(x, -1))
    case_true = K.tf.reshape(K.tf.zeros([x.shape[1] * x.shape[2]], tf.float32), shape=(K.tf.shape(x)[0], x.shape[1], x.shape[2]))
    case_false = x
    changed_x = K.tf.where(condition, case_true, case_false)
    activated_x = K.sigmoid(changed_x)
    score = activated_x * (target_max - target_min) + target_min
    return score
the data type has 3 dimensions: batch_size x sequence_length x number of features.
But I got this error:
InvalidArgumentError: Inputs to operation activation_51/Select of type Select must have the same size and shape. Input 0: [1028,300,64] != input 1: [1,300,64]
[[{{node activation_51/Select}} = Select[T=DT_FLOAT, _class=["loc:@training_88/Adam/gradients/activation_51/Select_grad/Select_1"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](activation_51/LogicalAnd, activation_51/Reshape, dense_243/add)]]
[[{{node metrics_92/acc/Mean_1/_9371}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_473_metrics_92/acc/Mean_1", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
I understand what the problem is: the custom activation function cannot find the proper batch size of the inputs. But I don't know how to control it.
Can anyone fix this or suggest other methods to replace some of the element values in some conditions?
The error message I got when running your code is:
ValueError: Cannot reshape a tensor with 19200 elements to shape [1028,300,64] (19737600 elements) for 'Reshape_8' (op: 'Reshape') with input shapes: [19200], [3] and with input tensors computed as partial shapes: input[1] = [1028,300,64].
And the problem is that you cannot reshape a tensor of shape [x.shape[1] * x.shape[2]] to (K.tf.shape(x)[0], x.shape[1], x.shape[2]), because their element counts differ (300 * 64 = 19200 versus 1028 * 300 * 64 = 19737600).
So the solution is just to create a zero array in the right shape.
This line:
case_true = K.tf.reshape(K.tf.zeros([x.shape[1] * x.shape[2]], tf.float32), shape=(K.tf.shape(x)[0], x.shape[1], x.shape[2]))
should be replaced with:
case_true = K.tf.reshape(K.tf.zeros([x.shape[0] * x.shape[1] * x.shape[2]], K.tf.float32), shape=(K.tf.shape(x)[0], x.shape[1], x.shape[2]))
or using K.tf.zeros_like:
case_true = K.tf.zeros_like(x)
Workable code:
import keras.backend as K
import numpy as np

def ScoreActivationFromSigmoid(x, target_min=1, target_max=9):
    condition = K.tf.logical_and(K.tf.less(x, 1), K.tf.greater(x, -1))
    case_true = K.tf.zeros_like(x)
    case_false = x
    changed_x = K.tf.where(condition, case_true, case_false)
    activated_x = K.tf.sigmoid(changed_x)
    score = activated_x * (target_max - target_min) + target_min
    return score

with K.tf.Session() as sess:
    x = K.tf.placeholder(K.tf.float32, shape=(1028, 300, 64), name='x')
    score = sess.run(ScoreActivationFromSigmoid(x), feed_dict={'x:0': np.random.randn(1028, 300, 64)})
    print(score)
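As an aside, since the question also asks for other ways to replace element values conditionally: the same masking can be written without K.tf.where, by multiplying with a cast boolean mask. A sketch in the same K.tf style (not from the original answer):
# Zero out entries strictly inside (-1, 1) by multiplying with a 0/1 mask
keep = K.tf.logical_or(K.tf.greater_equal(x, 1.0), K.tf.less_equal(x, -1.0))
changed_x = x * K.tf.cast(keep, K.tf.float32)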
I'm creating a contour plot with a list of V values as the x-axis and a list of T values as the y-axis (the V and T values are floats with 2 digits after the decimal point, all sorted of course). I created a data matrix and populated it with the data correlating with the V-T coordinates.
If it helps anything, this is what the contour plot would look like:
I'm trying to override the format_coord method to also display the data along with the x-y (V-T) coordinates when the cursor moves.
I can't post all my code here, but here are the relevant parts:
fig = Figure()
a = fig.add_subplot(111)
contour_plot = a.contourf(self.pre_formating[0], self.pre_formating[1], datapoint)  #Plot the contour on the axes

def fmt(x, y):
    '''Overrides the original matplotlib method to also display z value when moving the cursor
    '''
    V_lst = self.pre_formating[0]  #List of V points
    T_lst = self.pre_formating[1]  #List of T points
    Zflat = datapoint.flatten()  #Flatten out the data matrix
    print 'first print line'
    # get closest point with known v,t values
    dist = distance.cdist([x, y], np.stack([V_lst, T_lst], axis=-1))
    print 'second print line'
    closest_idx = np.argmin(dist)
    z = Zflat[closest_idx]
    return 'x={x:.5f} y={y:.5f} z={z:.5f}'.format(x=x, y=y, z=z)

a.format_coord = fmt
The above code does not work: when I move the cursor, nothing shows up, not even the x, y values. The 'first print line' gets printed but the 'second print line' doesn't, so I think the problem is with the 'dist' line.
But when I change the 'dist' line to
dist = np.linalg.norm(np.vstack([V_lst - x, T_lst - y]), axis=0)
everything works (x, y, and the data show up) for a 37x37 matrix, but not for a 37x46 matrix (37 T values, 46 V values), and I don't know why.
What should I do to make my code work?
Thank you for helping!
Hi guys, I found out how to solve this, in case anyone needs it. There were two problems:
1/ I was wrong in my way of generating the list of (V, T) coordinates.
2/ distance.cdist requires everything in the form of a 2D array.
So this is the final solution:
def fmt(x, y):
    '''Overrides the original matplotlib method to also display z value when moving the cursor
    '''
    V_lst = self.pre_formating[0]  #List of V points
    T_lst = self.pre_formating[1]  #List of T points
    coordinates = [[v, t] for t in T_lst for v in V_lst]  # List of coordinates (V,T)
    Zflat = datapoint.flatten()  #Flatten out the data matrix
    # get closest point with known v,t values
    dist = distance.cdist([[x, y]], coordinates)
    closest_idx = np.argmin(dist)
    z = Zflat[closest_idx]
    return 'x={x:.5f} y={y:.5f} z={z:.5f}'.format(x=x, y=y, z=z)
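For anyone who wants to try this, below is a minimal self-contained version of the fix. The grid sizes (46 V values, 37 T values) mirror the question; the axis ranges and data are random placeholders:
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance

V_lst = np.linspace(0.0, 4.5, 46)   # placeholder V axis
T_lst = np.linspace(0.0, 3.6, 37)   # placeholder T axis
datapoint = np.random.rand(37, 46)  # shape (len(T_lst), len(V_lst))

fig, a = plt.subplots()
a.contourf(V_lst, T_lst, datapoint)

def fmt(x, y):
    # Row-major flatten order matches [[v, t] for t in T_lst for v in V_lst]
    coordinates = [[v, t] for t in T_lst for v in V_lst]
    Zflat = datapoint.flatten()
    dist = distance.cdist([[x, y]], coordinates)
    z = Zflat[np.argmin(dist)]
    return 'x={x:.5f} y={y:.5f} z={z:.5f}'.format(x=x, y=y, z=z)

a.format_coord = fmt
plt.show()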
I am new to matplotlib animation and am trying to animate a scatter plot where points moving towards the right turn red gradually, while points moving towards the left turn blue gradually. The code doesn't work perfectly: it doesn't change the color of the points gradually. When I pause the animation and maximize the window, the gradual change in color suddenly appears; when I resume it, it is the same as before. Here is the animation link. The final image should be something like this:
But the animation doesn't show the gradual change of colors, as you can see in the video.
Here is the code. I'd really appreciate your help. Thanks!
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import pandas as pd

class AnimatedScatter(object):
    """An animated scatter plot using matplotlib.animations.FuncAnimation."""
    def __init__(self, numpoints=5):
        self.numpoints = numpoints
        self.stream = self.data_stream()
        # Setup the figure and axes...
        self.fig, self.ax = plt.subplots()
        # Then setup FuncAnimation.
        self.ani = animation.FuncAnimation(self.fig, self.update, interval=500,
                                           init_func=self.setup_plot, blit=True, repeat=False)
        self.fig.canvas.mpl_connect('button_press_event', self.onClick)
        #self.ani.save("animation.mp4")

    def setup_plot(self):
        """Initial drawing of the scatter plot."""
        t = next(self.stream)
        x, y, c = t[:, 0], t[:, 1], t[:, 2]
        self.scat = self.ax.scatter(x, y, c=c, s=50, animated=True)
        self.ax.axis([-15, 15, -10, 10])
        # For FuncAnimation's sake, we need to return the artist we'll be using.
        # Note that it expects a sequence of artists, thus the trailing comma.
        return self.scat,

    def data_stream(self):
        #f = pd.read_csv("crc_viz.csv")
        columns = ['TbyN', 'CbyS']
        #f = f[['TbyN', 'CbyS']]
        index = range(1, self.numpoints + 1)
        x = 10 * (np.ones((self.numpoints, 1)) - 2 * np.random.random((self.numpoints, 1)))
        y = 5 * (np.ones((self.numpoints, 1)) - 2 * np.random.random((self.numpoints, 1)))
        f = np.column_stack((x, y))
        f = pd.DataFrame(f, columns=columns)
        print f
        f['new_cbys'] = f['CbyS']
        f['new_cbys'][f['new_cbys'] < 0] = -1
        f['new_cbys'][f['new_cbys'] > 0] = 1
        f = f[:self.numpoints]
        cbys = np.array(list(f['CbyS']))
        sign = np.array(list(f['new_cbys']))
        x = np.array([0] * self.numpoints)
        y = np.array(f['TbyN'])
        c = np.array([0.5] * self.numpoints)
        t = [(255, 0, 0) for i in range(self.numpoints)]
        data = np.column_stack((x, y, c))
        x = data[:, 0]
        c = data[:, 2]
        while True:
            #print xy
            #print cbys
            if not pause:
                for i in range(len(x)):
                    if sign[i] == 1:
                        if x[i] < cbys[i] - 0.1:
                            x[i] += 0.1
                            c[i] += 0.05
                        else:
                            x[i] = cbys[i]
                    elif sign[i] == -1:
                        if x[i] > cbys[i] + 0.1:
                            x[i] -= 0.1
                            c[i] -= 0.05
                        else:
                            x[i] = cbys[i]
                print c
            #print data
            #print c
            yield data

    def onClick(self, event):
        global pause
        pause ^= True

    def update(self, i):
        """Update the scatter plot."""
        data = next(self.stream)
        print data[:, 2]
        # Set x and y data...
        self.scat.set_offsets(data[:, :2])
        # Set colors..
        self.scat.set_array(data[:, 2])
        return self.scat,

    def save(self):
        plt.rcParams['animation.ffmpeg_path'] = 'C:\\ffmpeg\\bin\\ffmpeg.exe'
        self.mywriter = animation.FFMpegWriter()
        self.ani.save("myMovie.mp4", writer=self.mywriter)
        self.show()

    def show(self):
        #mng = plt.get_current_fig_manager()
        #mng.window.state('zoomed')
        plt.show()

pause = False

if __name__ == '__main__':
    a = AnimatedScatter(10)
    a.show()
    #a.save()
The problem you have is that the scatter plot is redrawn in every iteration, renormalizing the colors to the minimal and maximal value of c. So even at the start there will be a dot corresponding to the minimal and the maximal color of the colormap.
The solution would be to use a color normalization which is absolute from the start. The easiest way to do this is using the vmin and vmax keyword arguments.
ax.scatter(x, y, c=c, vmin=-1.5, vmax=2)
(This means that a value of c=-1.5 is the lowest color in the colormap and c=2 corresponds to the highest.)
Now it may be a bit hard to find appropriate limits, as the values are constantly changing in an infinite loop, so you will need to determine suitable values yourself depending on the use case.
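If you prefer an explicit normalization object over the keyword arguments, here is a sketch of the equivalent, reusing the names from setup_plot above with the same assumed limits:
import matplotlib.colors as mcolors
norm = mcolors.Normalize(vmin=-1.5, vmax=2)  # absolute color scaling, fixed from the start
self.scat = self.ax.scatter(x, y, c=c, norm=norm, s=50, animated=True)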
I'm trying to update data in a mayavi 3D plot. Some of the changes to the data don't affect the data shape, so the mlab_source.set() method can be used (which updates the underlying data and refreshes the display without resetting the camera, regenerating the VTK pipeline, or rebuilding the underlying data structure). This is the best possible case for animation or quick plot updates.
If the underlying data changes shape, the documentation recommends using the mlab_source.reset() method, which while not recreating the entire pipeline or messing up the current scene's camera, does cause the data structure to be rebuilt, costing some performance overhead. This is causing crashes for me.
The worst way to go is completely deleting the plot source and generating a new one with a new call to mlab.mesh() or whatever function was used to plot the data. This recreates a new VTK pipeline, new data structure, and resets the scene's view (loses current zoom and camera settings, which can make smooth interactivity impossible depending on the application).
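To make the three options concrete, here is a minimal sketch using the names from the example below (source = mlab.mesh(...) and ms = source.mlab_source):
ms.set(x=x, y=y, z=z)        # same-shaped data: in-place update, keeps camera and pipeline
ms.reset(x=x, y=y, z=z)      # different-shaped data: rebuilds the data structure
source.remove()              # worst case: delete the source...
source = mlab.mesh(x, y, z)  # ...and re-plot, resetting the scene's view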
I've illustrated a simple example from my application in which a Sphere class can have its properties manipulated (position, size, and resolution). While changing position and size causes the coordinates to refresh, the data remains the same size. However, changing the resolution affects the number of latitude and longitude subdivisions used to represent the sphere, which changes the number of coordinates. When attempting to use the "reset" function, the interpreter crashes completely. I'm pretty sure this is a C-level segfault in the VTK code, based on similar errors around the web. This thread seems to indicate that the core developers were dealing with the problem almost 5 years ago, but I can't tell if it was truly solved.
I am on Mayavi 4.3.1 which I got along with the Enthought Tool Suite with Python(x, y). I'm on Windows 7 64-bit with Python 2.7.5. I'm using PySide, but I removed those calls and let mlab work by itself for this example.
Here's my example that shows mlab_source.set() working but crashing on mlab_source.reset(). Any thoughts on why it's crashing? Can others duplicate it? I'm pretty sure there are other ways to update the data through the source (TVTK?) object, but I can't find them in the docs, and the dozens of traits-related attributes are very difficult to wade through.
Any help is appreciated!
#Numpy Imports
from time import sleep
import numpy as np
from numpy import sin, cos, pi


class Sphere(object):
    """
    Class for a sphere
    """
    def __init__(self, c=None, r=None, n=None):
        #Initial defaults
        self._coordinates = None
        self._c = np.array([0.0, 0.0, 0.0])
        self._r = 1.0
        self._n = 20
        self._hash = []
        self._required_inputs = [('c', list),
                                 ('r', float)]
        #Assign Inputs
        if c is not None:
            self.c = c
        else:
            self.c = self._c
        if r is not None:
            self.r = r
        else:
            self.r = self._r
        if n is not None:
            self.n = n
        else:
            self.n = self._n

    @property
    def c(self):
        """
        Center point of sphere
        - Point is specified as a cartesian coordinate triplet, [x, y, z]
        - Coordinates are stored as a numpy array
        - Coordinates input as a list will be coerced to a numpy array
        """
        return self._c

    @c.setter
    def c(self, val):
        if isinstance(val, list):
            val = np.array(val)
        self._c = val

    @property
    def r(self):
        """
        Radius of sphere
        """
        return self._r

    @r.setter
    def r(self, val):
        if val < 0:
            raise ValueError("Sphere radius input must be positive")
        self._r = val

    @property
    def n(self):
        """
        Resolution of curvature
        - Number of points used to represent circles and arcs
        - For a sphere, n is the number of subdivisions per hemisphere (in both latitude and longitude)
        """
        return self._n

    @n.setter
    def n(self, val):
        if val < 0:
            raise ValueError("Sphere n-value for specifying arc/circle resolution must be positive")
        self._n = val

    @property
    def coordinates(self):
        """
        Returns x, y, z coordinate arrays to visualize the shape in 3D
        """
        self._lazy_update()
        return self._coordinates

    def _lazy_update(self):
        """
        Only update the coordinates data if necessary
        """
        #Get a newly calculated hash based on the sphere's inputs
        new_hash = self._get_hash()
        #Get the old hash
        old_hash = self._hash
        #Check if the sphere's state has changed
        if new_hash != old_hash:
            #Something changed - update the coordinates
            self._update_coordinates()

    def _get_hash(self):
        """
        Get the sphere's inputs as an immutable data structure
        """
        return tuple(map(tuple, [self._c, [self._r, self._n]]))

    def _update_coordinates(self):
        """
        Calculate 3D coordinates to represent the sphere
        """
        c, r, n = self._c, self._r, self._n
        #Get the angular distance between latitude and longitude lines
        dphi, dtheta = pi / n, pi / n
        #Generate a latitude and longitude grid
        [phi, theta] = np.mgrid[0:pi + dphi * 1.0:dphi,
                                0:2 * pi + dtheta * 1.0:dtheta]
        #Map the latitude-longitude grid into cartesian x, y, z coordinates
        x = c[0] + r * cos(phi) * sin(theta)
        y = c[1] + r * sin(phi) * sin(theta)
        z = c[2] + r * cos(theta)
        #Store the coordinates
        self._coordinates = x, y, z
        #Update the stored hash to match these coordinates
        self._hash = self._get_hash()


if __name__ == '__main__':
    from mayavi import mlab

    #Make a sphere
    sphere = Sphere()
    #Plot the sphere
    source = mlab.mesh(*sphere.coordinates, representation='wireframe')
    #Get the mlab_source
    ms = source.mlab_source

    #Increase the sphere's radius by 2
    sphere.r *= 2
    #New coordinates (with larger radius)
    x, y, z = sphere.coordinates
    #Currently plotted coordinates
    x_old, y_old, z_old = ms.x, ms.y, ms.z
    #Verify that new x, y, z are all same shape as old x, y, z
    data_is_same_shape = all([i.shape == j.shape for i, j in zip([x_old, y_old, z_old], [x, y, z])])
    #Pause to see the old sphere
    sleep(2)
    #Check if data has changed shape... (shouldn't have)
    if data_is_same_shape:
        print "Updating same-shaped data"
        ms.set(x=x, y=y, z=z)
    else:
        print "Updating with different shaped data"
        ms.reset(x=x, y=y, z=z)

    #Increase the arc resolution
    sphere.n = 50
    #New coordinates (with more points)
    x, y, z = sphere.coordinates
    #Currently plotted coordinates
    x_old, y_old, z_old = ms.x, ms.y, ms.z
    #Verify that new x, y, z are all same shape as old x, y, z
    data_is_same_shape = all([i.shape == j.shape for i, j in zip([x_old, y_old, z_old], [x, y, z])])
    #Pause to see the bigger sphere
    sleep(2)
    #Check if data has changed shape... (should have this time...)
    if data_is_same_shape:
        print "Updating same-shaped data"
        ms.set(x=x, y=y, z=z)
    else:
        #This is where the segfault / crash occurs
        print "Updating with different shaped data"
        ms.reset(x=x, y=y, z=z)

    mlab.show()
EDIT:
I just verified that all of these mlab_source tests pass for me, which includes testing reset on an MGridSource. This does show some possible workarounds, like accessing source.mlab_source.dataset.points ... maybe there's a way to update the data manually?
EDIT 2:
I tried this:
p = np.array([x.flatten(), y.flatten(), z.flatten()]).T
ms.dataset.points = p
ms.dataset.point_data.scalars = np.zeros(x.shape)
ms.dataset.points.modified()
#Regenerate the data structure
ms.reset(x=x, y=y, z=z)
It appears that modifying the TVTK PolyData object directly partly works: it updates the points without also auto-fixing the connectivity, which is why I also have to run mlab_source.reset(). I assume the reset() works now because the incoming data has the same number of points and the mlab_source handles auto-generating the connectivity data. It still crashes when reducing the number of points, maybe because connectivity data exists for points that no longer exist? I'm still very frustrated with this.
EDIT 3:
I've implemented the brute-force method of just generating a new surface with mlab.mesh(). To prevent resetting the view, I disable rendering and store the camera settings, then restore the camera settings after mlab.mesh() and re-enable rendering. It seems to work quickly enough; I still wish the underlying data could be updated with reset().
Here's the entire class I use to manage plotting objects (responds to GUI signals after an edit has been made).
from PySide import QtCore
import numpy as np


class PlottablePrimitive(QtCore.QObject):
    def __init__(self, parent=None, shape=None, scene=None, mlab=None):
        super(PlottablePrimitive, self).__init__(parent=parent)
        self._shape = None
        self._scene = None
        self._mlab = None
        self._source = None
        self._color = [0.706, 0.510, 0.196]
        self._visible = True
        self._opacity = 1.0
        self._camera = {'position': None,
                        'focal_point': None,
                        'view_angle': None,
                        'view_up': None,
                        'clipping_range': None}
        if shape is not None:
            self._shape = shape
        if scene is not None:
            self._scene = scene
        if mlab is not None:
            self._mlab = mlab

    @property
    def shape(self):
        return self._shape

    @shape.setter
    def shape(self, val):
        self._shape = val

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, color):
        self._color = color
        if self._source is not None:
            surface = self._source.children[0].children[0].children[0]
            surface.actor.mapper.scalar_visibility = False
            surface.actor.property.color = tuple(color)

    def plot(self):
        x, y, z = self._shape.coordinates
        self._source = self._mlab.mesh(x, y, z)

    def update_plot(self):
        ms = self._source.mlab_source
        x, y, z = self._shape.coordinates
        a, b, c = ms.x, ms.y, ms.z
        data_is_same_shape = all([i.shape == j.shape for i, j in zip([a, b, c], [x, y, z])])
        if data_is_same_shape:
            print "Same Data Shape... updating"
            #Update the data in-place
            ms.set(x=x, y=y, z=z)
        else:
            print "New Data Shape... resetting data"
            method = 'new_source'
            if method == 'tvtk':
                #Modify TVTK directly
                p = np.array([x.flatten(), y.flatten(), z.flatten()]).T
                ms.dataset.points = p
                ms.dataset.point_data.scalars = np.zeros(x.shape)
                ms.dataset.points.modified()
                #Regenerate the data structure
                ms.reset(x=x, y=y, z=z)
            elif method == 'reset':
                #Regenerate the data structure
                ms.reset(x=x, y=y, z=z)
            elif method == 'new_source':
                scene = self._scene
                #Save camera settings
                self._save_camera()
                #Disable rendering
                self._scene.disable_render = True
                #Delete old plot
                self.delete_plot()
                #Generate new mesh
                self._source = self._mlab.mesh(x, y, z)
                #Reset camera
                self._restore_camera()
                #Re-enable rendering
                self._scene.disable_render = False

    def _save_camera(self):
        scene = self._scene
        #Save camera settings
        for setting in self._camera.keys():
            self._camera[setting] = getattr(scene.camera, setting)

    def _restore_camera(self):
        scene = self._scene
        #Restore camera settings
        for setting in self._camera.keys():
            if self._camera[setting] is not None:
                setattr(scene.camera, setting, self._camera[setting])

    def delete_plot(self):
        #Remove the existing surface from the scene
        if self._source is not None:
            self._source.remove()
            self._source = None
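For context, a hypothetical usage of this class, assuming scene and mlab come from the surrounding GUI code and Sphere is the class from the first listing:
sphere = Sphere()
plottable = PlottablePrimitive(shape=sphere, scene=scene, mlab=mlab)
plottable.plot()
sphere.n = 50            # shape-changing edit
plottable.update_plot()  # takes the 'new_source' branch, preserving the camera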