Cannot override matplotlib format_coord - python-2.7

I'm creating a contour plot with a list of V values as the x-axis and a list of T values as the y-axis (the V and T values are floats with two digits after the decimal point, and both lists are sorted). I created a data matrix and populated it with the data corresponding to the V-T coordinates.
I'm trying to override the format_coord method so that it also displays the z value along with the x-y (V-T) coordinates as the cursor moves.
I can't post all my code here, but here are the relevant parts:
fig = Figure()
a = fig.add_subplot(111)
contour_plot = a.contourf(self.pre_formating[0], self.pre_formating[1], datapoint)  # Plot the contour on the axes

def fmt(x, y):
    '''Overrides the original matplotlib method to also display the z value
    when moving the cursor.
    '''
    V_lst = self.pre_formating[0]  # List of V points
    T_lst = self.pre_formating[1]  # List of T points
    Zflat = datapoint.flatten()    # Flatten out the data matrix
    print 'first print line'
    # Get the closest point with known V, T values
    dist = distance.cdist([x, y], np.stack([V_lst, T_lst], axis=-1))
    print 'second print line'
    closest_idx = np.argmin(dist)
    z = Zflat[closest_idx]
    return 'x={x:.5f} y={y:.5f} z={z:.5f}'.format(x=x, y=y, z=z)

a.format_coord = fmt
The above code does not work: when I move the cursor nothing shows up, not even the x, y values. The 'first print line' gets printed but the 'second print line' doesn't, so I think the problem is in the 'dist' line.
But when I change the 'dist' line to
dist = np.linalg.norm(np.vstack([V_lst - x, T_lst - y]), axis=0)
everything works (x, y and the data show up) for a 37x37 matrix, but not for a 37x46 matrix (37 T values, 46 V values), and I don't know why.
What should I do to make my code work?
Thank you for helping!

Hi guys, I found out how to solve this, in case anyone needs it. There were 2 problems:
1/ I was generating the list of (V, T) coordinates incorrectly.
2/ distance.cdist requires both of its inputs to be 2D arrays.
So this is the final solution:
def fmt(x, y):
    '''Overrides the original matplotlib method to also display the z value
    when moving the cursor.
    '''
    V_lst = self.pre_formating[0]  # List of V points
    T_lst = self.pre_formating[1]  # List of T points
    coordinates = [[v, t] for t in T_lst for v in V_lst]  # List of (V, T) coordinates
    Zflat = datapoint.flatten()  # Flatten out the data matrix
    # Get the closest point with known V, T values
    dist = distance.cdist([[x, y]], coordinates)
    closest_idx = np.argmin(dist)
    z = Zflat[closest_idx]
    return 'x={x:.5f} y={y:.5f} z={z:.5f}'.format(x=x, y=y, z=z)
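For reference, here is a minimal, self-contained sketch of the same idea with made-up data (the value ranges and the random matrix are invented purely for illustration):

# Minimal sketch with invented data (Python 2.7)
import numpy as np
from scipy.spatial import distance
import matplotlib.pyplot as plt

V_lst = np.round(np.linspace(0.0, 4.5, 46), 2)    # 46 V values (x-axis)
T_lst = np.round(np.linspace(20.0, 56.0, 37), 2)  # 37 T values (y-axis)
data = np.random.rand(len(T_lst), len(V_lst))     # 37x46 matrix, rows follow T

fig, ax = plt.subplots()
ax.contourf(V_lst, T_lst, data)

coordinates = [[v, t] for t in T_lst for v in V_lst]  # same ordering as data.flatten()
Zflat = data.flatten()

def fmt(x, y):
    dist = distance.cdist([[x, y]], coordinates)  # both arguments must be 2D
    z = Zflat[np.argmin(dist)]
    return 'x={x:.5f} y={y:.5f} z={z:.5f}'.format(x=x, y=y, z=z)

ax.format_coord = fmt
plt.show()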

Related

Coordinates translation using list comprehension in python

I'm trying to translate coordinates to center them over Brussels. Due to the specific structure of my variable, I don't know how to iterate over it. Can someone please explain how I could proceed while keeping the structure of the coordinates, because I need them in that structure to plot them.
import json
import pyproj
import folium

# Load JSON file
with open("districts.json", "r") as f:
    data = json.load(f)

# Create a transformation object
in_proj = pyproj.Proj(init='EPSG:3857')
out_proj = pyproj.Proj(proj='longlat', datum='WGS84')

# Transform the coordinates
features = data["features"]
for feature in features:
    coords = feature["geometry"]["coordinates"][0]
    coords = [pyproj.transform(in_proj, out_proj, coord[0], coord[1]) for coord in coords]
    # Adjust the coordinates by adding a translation
    brussels_center = [50.85045, 4.34878]
    coords = [[(x[0] + brussels_center[0], x[1] + brussels_center[1]) for x in coord] for coord in coords]
    feature["geometry"]["coordinates"] = [coords]

# Plot the polygon on a map
m = folium.Map()
folium.GeoJson(data).add_to(m)
m
This is what the coordinates look like:
[(1.326960232616557, 1.5057587136636252),
(1.3270959770391675, 1.5058519176095608),
...,
(1.3264792386807474, 1.5054145891073423),
(1.326960232616557, 1.5057587136636252)]
I'm trying to translate the coordinates so that their structure does not change but they end up centered around Brussels. I don't understand why my iteration is wrong.
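A minimal sketch of a structure-preserving version of that comprehension (this assumes coords is the flat list of transformed (x, y) tuples shown above; the offset values are only placeholders, not taken from the question):

# Each element of `coords` is already a single (x, y) tuple after the
# pyproj.transform comprehension, so only one level of iteration is needed.
# GeoJSON stores coordinates as (lon, lat), so any offset should be applied
# in that order as well.
dx, dy = 4.34878, 50.85045   # placeholder (lon, lat) offset
coords = [(x + dx, y + dy) for (x, y) in coords]
feature["geometry"]["coordinates"] = [coords]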

Image transformation: converting a line to an arc

I am looking for an image transformation that can convert a line into an arc. My ultimate goal is to generate curved text in the Devanagari script.
Please be kind, I have looked and searched on Google and was not able to find anything.
def transform_desire(image, curveIntensity):
    '''
    Will convert the image to arc form.
    image : input image.
    curveIntensity : how strongly the text should be curved.
    '''
    im1 = image
    ratio = 0.0001 * curveIntensity
    ## Get the image dimensions.
    height, width, channel = im1.shape
    x = np.linspace(0, width, width).astype(int)
    y = (ratio * ((x - (width / 2)) ** 2)).astype(int)
    ## Corresponding to each x, every column gets shifted vertically by the y amount.
    ## Time to shift.
    ## Create a canvas for the new image.
    adder = 0
    if ratio >= 0:
        adder = max(y)
    else:
        adder = (-1) * min(y)
    retImage = (np.ones((height + adder, width, channel)) * 0).astype(np.uint8)
    if ratio >= 0:
        adder = 0
    for xs in range(width):
        ys = y[xs]
        for t in range(height):
            retImage[t + ys + adder, xs, :] = im1[t, xs, :]
    return retImage
This worked for me. Please help yourself out, if you need this.
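A quick usage sketch (the file name and the use of OpenCV are assumptions, not part of the original answer):

# Hypothetical usage, assuming OpenCV is installed and 'text.png' is a
# 3-channel image containing a rendered line of text.
import cv2

img = cv2.imread('text.png')        # shape: (height, width, 3)
curved = transform_desire(img, 50)  # the sign of curveIntensity controls the arc direction
cv2.imwrite('text_curved.png', curved)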

Different colors for scatter plots based on origin of data

I have a list called samples. I load several images into this list from two different folders, say Folder1 and Folder2. Then I convert the list to a DataFrame and plot it in a 2D scatter plot. I want the scatter plot to show all points coming from Folder1 in red and all points coming from Folder2 in blue. How can I accomplish this? My code is below:
import glob
import pandas as pd
import matplotlib.pyplot as plt
from scipy import misc

samples = []
Folder1 = glob.iglob('/home/..../Folder1/*.png')
Folder2 = glob.iglob('/home/..../Folder2/*.png')

for fname in Folder1:
    img = misc.imread(fname)
    samples.append((img[::2, ::2] / 255.0).reshape(-1))

for fname in Folder2:
    img = misc.imread(fname)
    samples.append((img[::2, ::2] / 255.0).reshape(-1))

samples = pd.DataFrame(samples)

def do_ISO(df):
    from sklearn import manifold
    iso = manifold.Isomap(n_neighbors=6, n_components=3)
    iso.fit(df)
    A = iso.transform(df)
    return A

def Plot2D(T, title, x, y):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title(title)
    ax.set_xlabel('Component: {0}'.format(x))
    ax.set_ylabel('Component: {0}'.format(y))
    x_size = (max(T[:, x]) - min(T[:, x])) * 0.08
    y_size = (max(T[:, y]) - min(T[:, y])) * 0.08
    ax.scatter(T[:, x], T[:, y], marker='.', alpha=0.7)

Plot2D(do_ISO(samples), 'ISO_Chart', 0, 1)
plt.show()
It's pretty difficult to say without seeing the arrays you are working with. You are actually plotting the result of your do_ISO() function, which creates an array using sklearn.manifold.Isomap.transform().
Does this function preserve the ordering of the elements in your array?
If so, things should be fairly easy. Since you first append all the images from Folder1 and then those from Folder2, you can simply count the number of items in Folder1 and split your array in two based on that number (e.g. nbFilesFolder1). Then you make two calls to scatter:
ax.scatter(T[:nbFilesFolder1, x], T[:nbFilesFolder1, y], marker='.', alpha=0.7, c='red')
ax.scatter(T[nbFilesFolder1:, x], T[nbFilesFolder1:, y], marker='.', alpha=0.7, c='blue')
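A small sketch of how nbFilesFolder1 could be obtained while loading, adapting the question's own loading loops (the counter name is the placeholder used above):

# Count how many samples came from Folder1 while loading.
nbFilesFolder1 = 0
for fname in Folder1:
    img = misc.imread(fname)
    samples.append((img[::2, ::2] / 255.0).reshape(-1))
    nbFilesFolder1 += 1   # everything appended so far came from Folder1

for fname in Folder2:
    img = misc.imread(fname)
    samples.append((img[::2, ::2] / 255.0).reshape(-1))

Since Isomap.transform() keeps the row order of its input, the first nbFilesFolder1 rows of T correspond to Folder1.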

Matplotlib animation scatter plot python. Gradually change color of points

I am new to matplotlib animation and am trying to animate a scatter plot where points moving towards the right turn red gradually while points moving towards the left turn blue gradually. The code doesn't work as intended: it doesn't change the color of the points gradually. When I pause the animation and maximize the window, the gradual change in color suddenly appears; when I play it again, it goes back to the same behavior. Here is the animation link. The final image should be something like this:
But the animation doesn't show the gradual change of colors, as you can see in the video.
Here is the code, I'd really appreciate your help. Thanks.
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import pandas as pd


class AnimatedScatter(object):
    """An animated scatter plot using matplotlib.animations.FuncAnimation."""
    def __init__(self, numpoints=5):
        self.numpoints = numpoints
        self.stream = self.data_stream()
        # Setup the figure and axes...
        self.fig, self.ax = plt.subplots()
        # Then setup FuncAnimation.
        self.ani = animation.FuncAnimation(self.fig, self.update, interval=500,
                                           init_func=self.setup_plot, blit=True, repeat=False)
        self.fig.canvas.mpl_connect('button_press_event', self.onClick)
        #self.ani.save("animation.mp4")

    def setup_plot(self):
        """Initial drawing of the scatter plot."""
        t = next(self.stream)
        x, y, c = t[:, 0], t[:, 1], t[:, 2]
        self.scat = self.ax.scatter(x, y, c=c, s=50, animated=True)
        self.ax.axis([-15, 15, -10, 10])
        # For FuncAnimation's sake, we need to return the artist we'll be using
        # Note that it expects a sequence of artists, thus the trailing comma.
        return self.scat,

    def data_stream(self):
        #f=pd.read_csv("crc_viz.csv")
        columns = ['TbyN', 'CbyS']
        #f=f[['TbyN','CbyS']]
        index = range(1, self.numpoints + 1)
        x = 10 * (np.ones((self.numpoints, 1)) - 2 * np.random.random((self.numpoints, 1)))
        y = 5 * (np.ones((self.numpoints, 1)) - 2 * np.random.random((self.numpoints, 1)))
        f = np.column_stack((x, y))
        f = pd.DataFrame(f, columns=columns)
        print f
        f['new_cbys'] = f['CbyS']
        f['new_cbys'][f['new_cbys'] < 0] = -1
        f['new_cbys'][f['new_cbys'] > 0] = 1
        f = f[:self.numpoints]
        cbys = np.array(list(f['CbyS']))
        sign = np.array(list(f['new_cbys']))
        x = np.array([0] * self.numpoints)
        y = np.array(f['TbyN'])
        c = np.array([0.5] * self.numpoints)
        t = [(255, 0, 0) for i in range(self.numpoints)]
        data = np.column_stack((x, y, c))
        x = data[:, 0]
        c = data[:, 2]
        while True:
            #print xy
            #print cbys
            if not pause:
                for i in range(len(x)):
                    if sign[i] == 1:
                        if x[i] < cbys[i] - 0.1:
                            x[i] += 0.1
                            c[i] += 0.05
                        else:
                            x[i] = cbys[i]
                    elif sign[i] == -1:
                        if x[i] > cbys[i] + 0.1:
                            x[i] -= 0.1
                            c[i] -= 0.05
                        else:
                            x[i] = cbys[i]
                print c
            #print data
            #print c
            yield data

    def onClick(self, event):
        global pause
        pause ^= True

    def update(self, i):
        """Update the scatter plot."""
        data = next(self.stream)
        print data[:, 2]
        # Set x and y data...
        self.scat.set_offsets(data[:, :2])
        # Set colors..
        self.scat.set_array(data[:, 2])
        return self.scat,

    def save(self):
        plt.rcParams['animation.ffmpeg_path'] = 'C:\\ffmpeg\\bin\\ffmpeg.exe'
        self.mywriter = animation.FFMpegWriter()
        self.ani.save("myMovie.mp4", writer=self.mywriter)
        self.show()

    def show(self):
        #mng = plt.get_current_fig_manager()
        #mng.window.state('zoomed')
        plt.show()


pause = False

if __name__ == '__main__':
    a = AnimatedScatter(10)
    a.show()
    #a.save()
The problem you have is that the scatter plot is redrawn in every iteration, renormalizing the colors to the minimal and maximal value of c. So even at the start there will be a dot corresponding to the minimal and one corresponding to the maximal color of the colormap.
The solution is to use a color normalization which is fixed from the start. The easiest way to do this is with the vmin and vmax keyword arguments:
ax.scatter(x, y, c=c, vmin=-1.5, vmax=2)
(This means that a value of c=-1.5 maps to the lowest color in the colormap and c=2 to the highest.)
It may be a bit hard to find the appropriate values, as the c values keep changing in an infinite loop, so you need to work out suitable limits yourself depending on the use case.
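A stripped-down sketch of the fixed-normalization idea (the data here is invented; only the vmin/vmax usage comes from the answer above):

# Colors stay comparable between frames because the normalization is fixed
# at creation time via vmin/vmax instead of being rescaled to the data.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

numpoints = 10
x = np.zeros(numpoints)
y = np.random.uniform(-5, 5, numpoints)
c = np.full(numpoints, 0.5)
direction = np.random.choice([-1.0, 1.0], numpoints)

fig, ax = plt.subplots()
scat = ax.scatter(x, y, c=c, s=50, cmap='coolwarm', vmin=-1.5, vmax=2)
ax.axis([-15, 15, -10, 10])

def update(frame):
    global x, c
    x = x + 0.1 * direction
    c = c + 0.05 * direction   # drifts outside [0, 1]; the fixed limits keep the mapping stable
    scat.set_offsets(np.column_stack([x, y]))
    scat.set_array(c)
    return scat,

ani = animation.FuncAnimation(fig, update, interval=100, blit=True)
plt.show()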

Python Enthought Mayavi crashes on data update using mlab_source.reset

I'm trying to update data in a mayavi 3D plot. Some of the changes to the data don't affect the data shape, so the mlab_source.set() method can be used. This updates the underlying data and refreshes the display without resetting the camera, regenerating the VTK pipeline, or rebuilding the underlying data structure, and is the best possible case for animation or quick plot updates.
If the underlying data changes shape, the documentation recommends using the mlab_source.reset() method, which, while not recreating the entire pipeline or disturbing the current scene's camera, does cause the data structure to be rebuilt, costing some performance overhead. This is what is crashing for me.
The worst option is completely deleting the plot source and generating a new one with a new call to mlab.mesh() (or whatever function was used to plot the data). This recreates a new VTK pipeline and data structure and resets the scene's view, losing the current zoom and camera settings, which can make smooth interactivity impossible depending on the application.
I've illustrated a simple example from my application in which a Sphere class can have its properties manipulated (position, size, and resolution). Changing the position or size causes the coordinates to refresh, but the data stays the same size. Changing the resolution, however, affects the number of latitude and longitude subdivisions used to represent the sphere, which changes the number of coordinates. When attempting to use the reset() function, the interpreter crashes completely. I'm pretty sure this is a C-level segfault in the VTK code, based on similar errors around the web. This thread seems to show the core developers dealing with the problem almost 5 years ago, but I can't tell if it was truly solved.
I am on Mayavi 4.3.1, which I got along with the Enthought Tool Suite via Python(x, y). I'm on Windows 7 64-bit with Python 2.7.5. I'm using PySide, but I removed those calls and let mlab work by itself for this example.
Here's my example that shows mlab_source.set() working but crashing on mlab_source.reset(). Any thoughts on why it's crashing? Can others reproduce it? I'm pretty sure there are other ways to update the data through the source (TVTK?) object, but I can't find them in the docs, and the dozens of traits-related attributes are very difficult to wade through.
Any help is appreciated!
#Numpy Imports
from time import sleep
import numpy as np
from numpy import sin, cos, pi


class Sphere(object):
    """
    Class for a sphere
    """
    def __init__(self, c=None, r=None, n=None):
        #Initial defaults
        self._coordinates = None
        self._c = np.array([0.0, 0.0, 0.0])
        self._r = 1.0
        self._n = 20
        self._hash = []
        self._required_inputs = [('c', list),
                                 ('r', float)]
        #Assign Inputs
        if c is not None:
            self.c = c
        else:
            self.c = self._c
        if r is not None:
            self.r = r
        else:
            self.r = self._r
        if n is not None:
            self.n = n
        else:
            self.n = self._n

    @property
    def c(self):
        """
        Center point of sphere
        - Point is specified as a cartesian coordinate triplet, [x, y, z]
        - Coordinates are stored as a numpy array
        - Coordinates input as a list will be coerced to a numpy array
        """
        return self._c

    @c.setter
    def c(self, val):
        if isinstance(val, list):
            val = np.array(val)
        self._c = val

    @property
    def r(self):
        """
        Radius of sphere
        """
        return self._r

    @r.setter
    def r(self, val):
        if val < 0:
            raise ValueError("Sphere radius input must be positive")
        self._r = val

    @property
    def n(self):
        """
        Resolution of curvature
        - Number of points used to represent circles and arcs
        - For a sphere, n is the number of subdivisions per hemisphere (in both latitude and longitude)
        """
        return self._n

    @n.setter
    def n(self, val):
        if val < 0:
            raise ValueError("Sphere n-value for specifying arc/circle resolution must be positive")
        self._n = val

    @property
    def coordinates(self):
        """
        Returns x, y, z coordinate arrays to visualize the shape in 3D
        """
        self._lazy_update()
        return self._coordinates

    def _lazy_update(self):
        """
        Only update the coordinates data if necessary
        """
        #Get a newly calculated hash based on the sphere's inputs
        new_hash = self._get_hash()
        #Get the old hash
        old_hash = self._hash
        #Check if the sphere's state has changed
        if new_hash != old_hash:
            #Something changed - update the coordinates
            self._update_coordinates()

    def _get_hash(self):
        """
        Get the sphere's inputs as an immutable data structure
        """
        return tuple(map(tuple, [self._c, [self._r, self._n]]))

    def _update_coordinates(self):
        """
        Calculate 3D coordinates to represent the sphere
        """
        c, r, n = self._c, self._r, self._n
        #Get the angular distance between latitude and longitude lines
        dphi, dtheta = pi / n, pi / n
        #Generate a latitude and longitude grid
        [phi, theta] = np.mgrid[0:pi + dphi*1.0:dphi,
                                0:2 * pi + dtheta*1.0:dtheta]
        #Map the latitude-longitude grid into cartesian x, y, z coordinates
        x = c[0] + r * cos(phi) * sin(theta)
        y = c[1] + r * sin(phi) * sin(theta)
        z = c[2] + r * cos(theta)
        #Store the coordinates
        self._coordinates = x, y, z
        #Update the stored hash to match these coordinates
        self._hash = self._get_hash()


if __name__ == '__main__':
    from mayavi import mlab

    #Make a sphere
    sphere = Sphere()
    #Plot the sphere
    source = mlab.mesh(*sphere.coordinates, representation='wireframe')
    #Get the mlab_source
    ms = source.mlab_source

    #Increase the sphere's radius by 2
    sphere.r *= 2
    #New coordinates (with larger radius)
    x, y, z = sphere.coordinates
    #Currently plotted coordinates
    x_old, y_old, z_old = ms.x, ms.y, ms.z
    #Verify that new x, y, z are all same shape as old x, y, z
    data_is_same_shape = all([i.shape == j.shape for i, j in zip([x_old, y_old, z_old], [x, y, z])])
    #Pause to see the old sphere
    sleep(2)
    #Check if data has changed shape... (shouldn't have)
    if data_is_same_shape:
        print "Updating same-shaped data"
        ms.set(x=x, y=y, z=z)
    else:
        print "Updating with different shaped data"
        ms.reset(x=x, y=y, z=z)

    #Increase the arc resolution
    sphere.n = 50
    #New coordinates (with more points)
    x, y, z = sphere.coordinates
    #Currently plotted coordinates
    x_old, y_old, z_old = ms.x, ms.y, ms.z
    #Verify that new x, y, z are all same shape as old x, y, z
    data_is_same_shape = all([i.shape == j.shape for i, j in zip([x_old, y_old, z_old], [x, y, z])])
    #Pause to see the bigger sphere
    sleep(2)
    #Check if data has changed shape... (should have this time...)
    if data_is_same_shape:
        print "Updating same-shaped data"
        ms.set(x=x, y=y, z=z)
    else:
        #This is where the segfault / crash occurs
        print "Updating with different shaped data"
        ms.reset(x=x, y=y, z=z)

    mlab.show()
EDIT:
I just verified that all of these mlab_source tests pass for me, which includes testing reset() on an MGridSource. This does show some possible workarounds, like accessing source.mlab_source.dataset.points ... maybe there's a way to update the data manually?
EDIT 2:
I tried this:
p = np.array([x.flatten(), y.flatten(), z.flatten()]).T
ms.dataset.points = p
ms.dataset.point_data.scalars = np.zeros(x.shape)
ms.dataset.points.modified()
#Regenerate the data structure
ms.reset(x=x, y=y, z=z)
Modifying the TVTK PolyData object directly appears to partly work: it updates the points but does not also auto-fix the connectivity, which is why I also have to run mlab_source.reset(). I assume reset() works now because the incoming data has the same number of points and the mlab_source handles auto-generating the connectivity data. It still crashes when reducing the number of points, maybe because connectivity data exists for points that no longer exist? I'm still very frustrated with this.
EDIT 3:
I've implemented the brute-force method of just generating a new surface with mlab.mesh(). To prevent resetting the view, I disable rendering and store the camera settings, then restore the camera settings after mlab.mesh() and re-enable rendering. It seems to work quickly enough, but I still wish the underlying data could be updated with reset().
Here's the entire class I use to manage plotted objects (it responds to GUI signals after an edit has been made).
#Assumed imports (not shown in the original snippet): from PySide import QtCore; import numpy as np
class PlottablePrimitive(QtCore.QObject):
    def __init__(self, parent=None, shape=None, scene=None, mlab=None):
        super(PlottablePrimitive, self).__init__(parent=parent)
        self._shape = None
        self._scene = None
        self._mlab = None
        self._source = None
        self._color = [0.706, 0.510, 0.196]
        self._visible = True
        self._opacity = 1.0
        self._camera = {'position': None,
                        'focal_point': None,
                        'view_angle': None,
                        'view_up': None,
                        'clipping_range': None}
        if shape is not None:
            self._shape = shape
        if scene is not None:
            self._scene = scene
        if mlab is not None:
            self._mlab = mlab

    @property
    def shape(self):
        return self._shape

    @shape.setter
    def shape(self, val):
        self._shape = val

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, color):
        self._color = color
        if self._source is not None:
            surface = self._source.children[0].children[0].children[0]
            surface.actor.mapper.scalar_visibility = False
            surface.actor.property.color = tuple(color)

    def plot(self):
        x, y, z = self._shape.coordinates
        self._source = self._mlab.mesh(x, y, z)

    def update_plot(self):
        ms = self._source.mlab_source
        x, y, z = self._shape.coordinates
        a, b, c = ms.x, ms.y, ms.z
        data_is_same_shape = all([i.shape == j.shape for i, j in zip([a, b, c], [x, y, z])])
        if data_is_same_shape:
            print "Same Data Shape... updating"
            #Update the data in-place
            ms.set(x=x, y=y, z=z)
        else:
            print "New Data Shape... resetting data"
            method = 'new_source'
            if method == 'tvtk':
                #Modify TVTK directly
                p = np.array([x.flatten(), y.flatten(), z.flatten()]).T
                ms.dataset.points = p
                ms.dataset.point_data.scalars = np.zeros(x.shape)
                ms.dataset.points.modified()
                #Regenerate the data structure
                ms.reset(x=x, y=y, z=z)
            elif method == 'reset':
                #Regenerate the data structure
                ms.reset(x=x, y=y, z=z)
            elif method == 'new_source':
                scene = self._scene
                #Save camera settings
                self._save_camera()
                #Disable rendering
                self._scene.disable_render = True
                #Delete old plot
                self.delete_plot()
                #Generate new mesh
                self._source = self._mlab.mesh(x, y, z)
                #Restore camera
                self._restore_camera()
                #Re-enable rendering
                self._scene.disable_render = False

    def _save_camera(self):
        scene = self._scene
        #Save camera settings
        for setting in self._camera.keys():
            self._camera[setting] = getattr(scene.camera, setting)

    def _restore_camera(self):
        scene = self._scene
        #Restore camera settings
        for setting in self._camera.keys():
            if self._camera[setting] is not None:
                setattr(scene.camera, setting, self._camera[setting])

    def delete_plot(self):
        #Remove the existing surface, if any
        if self._source is not None:
            self._source.remove()
            self._source = None
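For context, a hypothetical usage sketch of this class together with the Sphere from the question (the scene wiring via mlab.gcf() and the call sequence are assumptions, not part of the original code):

# Hypothetical usage sketch: wire a Sphere to a PlottablePrimitive and
# re-plot after each edit, assuming an existing Mayavi figure/scene.
from mayavi import mlab

sphere = Sphere(c=[0.0, 0.0, 0.0], r=1.0, n=20)
plottable = PlottablePrimitive(shape=sphere,
                               scene=mlab.gcf().scene,  # assumption: use the current figure's scene
                               mlab=mlab)
plottable.plot()

# Same-shaped edit: handled by the ms.set() branch
sphere.r = 2.0
plottable.update_plot()

# Shape-changing edit: falls into the 'new_source' branch, which rebuilds
# the mesh while saving and restoring the camera
sphere.n = 50
plottable.update_plot()

mlab.show()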