matplotlib legend at the bottom of the figure with twinx - python-2.7

I am trying to draw a legend under two plots (created using twinx). I want the legend to be drawn at the bottom center, aligned in 4 columns. So far I have had no success. How can I position the legend with respect to the entire figure, not just a single axis object? Any help?
import matplotlib.pyplot as plt;
import numpy as np;
from matplotlib import rc;
filename = 'ml.pdf';
fig, ax1 = plt.subplots(frameon=False);
rc('mathtext', default='regular');
rc('lines',lw=2.6);
rc('lines',mew=2.4);
rc('text', usetex=True);
x = np.array([5,10,20,50]);
dp_g = np.array([23.43, 29.93, 36.50, 46.07]);
mr_g = np.array([25.33, 31.83, 38.39, 47.75]);
md_g = np.array([24.94, 31.33, 37.80, 47.10]);
sb_g = np.array([27.01, 34.86, 43.18, 54.35]);
lns1 = ax1.plot(x,dp_g,'bs:', label="MD\n($\lambda$=.8)");
lns2 = ax1.plot(x,mr_g,'bs--',label="MR\n($\lambda$=.1)");
lns3 = ax1.plot(x,md_g,'bs-.',label='MD');
lns4 = ax1.plot(x,sb_g,'bs-',label="SB\n($\gamma$=.1)");
ax1.set_ylabel('CG ($\times$ 100)',color='b',size=14);
ax1.set_ylim([20,57]);
ax1.set_xlim([4,51]);
ax1.set_xticks(x);
ax1.tick_params(axis='y', which=u'both', length=0, labelsize=14, colors='b');
ax1.tick_params(axis='x', which=u'both', length=0, labelsize=14);
ax2 = ax1.twinx();
dp_d = np.array([18.84, 19.55, 20.09, 20.08]);
mr_d = np.array([19.42, 19.73, 20.06, 20.04]);
md_d = np.array([19.02, 19.75, 20.28, 20.29]);
sb_d = np.array([20.81, 19.77, 19.20, 19.03]);
lns6 = ax2.plot(x,dp_d,'rv:',label="MD\n($\lambda$=.8)");
lns7 = ax2.plot(x,mr_d,'rv--',label="MR\n($\lambda$=.1)");
lns8 = ax2.plot(x,md_d,'rv-.',label='MD');
lns9 = ax2.plot(x,sb_d,'rv-',label="SB\n($\gamma$=.1)");
lns = lns1 + lns2 + lns3 + lns4 + lns6 + lns7 + lns8 + lns9;
labs = [l.get_label() for l in lns];
ax2.set_ylabel('LD ($\times$ 100)',color='r',size=14);
ax2.set_ylim([15,23]);
ax2.set_xlim([4,51]);
ax2.set_xticks(x);
ax2.tick_params(axis='y', which=u'both', length=0, labelsize=14, colors='r');
ax2.tick_params(axis='x', which=u'both', length=0, labelsize=14);
ax1.set_xlabel('\# of items',size=14);
borderaxespad=2.5, ncol = 1, fontsize='11.5');
lgd = ax1.legend(lns, labs, bbox_to_anchor=(1.01,1.0), loc='lower center', borderaxespad=2.5, ncol = 4, fontsize='14');
fig.savefig(filename,format='pdf',transparent=True, bbox_extra_artists=(lgd,), bbox_inches='tight');

Apart from the broken line borderaxespad=2.5, ncol = 1, fontsize='11.5');, I believe what you want to do is simply remove the bbox_to_anchor=(1.01, 1.0) from the legend definition. Doing so will put the legend at the bottom center of the plot (however, the legend is very wide, so it will span the entire width of the plot).
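For example, the adjusted legend call could look like the sketch below; the second, commented variant (with an assumed offset you would need to tune) pushes the legend fully below the axes instead:
# Answer's suggestion: drop bbox_to_anchor so loc='lower center' centers the
# legend at the bottom of the axes.
lgd = ax1.legend(lns, labs, loc='lower center', borderaxespad=2.5,
                 ncol=4, fontsize='14')
# Alternative (assumed offset, tune as needed): anchor the legend below the axes;
# keep bbox_extra_artists=(lgd,) and bbox_inches='tight' in savefig so it is not clipped.
# lgd = ax1.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, -0.15),
#                  ncol=4, fontsize='14')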

Related

How can I extract object segmentations from the coco dataset?

From the MSCOCO dataset segmentation annotations, how can I extract just the segmented objects themselves? For example, given an image of a person standing with a house in the background, how can I extract just the person themselves?
If your data is already in FiftyOne, then you can write a simple function using OpenCV and NumPy to crop out the segmentations in your FiftyOne labels. It could look something like this:
import os
import cv2
import numpy as np
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
def extract_classwise_instances(samples, output_dir, label_field, ext=".png"):
    print("Extracted object instances...")
    for sample in samples.iter_samples(progress=True):
        img = cv2.imread(sample.filepath)
        img_h, img_w, c = img.shape
        for det in sample[label_field].detections:
            mask = det.mask
            [x, y, w, h] = det.bounding_box
            x = int(x * img_w)
            y = int(y * img_h)
            h, w = mask.shape
            mask_img = img[y:y+h, x:x+w, :]
            alpha = mask.astype(np.uint8) * 255
            alpha = np.expand_dims(alpha, 2)
            mask_img = np.concatenate((mask_img, alpha), axis=2)
            label = det.label
            label_dir = os.path.join(output_dir, label)
            if not os.path.exists(label_dir):
                os.mkdir(label_dir)
            output_filepath = os.path.join(label_dir, det.id + ext)
            cv2.imwrite(output_filepath, mask_img)

label_field = "ground_truth"
classes = ["person"]

dataset = foz.load_zoo_dataset(
    "coco-2017",
    split="validation",
    label_types=["segmentations"],
    classes=classes,
    max_samples=20,
    label_field=label_field,
    dataset_name=fo.get_default_dataset_name(),
)

view = dataset.filter_labels(label_field, F("label").is_in(classes))

output_dir = "/tmp/coco-segmentations"
os.makedirs(output_dir, exist_ok=True)

extract_classwise_instances(view, output_dir, label_field)

How to fix the reprojection from EASE-2 grid product SMAP to geographic coordinates?

I have been working with SMAP satellite data, especially for moisture and soil properties.
I followed the idea of using GDAL to solve everything, and made something similar to what is published in Link to first approach to download SMAP data.
Modifying the code and testing:
import os
import h5py
import numpy as np
from osgeo import gdal, gdal_array, osr
# the file to download:
# https://n5eil01u.ecs.nsidc.org/SMAP/SPL4SMAU.003/2017.08.01/SMAP_L4_SM_aup_20170801T030000_Vv3030_001.h5
path = "/path/to/data"
h5File = h5py.File(path + "SMAP_L4_SM_aup_20170801T030000_Vv3030_001.h5", 'r')
data = h5File.get('Analysis_Data/sm_rootzone_analysis')
lat = h5File.get("cell_lat")
lon = h5File.get("cell_lon")
np_data = np.array(data)
np_lat = np.array(lat)
np_lon = np.array(lon)
num_cols = float(np_data.shape[1])
num_rows = float(np_data.shape[0])
xmin = np_lon.min()
xmax = np_lon.max()
ymin = np_lat.min()
ymax = np_lat.max()
xres = (xmax - xmin) / num_cols
yres = (ymax - ymin) / num_rows
nrows, ncols = np_data.shape
xres = (xmax - xmin) / float(ncols)
yres = (ymax - ymin) / float(nrows)
geotransform = (xmin, xres, 0, ymax, 0, -xres)
dataFileOutput = path + "sm_rootzone_analysis.tif"
output_raster = gdal.GetDriverByName('GTiff').Create(dataFileOutput, ncols, nrows, 1, gdal.GDT_Float32) # Open the file
output_raster.SetGeoTransform(geotransform)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
output_raster.SetProjection(srs.ExportToWkt())
output_raster.GetRasterBand(1).WriteArray(np_data) # Writes my array to the raster
del output_raster
So, using this approach, the result is a global map with many projection problems, as in the image produced by the Python code above.
To compare against correct data, the same image was extracted from the h5 file using NASA's HEG software.
If the data is really in the EASE-2 global grid, you shouldn't assign EPSG:4326 as the coordinate system with lat/lon degrees in the geotransform.
If you convert the lat/lon coordinates to the EASE2 Grid at 9km, your geotransform should be something like:
geotransform = (-17367530.44516138, 9000, 0, 7314540.79258289, 0, -9000.0)
and the srs:
srs.ImportFromEPSG(6933)
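Plugged into the script above, those two changes could look like the following sketch (the constants are the quoted 9 km EASE-Grid 2.0 values; adjust them if your product uses a different resolution):
# EASE-Grid 2.0 Global, 9 km resolution
geotransform = (-17367530.44516138, 9000, 0, 7314540.79258289, 0, -9000.0)
output_raster = gdal.GetDriverByName('GTiff').Create(dataFileOutput, ncols, nrows, 1, gdal.GDT_Float32)
output_raster.SetGeoTransform(geotransform)
srs = osr.SpatialReference()
srs.ImportFromEPSG(6933)          # EASE-Grid 2.0 Global instead of EPSG:4326
output_raster.SetProjection(srs.ExportToWkt())
output_raster.GetRasterBand(1).WriteArray(np_data)
output_raster = None              # flush and close the file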

Fitting an Image to a ROI

I have an ROI and an image, and I need to fill the ROI with that image. The image should scale according to the ROI's shape and size and should fill the entire ROI without repeating. How can I achieve this using OpenCV? Is there any method in OpenCV to achieve this?
Suppose this white section is my ROI, and this is my input image.
Is there any solution using ImageMagick?
Finding the optimal fit of one shape inside another is not trivial, but if you can settle for a suboptimal result, you can do the following:
import cv2
import numpy as np
from matplotlib import pyplot as plt
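Assuming bg_img is a single-channel binary mask of the ROI and the input image is a 4-channel PNG whose alpha channel becomes fruit_alpha, the setup could look like this (file names and the contour index are placeholders, not part of the original answer):
bg = cv2.imread('roi.png')                             # color ROI image, also used for compositing
bg_img = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)
_, bg_img = cv2.threshold(bg_img, 127, 255, cv2.THRESH_BINARY)
fruit = cv2.imread('fruit.png', cv2.IMREAD_UNCHANGED)  # 4-channel image (BGR + alpha)
fruit_rgb = fruit[:, :, :3]
fruit_alpha = fruit[:, :, 3]
max_c_ix = 0                                           # assumed: index of the fruit contour (e.g. the largest one)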
bg_contours, bg_hierarchy = cv2.findContours(bg_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
bg_contour = bg_contours[0]
bg_ellipse = cv2.fitEllipse(bg_contour)
p_contours, p_hierarchy = cv2.findContours(fruit_alpha, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
pear_hull = cv2.convexHull(p_contours[0])
pear_ellipse = cv2.fitEllipse(pear_hull)
min_ratio = min(bg_ellipse[1][0] / pear_ellipse[1][0], bg_ellipse[1][1] / pear_ellipse[1][1])
x_shift = bg_ellipse[0][0] - pear_ellipse[0][0] * min_ratio
y_shift = bg_ellipse[0][1] - pear_ellipse[0][1] * min_ratio
(Heuristic) Resize the fruit contour: start with an initial guess based on the ellipses, then refine using the contour (this can be improved, but it is a non-trivial optimization problem; you can read more here):
r_contour = np.array([[[int(j) for j in i[0]]] for i in min_ratio * p_contours[max_c_ix]])
min_dist, bad_pt = GetMinDist(outer_contour=bg_contour, inner_contour=r_contour, offset=(int(x_shift), int(y_shift)))
mask_size = max(bg_ellipse[1][0], bg_ellipse[1][1])
scale = min_ratio * (mask_size + min_dist) / mask_size
r_contour = np.array([[[int(j) for j in i[0]]] for i in scale * p_contours[max_c_ix]])
Combine the images using the alpha channel:
combined = CombineImages(bg, fruit_rgb, fruit_alpha, scale, (int(x_shift), int(y_shift)))
Utility functions:
def GetMinDist(outer_contour, inner_contour, offset):
    min_dist = 10000
    bad_pt = (0, 0)
    for i_pt in inner_contour:
        #pt = (float(i_pt[0][0]), float(i_pt[0][1]))
        pt = (i_pt[0][0] + int(offset[0]), i_pt[0][1] + int(offset[1]))
        dst = cv2.pointPolygonTest(outer_contour, pt, True)
        if dst < min_dist:
            min_dist = dst
            bad_pt = pt
    return min_dist, bad_pt

def CombineImages(mask_img, fruit_img, fruit_alpha, scale, offset):
    mask_height, mask_width, mask_dim = mask_img.shape
    combined_img = np.copy(mask_img)
    resized_fruit = np.copy(mask_img)
    resized_fruit[:] = 0
    resized_alpha = np.zeros((mask_height, mask_width), fruit_alpha.dtype)
    f_height, f_width, f_dim = fruit_img.shape
    r_fruit = cv2.resize(fruit_img, (int(f_width * scale), int(f_height * scale)))
    r_alpha = cv2.resize(fruit_alpha, (int(f_width * scale), int(f_height * scale)))
    height, width, channels = r_fruit.shape
    roi_x_from = offset[0]
    roi_x_to = offset[0] + width
    roi_y_from = offset[1]
    roi_y_to = offset[1] + height
    resized_fruit[roi_y_from:roi_y_to, roi_x_from:roi_x_to, :] = r_fruit
    resized_alpha[roi_y_from:roi_y_to, roi_x_from:roi_x_to] = r_alpha
    for y in range(0, mask_height):
        for x in range(0, mask_width):
            if resized_alpha[y, x] > 0:
                combined_img[y, x, :] = resized_fruit[y, x, :]
    return combined_img
I hope that helps.
(I omitted parts of the code that do not contribute to understanding the flow.)

Update textbox with text from browsed file. python

I am currently using Python 2.7 and Tkinter. I have a button that browses my directory, takes the selected file's path, and saves it to filename. I would like this to change the value of inputBox to the value of filename automatically when the file is chosen.
import os
from Tkinter import *
import tkFileDialog
root = Tk()
root.title("Doc Word Frequency")
root.geometry("600x300")
def close_window():
    root.destroy()

def browse_directory():
    filename = tkFileDialog.askopenfilename()
    print(filename)
    # Change value of inputBox
inputBox = Entry(root, width = 50)
inputBox.grid(row = 0, column = 0, padx = 20, pady = 20)
inputBox.insert(END, '"Upload Document File"')
inputBox.config(state = DISABLED)
Button(root, width = 9, text = 'Browse', command = browse_directory).grid(row = 0, column = 1, sticky = W, padx = 4)
Button(root, width = 9, text = 'Upload').grid(row = 0, column = 2, sticky = W, padx = 4)
Button(root, width = 9, text = 'Quit', command = close_window).grid(row = 0, column = 3, sticky = W, padx = 4)
mainloop( )
PS. I am quite new to Python and any constructive criticism would be appreciated.
You can insert text into an entry widget with its insert method. Because inputBox was configured with state=DISABLED, the callback must temporarily set it back to NORMAL before deleting and inserting text:
def browse_directory():
    filename = tkFileDialog.askopenfilename()
    print(filename)
    inputBox.configure(state=NORMAL)
    inputBox.delete(0, "end")
    inputBox.insert(0, filename)
    inputBox.configure(state=DISABLED)

Plotting points on Mapnik

I followed this tutorial in the mapnik github wiki to make a world map: https://github.com/mapnik/mapnik/wiki/GettingStartedInPython
I modified this example, and have now embedded the code into a PySide Qt widget. My question now is, how does one plot points on this map using x and y coordinates, or latitude and longitude?
Here is the code I'm using to generate the map and to embed it in the widget:
import mapnik
m = mapnik.Map(1200,600)
m.background = mapnik.Color('steelblue')
s = mapnik.Style()
r = mapnik.Rule()
polygon_symbolizer = mapnik.PolygonSymbolizer(mapnik.Color('#f2eff9'))
r.symbols.append(polygon_symbolizer)
line_symbolizer = mapnik.LineSymbolizer(mapnik.Color('rgb(50%,50%,50%)'),0.1)
r.symbols.append(line_symbolizer)
s.rules.append(r)
m.append_style('My Style',s)
ds = mapnik.Shapefile(file='/home/lee/shapefiles/ne_110m_admin_0_countries.shp')
layer = mapnik.Layer('world')
layer.datasource = ds
layer.styles.append('My Style')
m.layers.append(layer)
m.zoom_all()
im = mapnik.Image(1200,600)
mapnik.render(m, im)
qim = QImage()
qim.loadFromData(QByteArray(im.tostring('png')))
label = QLabel(self)
label.setPixmap(QPixmap.fromImage(qim))
self.layout.addWidget(label)
Usually, you would connect your map to a datasource such as a PostGIS or SQLite database and let Mapnik populate the points from that database, similar to something like this, either in a Python script or generated from XML.
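For instance, a PostGIS-backed datasource could be declared roughly like this (connection details and table name are placeholders):
# Placeholder connection details; the table is expected to have a geometry column.
ds = mapnik.PostGIS(host='localhost', user='postgres', password='',
                    dbname='gis', table='points_table')
layer = mapnik.Layer('points_layer')
layer.datasource = ds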
However, in answer to your question, you could plot Lat/Lon points by creating a new Feature from a WKT string and adding that feature to a mapnik.MemoryDatasource().
Below is a simple snippet from a script using the mapfile found here
First we create our style and add it to our map:
s = mapnik.Style() # style object to hold rules
r = mapnik.Rule() # rule object to hold symbolizers
point_sym = mapnik.PointSymbolizer()
point_sym.filename = './symbols/airport.p.16.png'
r.symbols.append(point_sym) # add the symbolizer to the rule object
s.rules.append(r)
m.append_style('airport point', s)
Now we create our data source and add a Point geometry in WKT format:
ds = mapnik.MemoryDatasource()
f = mapnik.Feature(mapnik.Context(), 1)
f.add_geometries_from_wkt("POINT(-92.289595 34.746481)")
ds.add_feature(f)
Now we must create a new layer, add our style that we created, and add the layer to our map:
player = mapnik.Layer('airport_layer')
# Since our map is Mercator but you wanted to add lat/lon points,
# we must make sure our layer projection is set to lat/lon.
# longlat is a mapnik.Projection defined earlier in the full script, e.g.:
# longlat = mapnik.Projection('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
player.srs = longlat.params()
player.datasource = ds
player.styles.append('airport point')
m.layers.append(player)
m.zoom_all()
You can look at the entire script here.
If you need to get a geographic coordinate (i.e. lat/lon) from a pixel coordinate, you probably need to add your own converter functions.
The Google Maps JS code below could perhaps help:
https://developers.google.com/maps/documentation/javascript/examples/map-coordinates
var TILE_SIZE = 256;

function bound(value, opt_min, opt_max) {
  if (opt_min != null) value = Math.max(value, opt_min);
  if (opt_max != null) value = Math.min(value, opt_max);
  return value;
}

function degreesToRadians(deg) {
  return deg * (Math.PI / 180);
}

function radiansToDegrees(rad) {
  return rad / (Math.PI / 180);
}

/** @constructor */
function MercatorProjection() {
  this.pixelOrigin_ = new google.maps.Point(TILE_SIZE / 2, TILE_SIZE / 2);
  this.pixelsPerLonDegree_ = TILE_SIZE / 360;
  this.pixelsPerLonRadian_ = TILE_SIZE / (2 * Math.PI);
}

MercatorProjection.prototype.fromLatLngToPoint = function(latLng, opt_point) {
  var me = this;
  var point = opt_point || new google.maps.Point(0, 0);
  var origin = me.pixelOrigin_;
  point.x = origin.x + latLng.lng() * me.pixelsPerLonDegree_;
  // Truncating to 0.9999 effectively limits latitude to 89.189. This is
  // about a third of a tile past the edge of the world tile.
  var siny = bound(Math.sin(degreesToRadians(latLng.lat())), -0.9999, 0.9999);
  point.y = origin.y + 0.5 * Math.log((1 + siny) / (1 - siny)) * -me.pixelsPerLonRadian_;
  return point;
};

MercatorProjection.prototype.fromPointToLatLng = function(point) {
  var me = this;
  var origin = me.pixelOrigin_;
  var lng = (point.x - origin.x) / me.pixelsPerLonDegree_;
  var latRadians = (point.y - origin.y) / -me.pixelsPerLonRadian_;
  var lat = radiansToDegrees(2 * Math.atan(Math.exp(latRadians)) - Math.PI / 2);
  return new google.maps.LatLng(lat, lng);
};
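If you prefer to stay in Python, a rough port of the same Web Mercator math could look like this (TILE_SIZE and the 0.9999 latitude clamp mirror the JS above; this is only a sketch, not part of the Mapnik API):
import math

TILE_SIZE = 256
PIXELS_PER_LON_DEGREE = TILE_SIZE / 360.0
PIXELS_PER_LON_RADIAN = TILE_SIZE / (2 * math.pi)
ORIGIN = (TILE_SIZE / 2.0, TILE_SIZE / 2.0)

def from_lat_lng_to_point(lat, lng):
    # Convert lat/lon in degrees to world pixel coordinates (zoom level 0).
    x = ORIGIN[0] + lng * PIXELS_PER_LON_DEGREE
    siny = min(max(math.sin(math.radians(lat)), -0.9999), 0.9999)
    y = ORIGIN[1] + 0.5 * math.log((1 + siny) / (1 - siny)) * -PIXELS_PER_LON_RADIAN
    return x, y

def from_point_to_lat_lng(x, y):
    # Inverse of the above: world pixel coordinates back to lat/lon in degrees.
    lng = (x - ORIGIN[0]) / PIXELS_PER_LON_DEGREE
    lat_radians = (y - ORIGIN[1]) / -PIXELS_PER_LON_RADIAN
    lat = math.degrees(2 * math.atan(math.exp(lat_radians)) - math.pi / 2)
    return lat, lng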