imshow a gray image and a binary image python - python-2.7

I have a grayscale image and a binary image, and I want to plot them side by side using hstack. It looks like some kind of adjustment is being applied that darkens the binary image. Has anybody faced this problem?
Here is my code
O = (self.img >= t) * 1   # threshold the grayscale image into a 0/1 binary image
I = img
both = np.hstack((I, O))
imshow(both, cmap='gray')  # imshow/show come from matplotlib.pyplot
show()

This demo is somewhat different from your case, since I don't know what your data looks like. I suspect that all the values in your array 'O' are zero, so that half of the plot came out as a black pane.
import numpy as np
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(8, 4))
# make up some data for demo purposes
raw = np.random.randint(10, size=(6,6))
# apply some logic operation to the data
O = (raw >= 5) * 1 # get either 0 or 1 in the array
I = np.random.randint(10, size=(6,6)) # get 0-9 in the array
# plot each image ...
# ... side by side
fig.add_subplot(1, 2, 1) # subplot one
plt.imshow(I, cmap=plt.cm.gray)
fig.add_subplot(1, 2, 2) # subplot two
# my data is OK to use gray colormap (0:black, 1:white)
plt.imshow(O, cmap=plt.cm.gray) # use appropriate colormap here
plt.show()
The resulting image:

The code from the question works fine.
import matplotlib.pyplot as plt
import numpy as np
img = plt.imread("https://i.stack.imgur.com/oos05.png")[88:456,82:326]
t = 0.5
O = (img >= t) * 1
I = img
both = np.hstack((I, O))
plt.imshow(both, cmap='gray')
plt.show()
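One thing worth checking: imshow scales the colormap to the minimum and maximum of the whole stacked array. If the grayscale image is in the 0-1 range (as a float image read by plt.imread is) this is harmless, but if it is uint8 with values 0-255, the 0/1 mask ends up nearly black next to it. A minimal sketch of one workaround, using a synthetic uint8 ramp and a threshold of 128 as placeholders:
import numpy as np
import matplotlib.pyplot as plt
img = np.tile(np.arange(256, dtype=np.uint8), (256, 1))  # synthetic grayscale ramp, values 0-255
t = 128                                                   # placeholder threshold
O = (img >= t).astype(np.uint8) * 255                     # scale the 0/1 mask up to the image's range
both = np.hstack((img, O))
plt.imshow(both, cmap='gray')
plt.show()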

Related

want to create an empty matrix of unknown dimension and append feature vectors

I want to append HoG feature vectors to an empty matrix of unknown dimension. Is it required to specify the dimension of the matrix in advance? I have tried some code in Python, but it raises an error saying all the input arrays must have the same dimensions.
import matplotlib.pyplot as plt
from skimage.feature import hog
from skimage import data, exposure, img_as_float
from skimage import data
import numpy as np
from scipy import linalg
import cv2
import glob
shape = (16576, 1)
X = np.empty(shape)
print X.shape
hog_image = np.empty(shape)
hog_image_rescaled = np.empty(shape)
for img in glob.glob("/home/madhuri/pythoncode/faces/*.jpg"):
    n = cv2.imread(img)
    gray = cv2.cvtColor(n, cv2.COLOR_RGB2GRAY)
    hog_image = hog(gray, orientations=9, pixels_per_cell=(16, 16),
                    cells_per_block=(3, 3), visualise=False)
    hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                    in_range=(0, 10))
    X = np.append(X, hog_image_rescaled, axis=1)
print 'X is'
print np.shape(X)
X = []  # use an 'empty' list
# hog_image = np.empty(shape)           # no point initializing these variables
# hog_image_rescaled = np.empty(shape)  # you just reassign them in the loop
for img in glob.glob("/home/madhuri/pythoncode/faces/*.jpg"):
    n = cv2.imread(img)
    gray = cv2.cvtColor(n, cv2.COLOR_RGB2GRAY)
    hog_image = hog(gray, orientations=9, pixels_per_cell=(16, 16),
                    cells_per_block=(3, 3), visualise=False)
    hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                    in_range=(0, 10))
    X.append(hog_image_rescaled)
Now X will be a list of rescaled images. Those elements can then be concatenated along whichever dimension is appropriate:
np.concatenate(X, axis=1)
np.stack(X)
# etc
The list model of
alist = []
for ...:
    alist.append(...)
does not translate well to arrays. np.append is a cover for np.concatenate, and makes a new array, which is more expensive than list append. And defining a good starting 'empty' array for such a loop is tricky. np.empty is not appropriate:
In [977]: np.empty((2,3))
Out[977]:
array([[1.48e-323, 1.24e-322, 1.33e-322],
[1.33e-322, 1.38e-322, 1.38e-322]])
In [978]: np.append(_, np.zeros((2,1)), axis=1)
Out[978]:
array([[1.48e-323, 1.24e-322, 1.33e-322, 0.00e+000],
[1.33e-322, 1.38e-322, 1.38e-322, 0.00e+000]])
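If you do want to stick with np.append, the trick is to start from an array with zero length along the concatenation axis, so there are no garbage columns to carry along. A small sketch, assuming each HOG vector has length 16576 (adjust to your actual feature length):
import numpy as np
n_features = 16576                   # assumed length of one HOG vector
X = np.zeros((n_features, 0))        # zero columns along the concatenation axis
for _ in range(3):
    vec = np.random.rand(n_features)               # stand-in for hog_image_rescaled
    X = np.append(X, vec[:, np.newaxis], axis=1)   # still copies X on every iteration
print(np.shape(X))                   # (16576, 3)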

Cut a bounding box using numpy meshgrid python

I want to create a bounding box out of the following dimensions using meshgrid, but I'm just not able to get the right box.
My parent dimensions are x = 0 to 19541 and y = 0 to 14394. Out of that, I want to cut a box from x' = 4692 to 12720 and y' = 4273 to 10117.
However, I am not getting the right bounds. Could someone please help me here?
from matplotlib.path import Path
xmin, xmax = 4692, 12720
ymin, ymax = 4273, 10117
sar_ver = [(4692, 10117), (12720, 10117), (12658, 4274), (4769, 4273), (4692, 10117)]
x, y = np.meshgrid(np.arange(xmin, xmax + 1), np.arange(ymin, ymax + 1))
shx = x
x, y = x.flatten(), y.flatten()
points = np.vstack((x, y)).T
path = Path(sar_ver)
grid = path.contains_points(points)
grid.shape = shx.shape # 5845 X 8029
print grid
UPDATE: This is what I tried, and I am close to what I want, but not quite there. I want to change the origin from (0, 0) to the shape's surrounding box, as shown in the expected output.
The updated code that I am using is this
from matplotlib.path import Path
nx, ny = 16886, 10079
sar_ver = [(16886, 1085), (15139, 2122), (14475, 5226), (8419, 5601), (14046, 6876), (14147, 10079), (16816, 3748), (16886, 1085)]
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x, y = x.flatten(), y.flatten()
points = np.vstack((x,y)).T
path = Path(sar_ver)
grid = path.contains_points(points)
grid.shape = (10079, 16886)
grid = np.multiply(grid,255)
int_grid = grid.astype(np.uint8)
grid_img = Image.fromarray(int_grid)
grid_img.save('grid_image.png') # ACTUAL OUTPUT IMAGE WITH ORIGIN NOT SHIFTED
Input geom:
Expected output is this (it doesn't matter if the image is rotated the other way round, but it would be a cherry on top if it were aligned correctly):
However, this is the ACTUAL OUTPUT I am getting right now from the updated code posted above:
So I want to shift the origin around the box.
BOUNDING BOX PROBLEM DETAILS AFTER GETTING THE MASK: this code comes after the grid_img.save('grid_image.png') line posted in the update above.
Here im is the matrix of the actual image. What should the x-y min/max of im be so that it has the same shape as the mask, letting me multiply the two to keep the pixel values inside the polygon and cancel the rest out with 0s?
img_x = 19541 # 0 - 19541
img_y = 14394 # 0 - 14394
im = np.fromfile(binary_file_path, dtype='>f4')
im = np.reshape(im.astype(np.float32), (img_x, img_y))
im = im[:10079, :16886]
bb_list = np.multiply(grid, im)
# slice and dice
slice_rows = np.any(bb_list, axis=1)
slice_cols = np.any(bb_list, axis=0)
ymin, ymax = np.where(slice_rows)[0][[0, -1]]
xmin, xmax = np.where(slice_cols)[0][[0, -1]]
answer = bb_list[ymin:ymax + 1, xmin:xmax + 1]
# convert to unit8
int_ans = answer.astype(np.uint8)
fin_img = Image.fromarray(int_ans)
fin_img.save('test_this.jpeg')
My GOAL is to cut a polygon of a given geometry out of a given image. So I am taking the mask of that polygon and then using the mask to cut the same region out of the original image, multiplying the mask's 1s and 0s with the pixel values in the image so that only 1 * pixel values survive.
I tried the following to crop the actual image to the same dimensions as the mask so that I can call np.multiply(im, mask), but it didn't work: the image is not cut to the same shape as the mask. I tried your min and max below, but it didn't work!
im = im[xmin:xmax, ymin:ymax]
ipdb> im.shape
(5975, 8994)
ipdb> mask.shape
(8994, 8467)
Clearly I cannot multiply mask and im now.
I think you got it almost right in the first attempt; in the second one you're building a meshgrid for the full image, while you just want the shape mask, don't you?
import numpy as np
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib import patches
import matplotlib.pyplot as plt
from PIL import Image
sar_ver = [(16886, 1085), (15139, 2122), (14475, 5226), (8419, 5601),
(14046, 6876), (14147, 10079), (16816, 3748), (16886, 1085)]
path = Path(sar_ver)
xmin, ymin, xmax, ymax = np.asarray(path.get_extents(), dtype=int).ravel()
x, y = np.mgrid[xmin:xmax, ymin:ymax]
points = np.transpose((x.ravel(), y.ravel()))
mask = path.contains_points(points)
mask = mask.reshape(x.shape).T
img = Image.fromarray((mask * 255).astype(np.uint8))
img.save('mask.png')
# plot shape and mask for debug purposes
fig = plt.figure(figsize=(8,4))
gs = mpl.gridspec.GridSpec(1,2)
gs.update(wspace=0.2, hspace= 0.01)
ax = plt.subplot(gs[0])
patch = patches.PathPatch(path, facecolor='orange', lw=2)
ax.add_patch(patch)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax = plt.subplot(gs[1])
ax.imshow(mask, origin='lower')
plt.savefig("shapes.png", bbox_inches="tight", pad_inches=0)
It produces the mask:
And also plots both the mask and the path for debugging purposes:
The different orientation comes from the different origin position in matplotlib plots and images, but it should be trivial enough to change it the way you want.
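For instance, if you want the mask in the usual image orientation (origin at the top-left), one option is simply to flip it vertically before saving, or to display it with imshow's default origin; a tiny sketch with a dummy mask as a stand-in:
import numpy as np
mask = np.zeros((4, 6), dtype=bool)   # stand-in for the path mask computed above
mask[0, :] = True                      # row 0 is the bottom row in the origin='lower' view
flipped = np.flipud(mask)              # after flipping, row 0 is the top row of the image
# plt.imshow(mask, origin='lower') and plt.imshow(flipped) (default origin='upper')
# render the same picture.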
EDIT after latest question edits
Here's an updated script that takes an image, generates a mask for your path and cuts it out. I'm using a dummy image and scaling down shapes a bit so they're easier to work with.
import numpy as np
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib import patches
import matplotlib.pyplot as plt
import skimage.transform
import skimage.data
from PIL import Image
sar_ver = np.asarray([(16886, 1085), (15139, 2122), (14475, 5226), (8419, 5601),
(14046, 6876), (14147, 10079), (16816, 3748), (16886, 1085)])
# shrink the path so the mask is faster to compute while debugging
sar_ver = sar_ver // 20
# create dummy image
img = skimage.data.chelsea()
img = skimage.transform.rescale(img, 2)
# matplotlib path
path = Path(sar_ver)
xmin, ymin, xmax, ymax = np.asarray(path.get_extents(), dtype=int).ravel()
# create a mesh grid of the shape of the final mask
x, y = np.mgrid[:img.shape[1], :img.shape[0]]
# mesh grid to points
points = np.vstack((x.ravel(), y.ravel())).T
# mask for the point included in the path
mask = path.contains_points(points)
mask = mask.reshape(x.shape).T
# plots
fig = plt.figure(figsize=(8,6))
gs = mpl.gridspec.GridSpec(2,2)
gs.update(wspace=0.2, hspace= 0.2)
# image + patch
ax = plt.subplot(gs[0])
ax.imshow(img)
patch = patches.PathPatch(path, facecolor="None", edgecolor="cyan", lw=3)
ax.add_patch(patch)
# mask
ax = plt.subplot(gs[1])
ax.imshow(mask)
# filter image with mask
ax = plt.subplot(gs[2])
ax.imshow(img * mask[..., np.newaxis])
# remove mask from image
ax = plt.subplot(gs[3])
ax.imshow(img * ~mask[..., np.newaxis])
# plt.show()
plt.savefig("shapes.png", bbox_inches="tight", pad_inches=0)
I tried the OpenCV (cv2) library and it appears to be faster than meshgrid or mgrid on large images. Posting the OpenCV solution:
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.path import Path
sar_ver = np.array([[[1688, 108], [1513, 212], [1447, 522], [841, 560], [1404, 687], [1414, 1007], [1681, 374], [1688, 108]]] , 'int32')
print sar_ver.shape
mask=np.zeros((1439, 1954))
cv2.fillPoly(mask, sar_ver, 255)
sar_ver = np.asarray([(1688, 108), (1513, 212), (1447, 522), (841, 560), (1404, 687), (1414, 1007), (1681, 374), (1688, 108)])
path = Path(sar_ver)
xmin, ymin, xmax, ymax = np.asarray(path.get_extents(), dtype=int).ravel()
plt.imshow(mask[ymin:ymax+1, xmin:xmax+1])
plt.show()
Also posting the mgrid solution, with help from Filippo above and in online chat:
import cv2
from matplotlib.path import Path
from PIL import Image
import numpy as np
sar_ver = np.asarray([(1518, 2024), (2018, 2024), (1518, 2524), (1518, 2024)])
imag = cv2.imread('test_image.jpg')
img = cv2.cvtColor(imag, cv2.COLOR_BGR2GRAY)
h, w = img.shape
path = Path(sar_ver)
xmin, ymin, xmax, ymax = np.asarray(path.get_extents(), dtype=int).ravel()
# create a mesh grid of the shape of the final mask
x, y = np.mgrid[:w, :h]
# mesh grid to points
points = np.vstack((x.ravel(), y.ravel())).T
# mask for the point included in the path
mask = path.contains_points(points)
mask = mask.reshape(x.shape).T
im = np.array(img)
bb = np.multiply(im, mask)[ymin:ymax+1, xmin:xmax+1]
# saving image or we can do plt.show
int_ans = bb.astype(np.uint8)
fin = Image.fromarray(int_ans)
fin.save('crop_test.png')

Animating Stereonets

I have been looking around and have gotten nowhere with this. I am trying to animate the poles on a stereonet diagram. However, the poles do not appear at the locations they should.
Figure 1 is the animated pole plot, while Figure 2 is how the plot should look. I was wondering if anyone had an idea of how to proceed with this?
import matplotlib as mpl
mpl.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
import mplstereonet
fig, ax = mplstereonet.subplots()
fig2, ax1 = mplstereonet.subplots()
ax.grid(True)
ax1.grid(True)
# Assume a strike and dip with a random variance.
# Current values should plot the poles at either 0, 180
strike, dip = 90, 80
num = 10
strikes = strike + 10 * np.random.randn(num)
dips = dip + 10 * np.random.randn(num)
poles, = ax.pole([], [], 'o')
def init():
    poles.set_data([], [])
    return poles,

def animate(i):
    poles.set_data(strikes[:i], dips[:i])
    return poles,

anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=len(strikes), interval=100, blit=True, repeat=False)
poles1 = ax1.pole(strikes, dips, 'o') # This is how the final image should look like
plt.show()

add multiple colorbars to a subplot of polar contourf [duplicate]

I would like to add a separate colorbar to each subplot in a 2x2 plot.
fig , ( (ax1,ax2) , (ax3,ax4)) = plt.subplots(2, 2,sharex = True,sharey=True)
z1_plot = ax1.scatter(x,y,c = z1,vmin=0.0,vmax=0.4)
plt.colorbar(z1_plot,cax=ax1)
z2_plot = ax2.scatter(x,y,c = z2,vmin=0.0,vmax=40)
plt.colorbar(z1_plot,cax=ax2)
z3_plot = ax3.scatter(x,y,c = z3,vmin=0.0,vmax=894)
plt.colorbar(z1_plot,cax=ax3)
z4_plot = ax4.scatter(x,y,c = z4,vmin=0.0,vmax=234324)
plt.colorbar(z1_plot,cax=ax4)
plt.show()
I thought that this is how you do it, but the resulting plot is really messed up: it just has an all-grey background and ignores the set_xlim and set_ylim commands I have (not shown here for simplicity). It also shows no colorbars. Is this the right way to do it?
I also tried getting rid of the "cax=...", but then all the colorbars go on the bottom-right plot instead of one per subplot!
This can be easily solved with the utility make_axes_locatable. Here is a minimal example that shows how it works and should be readily adaptable:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
m1 = np.random.rand(3, 3)
m2 = np.arange(0, 3*3, 1).reshape((3, 3))
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(121)
im1 = ax1.imshow(m1, interpolation='None')
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im1, cax=cax, orientation='vertical')
ax2 = fig.add_subplot(122)
im2 = ax2.imshow(m2, interpolation='None')
divider = make_axes_locatable(ax2)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im2, cax=cax, orientation='vertical');
In plt.colorbar(z1_plot,cax=ax1), use ax= instead of cax=, i.e. plt.colorbar(z1_plot,ax=ax1)
Specify the ax argument to matplotlib.pyplot.colorbar(), e.g.
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 2)
for i in range(2):
    for j in range(2):
        data = np.array([[i, j], [i+0.5, j+0.5]])
        im = ax[i, j].imshow(data)
        plt.colorbar(im, ax=ax[i, j])
plt.show()
Please have a look at this matplotlib example page. There it is shown how to get the following plot with four individual colorbars for each subplot:
I hope this helps.
You can further have a look here, where you can find a lot of what you can do with matplotlib.
Try the function below to add a colorbar:
def add_colorbar(mappable):
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    import matplotlib.pyplot as plt
    last_axes = plt.gca()
    ax = mappable.axes
    fig = ax.figure
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = fig.colorbar(mappable, cax=cax)
    plt.sca(last_axes)
    return cbar
Then your code needs to be modified as:
fig , ( (ax1,ax2) , (ax3,ax4)) = plt.subplots(2, 2,sharex = True,sharey=True)
z1_plot = ax1.scatter(x,y,c = z1,vmin=0.0,vmax=0.4)
add_colorbar(z1_plot)

How to blend multiple images in OpenCV?

I have group of images and associated weights with it. I want to blend them together. I know there is a blend command in OpenCV to blend two images. But how do I blend multiple images together?
The blending can be done using the code below (this is in Java, using OpenCV):
//Create a black-colored image
Mat mergedImage = new Mat(inputImageSize, inputImageType, new Scalar(0));
//Add each image from a vector<Mat> inputImages with weight 1.0/n where n is number of images to merge
for (Mat mat : inputImages) {
    Core.addWeighted(mergedImage, 1, mat, 1.0/n, 0, mergedImage);
}
Edit: the above code suffers from rounding error. If inputImageType is an integer type, the weighting by 1.0/n is rounded at every step. Thus, the above code should be used only for floating-point matrices.
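One way to avoid the rounding (sketched here in Python/OpenCV to match the rest of this page; the file names are placeholders) is to accumulate in a floating-point matrix and convert back to 8-bit once at the end:
import numpy as np
import cv2

paths = ["img1.jpg", "img2.jpg", "img3.jpg"]        # placeholder file names
images = [cv2.imread(p) for p in paths]
n = len(images)
merged = np.zeros(images[0].shape, np.float64)       # float accumulator, no per-step rounding
for img in images:
    merged = cv2.addWeighted(merged, 1.0, img.astype(np.float64), 1.0 / n, 0)
merged = np.clip(merged, 0, 255).astype(np.uint8)    # convert back to 8-bit at the very end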
What about simple matrix operations, like the following?
blendedImage = weight_1 * image_1 + weight_2 * image_2 + ... + weight_n * image_n
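In NumPy that formulation could look like the following sketch (the dummy images and the weights here are made up; with uint8 inputs it is safest to cast to float first and convert back at the end):
import numpy as np
images = [np.random.randint(0, 256, (4, 4), dtype=np.uint8) for _ in range(3)]  # dummy images
weights = [0.5, 0.3, 0.2]                                                       # should sum to 1
blended = sum(w * img.astype(np.float64) for w, img in zip(weights, images))
blended = blended.astype(np.uint8)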
Here is Python code to blend multiple images in a list. I used the basic formulation from Dennis' answer.
First, let's get three images.
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
dim = (425, 425)
apple = mpimg.imread('apple.jpg')
apple = cv2.resize(apple, dim)
banana = mpimg.imread('banana.jpg')
banana = cv2.resize(banana, dim)
orange = mpimg.imread('orange.jpg')
orange = cv2.resize(orange, dim)
_ = plt.imshow(apple)
_ = plt.show()
_ = plt.imshow(banana)
_ = plt.show()
_ = plt.imshow(orange)
_ = plt.show()
Here are the images:
Now let's blend them together equally. Since there are three images, the fraction of each image's contribution to the final output is 0.333.
def blend(list_images):  # Blend images equally.
    equal_fraction = 1.0 / (len(list_images))
    output = np.zeros_like(list_images[0])
    for img in list_images:
        output = output + img * equal_fraction
    output = output.astype(np.uint8)
    return output
list_images = [apple, banana, orange]
output = blend(list_images)
_ = plt.imshow(output)
And here is the result: