I'd like to sum the areas of the five largest contours after sorting, or of all of them if there are fewer than five.
The robot follows people based on color, but sometimes several people wear the same color, and I'd like to pick one of them using the contour area. I used the following line for two contours, but this approach doesn't generalize well: area1 = cv2.contourArea(cnts[0]) + cv2.contourArea(cnts[1])
Full code:
import cv2
import numpy as np
from imutils.video import FPS
import time
cap = cv2.VideoCapture(0)
width = cap.get(3) # float
height = cap.get(4) # float
print width, height
time.sleep(2.0)
fps = FPS().start()
while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([180, 255, 30], np.uint8)
    black = cv2.inRange(hsv, black_lower, black_upper)
    kernal = np.ones((5, 5), "uint8")
    black = cv2.dilate(black, kernal)
    res_black = cv2.bitwise_and(img, img, mask=black)
    # Tracking black
    (_, contours, hierarchy) = cv2.findContours(black, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:2000]  # get largest 2000 contour areas
    area1 = cv2.contourArea(cnts[0]) + cv2.contourArea(cnts[1])
    # area2 = cv2.contourArea(cnts[0])
    # total = area1 + area2
    print 'area', area1, type(cnts)
    rects = []
    print len(cnts), type(cnts[1])
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            rect = (x, y, w, h)
            rects.append(rect)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.putText(img, "Black Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
Any help or suggestions would be appreciated.
You can collect the areas in a list (areas = []) and sum them, but you may then face another issue: this gives the sum of the areas for all detected people.
import cv2
import numpy as np
from imutils.video import FPS
import time
cap = cv2.VideoCapture(0)
width = cap.get(3) # float
height = cap.get(4) # float
print width, height
time.sleep(2.0)
fps = FPS().start()
while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([180, 255, 30], np.uint8)
    black = cv2.inRange(hsv, black_lower, black_upper)
    kernal = np.ones((5, 5), "uint8")
    black = cv2.dilate(black, kernal)
    res_black = cv2.bitwise_and(img, img, mask=black)
    # Tracking black
    (_, contours, hierarchy) = cv2.findContours(black, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:5]  # get largest five contour areas
    areas = []
    for contour in cnts:
        area = cv2.contourArea(contour)
        if area > 300:
            areas.append(area)
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.putText(img, "Black Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
    a = sum(areas)
    print areas
    print a
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
You can also do it with just this single line:
area = sum([cv2.contourArea(cnt) for cnt in sorted(cnts, key=cv2.contourArea, reverse=True)[:5]])
Below is the full code so you can compare the two approaches.
import cv2
import numpy as np
from imutils.video import FPS
import time
cap = cv2.VideoCapture(0)
width = cap.get(3) # float
height = cap.get(4) # float
print width, height
time.sleep(2.0)
fps = FPS().start()
while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([180, 255, 30], np.uint8)
    black = cv2.inRange(hsv, black_lower, black_upper)
    kernal = np.ones((5, 5), "uint8")
    black = cv2.dilate(black, kernal)
    res_black = cv2.bitwise_and(img, img, mask=black)
    # Tracking black
    (_, contours, hierarchy) = cv2.findContours(black, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:5]  # get largest five contour areas
    area = sum([cv2.contourArea(cnt) for cnt in sorted(cnts, key=cv2.contourArea, reverse=True)[:5]])
    print 'area_method1', area
    areas = []
    for contour in cnts:
        area = cv2.contourArea(contour)
        if area > 300:
            areas.append(area)
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.putText(img, "Black Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
    a = sum(areas)
    # print areas
    print 'area_method2', a
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
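One note on the "fewer than five" part of the question: the [:5] slice already handles that case, because slicing a list shorter than five just returns the whole list. As a minimal sketch (the helper name sum_top_areas is only for illustration):
def sum_top_areas(contours, k=5):
    # sort by area (largest first) and keep at most k contours;
    # slicing a list shorter than k simply returns all of it
    top = sorted(contours, key=cv2.contourArea, reverse=True)[:k]
    return sum(cv2.contourArea(c) for c in top)
total_area = sum_top_areas(contours)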
I need to run some parts of the code on the GPU using CuPy instead of NumPy, so I commented out the line # import numpy as np and used import cupy as np instead.
The full code:
from imutils.video import VideoStream
from imutils.video import FPS
# import numpy as np
import cupy as np
import argparse
import imutils
import time
import cv2
net = cv2.dnn.readNetFromCaffe('prototxt.txt', 'caffemodel')
vs = cv2.VideoCapture(0)
vs.release()
vs = cv2.VideoCapture(0)
time.sleep(2.0)
fps = FPS().start()
while True:
    ret, frame = vs.read()  # add ret,
    frame = imutils.resize(frame, width=400)
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()
    big_area = 0
    big_center = 320
    detected = 0
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        object_type = int(detections[0, 0, i, 1])
        if object_type == 15 and confidence > 0.2:
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            label = "{}: {:.2f}%".format('person', confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), [0, 0, 255], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0, 0, 255], 2)
            rect_area = (endX - startX) * (endY - startY)
            detected = 1
            if rect_area > big_area:
                big_area = rect_area
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
vs.release()
cv2.destroyAllWindows()
How can I fix this error so that CuPy works here?
/home/redhwan/learn1.py:26: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
confidence = detections[0, 0, i, 2]
/home/redhwan/learn1.py:27: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
object_type = int(detections[0, 0, i, 1])
/home/redhwan/learn1.py:29: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
Traceback (most recent call last):
File "/home/redhwan/learn1.py", line 29, in <module>
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
File "cupy/core/core.pyx", line 940, in cupy.core.core.ndarray.__mul__
File "cupy/core/_kernel.pyx", line 811, in cupy.core._kernel.ufunc.__call__
File "cupy/core/_kernel.pyx", line 89, in cupy.core._kernel._preprocess_args
TypeError: Unsupported type <type 'numpy.ndarray'>
data here
Any ideas or suggestions would be appreciated.
The detections need to be a CuPy array too:
detections = np.array(net.forward())
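If you later need the result back on the CPU (for example, OpenCV drawing calls expect host NumPy data), here is a minimal sketch of the usual NumPy/CuPy conversions; the array contents are just placeholders:
import numpy as np
import cupy as cp

cpu_box = np.array([400.0, 300.0, 400.0, 300.0])  # host (NumPy) array
gpu_box = cp.asarray(cpu_box)                     # copy host -> device (CuPy array)
back_on_cpu = cp.asnumpy(gpu_box)                 # copy device -> host; gpu_box.get() does the same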
I need to fill the two sides of a graph with different colors (green and red, perhaps). I'm using the following code:
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
range_x = [-1, 0, 1, 2]
range_y = [-5, -3, -1, 1]
ax = sns.lineplot(x = range_x, y = range_y, markers = True)
sns.lineplot(ax = ax, x = [range_x[0], range_x[-1]], y = [0, 0], color = 'black')
sns.lineplot(ax = ax, x = [0, 0], y = [range_y[0], range_y[-1]], color = 'black')
ax.fill_between(range_x, range_y, facecolor = 'red', alpha = 0.5)
plt.savefig('test_fig', bbox_inches = 'tight')
plt.close()
With that code I get the following figure:
But this is obviously wrong, because I want the red color everywhere above the blue line. Besides, I want my x and y axes drawn in a prominent way; it works for the x axis, but I don't know why I can't get it for the y axis.
Thank you very much in advance!
Something like this?
ax = sns.lineplot(x = range_x, y = range_y, markers = True)
sns.lineplot(ax = ax, x = [range_x[0], range_x[-1]], y = [0, 0], color = 'black')
sns.lineplot(ax = ax, x = [0, 0], y = [range_y[0], range_y[-1]], color = 'black')
ax.fill_between(range_x, range_y,[ax.get_ylim()[1]]*len(range_x), facecolor = 'red', alpha = 0.5)
ax.fill_between(range_x, range_y,[ax.get_ylim()[0]]*len(range_x), facecolor = 'green', alpha = 0.5)
From the documentation of fill_between:
y2 : array (length N) or scalar, optional, default: 0
The y coordinates of the nodes defining the second curve.
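As a small, self-contained sketch of what y2 does (a scalar or an array defines the second boundary of the filled region), assuming nothing beyond matplotlib itself:
import numpy as np
import matplotlib.pyplot as plt

x = np.array([-1, 0, 1, 2])
y = np.array([-5, -3, -1, 1])

fig, ax = plt.subplots()
ax.plot(x, y)
ax.fill_between(x, y, 1, facecolor='red', alpha=0.5)     # scalar y2: fill between the curve and y = 1
ax.fill_between(x, y, -5, facecolor='green', alpha=0.5)  # scalar y2: fill between the curve and y = -5
plt.show()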
I am attempting to implement a perceptron. I have loaded a 100x2 array of values between 0 and 100, and each item in the array has a label of either -1 or 1.
I believe the perceptron is working, however I cannot plot the decision boundary as shown here: plot decision boundary matplotlib
When I run my code I only see a single-color background. I would expect to see two colors, one for each label in my data set (-1 and 1).
My current output (I expect to see two background colors, one for -1 and one for 1):
An example of what I hope to see, from the sklearn documentation:
import numpy as np
from matplotlib import pyplot as plt
def generate_data():
    # generate a dataset that is linearly separable
    group_1 = np.random.randint(50, 100, size=(50, 2))
    group_1_labels = np.full((50, 1), 1)
    group_2 = np.random.randint(0, 49, size=(50, 2))
    group_2_labels = np.full((50, 1), -1)
    # add a bias value of -1
    bias = np.full((50, 1), -1)
    # add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    # merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data

data = generate_data()

# load data, strip labels, add a -1 bias value
X = data[:, :3]
# create labels matrix
l = np.ravel(data[:, 3:])

def perceptron_sgd(X, l, c, epochs):
    # initialize weights
    w = np.zeros(3)
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print "epoch " + str(epoch) + ": " + str(w)
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w)
    print z
    z[z <= 0] = -1
    z[z > 0] = 1
    # return a matrix of predicted labels
    return z

w, errors = perceptron_sgd(X, l, .001, 36)

# X - some data in 2-dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .2), np.arange(y_min, y_max, .2))

# here "model" is your model's prediction (classification) function
Z = classify(np.c_[xx.ravel(), yy.ravel()], l, w[:-1])  # strip the bias from weights

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
I got it to work by making the following changes.
Standardize your X:
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))
Use a better initialization than zeros:
#initialize weights
r = np.sqrt(2)
w = np.random.uniform(-r, r, (3,))
Add the learned bias during prediction:
z = np.dot(X, w[:-1]) + w[-1]
Standardize during prediction as well (using the scaler fitted on the training inputs):
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]),
l, w) #strip the bias from weights
Generally, it's always a good idea to standardize the inputs.
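For instance, a small standalone sketch of what the standardization step does to a feature column (roughly zero mean and unit variance); the numbers are just illustrative:
import numpy as np
from sklearn import preprocessing

raw = np.array([[50.0], [60.0], [90.0], [100.0]])   # one raw feature column
scaler = preprocessing.StandardScaler().fit(raw)
scaled = scaler.transform(raw)
print(scaled.mean(), scaled.std())                  # approximately 0.0 and 1.0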
Entire code:
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
def generate_data():
    # generate a dataset that is linearly separable
    group_1 = np.random.randint(50, 100, size=(50, 2))
    group_1_labels = np.full((50, 1), 1)
    group_2 = np.random.randint(0, 49, size=(50, 2))
    group_2_labels = np.full((50, 1), -1)
    # add a bias value of -1
    bias = np.full((50, 1), -1)
    # add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    # merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data

data = generate_data()

# load data, strip labels, add a -1 bias value
X = data[:, :3]
# create labels matrix
l = np.ravel(data[:, 3:])

from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))

def perceptron_sgd(X, l, c, epochs):
    # initialize weights
    r = np.sqrt(2)
    w = np.random.uniform(-r, r, (3,))
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print("epoch " + str(epoch) + ": " + str(w))
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w[:-1]) + w[-1]
    print(z)
    z[z <= 0] = -1
    z[z > 0] = 1
    # return a matrix of predicted labels
    return z

w, errors = perceptron_sgd(X_trans, l, .01, 25)

# X - some data in 2-dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))

# here "model" is your model's prediction (classification) function
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]), l, w)

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
# plt.axis('off')

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
I am trying to detect tables and chairs in an image and do not have much success with the code below. It also detects other squares, etc. Any hints?
import cv2
image = cv2.imread("nao.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
cv2.waitKey(0)
edged = cv2.Canny(gray, 10, 250)
cv2.waitKey(0)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
cv2.waitKey(0)
(_,cnts, _) = cv2.findContours(closed.copy(), cv2.CHAIN_APPROX_NONE, cv2.CHAIN_APPROX_SIMPLE)
total = 0
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.01 * peri, True)
    if len(approx) == 4:
        print approx
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 4)
        total += 1
print "I found {0} lines in that image".format(total)
cv2.imshow("Output", image)
cv2.waitKey(0)
So I wanted to see if I could make fractal flames using matplotlib, and figured a good test would be the Sierpinski triangle. I modified a working version I had that simply performed the chaos game, by normalizing the x range from (-2, 2) to (0, 400) and the y range from (0, 2) to (0, 200). I also truncated the x and y coordinates to 2 decimal places and multiplied by 100 so that the coordinates could be put into a matrix that I could apply a color map to. Here's the code I'm working on right now (please forgive the messiness):
import numpy as np
import matplotlib.pyplot as plt
import math
import random
def f(x, y, n):
    N = np.array([[x, y]])
    M = np.array([[1/2.0, 0], [0, 1/2.0]])
    b = np.array([[.5], [0]])
    b2 = np.array([[0], [.5]])
    if n == 0:
        return np.dot(M, N.T)
    elif n == 1:
        return np.dot(M, N.T) + 2*b
    elif n == 2:
        return np.dot(M, N.T) + 2*b2
    elif n == 3:
        return np.dot(M, N.T) - 2*b

def norm_x(n, minX_1, maxX_1, minX_2, maxX_2):
    rng = maxX_1 - minX_1
    n = (n - minX_1) / rng
    rng_2 = maxX_2 - minX_2
    n = (n * rng_2) + minX_2
    return n

def norm_y(n, minY_1, maxY_1, minY_2, maxY_2):
    rng = maxY_1 - minY_1
    n = (n - minY_1) / rng
    rng_2 = maxY_2 - minY_2
    n = (n * rng_2) + minY_2
    return n

# Plot ranges
x_min, x_max = -2.0, 2.0
y_min, y_max = 0, 2.0

# Even intervals for points to compute orbits of
x_range = np.arange(x_min, x_max, (x_max - x_min) / 400.0)
y_range = np.arange(y_min, y_max, (y_max - y_min) / 200.0)

mat = np.zeros((len(x_range) + 1, len(y_range) + 1))

random.seed()
x = 1
y = 1
for i in range(0, 100000):
    n = random.randint(0, 3)
    V = f(x, y, n)
    x = V.item(0)
    y = V.item(1)
    mat[norm_x(x, -2, 2, 0, 400), norm_y(y, 0, 2, 0, 200)] += 50
plt.xlabel('x0')
plt.ylabel('y')
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2,2, 0, 2])
plt.show()
The mathematics seems solid here, so I suspect something weird is going on with how I'm mapping points into the 'mat' matrix and how the values in it correspond to the colormap.
If I understood your problem correctly, you need to transpose your matrix using the method .T. So just replace
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2,2, 0, 2])
plt.show()
by
fig = plt.figure(figsize=(10, 10))
ax = plt.gca()
ax.imshow(mat.T, cmap="spectral", extent=[-2, 2, 0, 2], origin="lower")
plt.show()
The argument origin="lower" tells imshow to place the origin of your matrix at the bottom of the figure.
Hope it helps.
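A tiny sketch of what origin changes, independent of the fractal code, in case it helps to see the difference side by side:
import numpy as np
import matplotlib.pyplot as plt

m = np.zeros((5, 5))
m[0, 0] = 1  # mark the element at row 0, column 0

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(m, origin="upper")  # default: row 0 is drawn at the top of the axes
ax2.imshow(m, origin="lower")  # row 0 is drawn at the bottom of the axes
plt.show()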