I'd like to sum the areas of the largest five contours after sorting, or of all of them if there are fewer than five.
The robot follows a person based on colour, but sometimes several people wear the same colour, and I'd like to choose one of them using the area. I used this line for two contours, but this approach is not good: area1 = cv2.contourArea(cnts[0]) + cv2.contourArea(cnts[1])
Full code:
import cv2
import numpy as np
from imutils.video import FPS
import time
cap = cv2.VideoCapture(0)
width = cap.get(3) # float
height = cap.get(4) # float
print width, height
time.sleep(2.0)
fps = FPS().start()
while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([180, 255, 30], np.uint8)
    black = cv2.inRange(hsv, black_lower, black_upper)
    kernal = np.ones((5, 5), "uint8")
    black = cv2.dilate(black, kernal)
    res_black = cv2.bitwise_and(img, img, mask=black)
    # Tracking black
    (_, contours, hierarchy) = cv2.findContours(black, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:2000]  # get largest 2000 contour areas
    area1 = cv2.contourArea(cnts[0]) + cv2.contourArea(cnts[1])
    # area2 = cv2.contourArea(cnts[0])
    # total = area1 + area2
    print 'area', area1, type(cnts)
    rects = []
    print len(cnts), type(cnts[1])
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            rect = (x, y, w, h)
            rects.append(rect)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.putText(img, "Black Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
Any help or suggestions would be appreciated.
You can collect the areas in a list (areas = []) and sum them, but then you may run into another issue: you get the sum of the areas for all detected people.
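The change itself is small; here is a minimal sketch of just the summing part (variable names match the full script below, and the 300-pixel threshold is just an assumed noise filter you can tune):

cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:5]  # largest five, or all of them if fewer
areas = []
for contour in cnts:
    area = cv2.contourArea(contour)
    if area > 300:  # assumed minimum area to skip tiny noise blobs
        areas.append(area)
total_area = sum(areas)

In context, the whole script looks like this: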
import cv2
import numpy as np
from imutils.video import FPS
import time
cap = cv2.VideoCapture(0)
width = cap.get(3) # float
height = cap.get(4) # float
print width, height
time.sleep(2.0)
fps = FPS().start()
while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([180, 255, 30], np.uint8)
    black = cv2.inRange(hsv, black_lower, black_upper)
    kernal = np.ones((5, 5), "uint8")
    black = cv2.dilate(black, kernal)
    res_black = cv2.bitwise_and(img, img, mask=black)
    # Tracking black
    (_, contours, hierarchy) = cv2.findContours(black, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:5]  # get the largest five contour areas
    areas = []
    for contour in cnts:
        area = cv2.contourArea(contour)
        if area > 300:
            areas.append(area)
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.putText(img, "Black Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
    a = sum(areas)
    print areas
    print a
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
You can also do it with just this one line:
area = sum([cv2.contourArea(cnt) for cnt in sorted(cnts, key=cv2.contourArea, reverse=True)[:5]])
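The [:5] slice also takes care of the "all of them if less than five" case, because slicing past the end of a list simply returns the whole list; a quick illustration with plain numbers:

values = [10.0, 4.0, 2.5]                       # only three "areas"
print sorted(values, reverse=True)[:5]          # [10.0, 4.0, 2.5], no IndexError
print sum(sorted(values, reverse=True)[:5])     # 16.5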
I will add the full code so you can compare the two methods:
import cv2
import numpy as np
from imutils.video import FPS
import time
cap = cv2.VideoCapture(0)
width = cap.get(3) # float
height = cap.get(4) # float
print width, height
time.sleep(2.0)
fps = FPS().start()
while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([180, 255, 30], np.uint8)
    black = cv2.inRange(hsv, black_lower, black_upper)
    kernal = np.ones((5, 5), "uint8")
    black = cv2.dilate(black, kernal)
    res_black = cv2.bitwise_and(img, img, mask=black)
    # Tracking black
    (_, contours, hierarchy) = cv2.findContours(black, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:5]  # get the largest five contour areas
    # Method 1: one-liner (the inner sort/slice is redundant here, since cnts is already sorted and sliced)
    area = sum([cv2.contourArea(cnt) for cnt in sorted(cnts, key=cv2.contourArea, reverse=True)[:5]])
    print 'area_method1', area
    # Method 2: explicit loop with a minimum-area filter
    areas = []
    for contour in cnts:
        area = cv2.contourArea(contour)
        if area > 300:
            areas.append(area)
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.putText(img, "Black Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))
    a = sum(areas)
    # print areas
    print 'area_method2', a
    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
I am attempting to implement a perceptron. I have loaded a 100x2 array of values between 0 and 100, and each item in the array has a label of either -1 or 1.
I believe the perceptron is working; however, I cannot plot the decision boundary as shown here: plot decision boundary matplotlib
When I run my code I only see a single-color background. I would expect to see two colors, one for each label in my data set (-1 and 1).
My current output (I expect to see two colors in the background, one for -1 and one for 1).
An example of what I hope to see, from the sklearn documentation.
import numpy as np
from matplotlib import pyplot as plt
def generate_data():
    # generate a dataset that is linearly separable
    group_1 = np.random.randint(50, 100, size=(50, 2))
    group_1_labels = np.full((50, 1), 1)
    group_2 = np.random.randint(0, 49, size=(50, 2))
    group_2_labels = np.full((50, 1), -1)
    # add a bias value of -1
    bias = np.full((50, 1), -1)
    # add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    # merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data
data = generate_data()
#load data, strip labels, add a -1 bias value
X = data[:, :3]
#create labels matrix
l = np.ravel(data[:, 3:])
def perceptron_sgd(X, l, c, epochs):
    # initialize weights
    w = np.zeros(3)
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print "epoch " + str(epoch) + ": " + str(w)
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w)
    print z
    z[z <= 0] = -1
    z[z > 0] = 1
    # return a matrix of predicted labels
    return z
w, errors = perceptron_sgd(X, l, .001, 36)
# X - some data in 2dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .2), np.arange(y_min, y_max, .2))
# here "model" is your model's prediction (classification) function
Z = classify(np.c_[xx.ravel(), yy.ravel()], l, w[:-1]) #strip the bias from weights
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
#Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
I got it to work.
Standardized your X:
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))
Used a better initialization than zeros:
#initialize weights
r = np.sqrt(2)
w = np.random.uniform(-r, r, (3,))
Added the learned bias during prediction:
z = np.dot(X, w[:-1]) + w[-1]
Standardized during prediction as well (using the scaler learned from the training input):
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]), l, w)  # bias is applied inside classify
Generally, it is always a good idea to standardize the inputs.
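For reference, StandardScaler just applies (x - mean) / std to each feature column; here is a minimal sketch of the equivalent manual computation (illustrative only, the scaler above is what the code actually uses):

features = X[:, :-1]                  # drop the bias column
mu = features.mean(axis=0)
sigma = features.std(axis=0)
X_manual = (features - mu) / sigma    # matches scaler.transform(features)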
Entire code:
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
def generate_data():
    # generate a dataset that is linearly separable
    group_1 = np.random.randint(50, 100, size=(50, 2))
    group_1_labels = np.full((50, 1), 1)
    group_2 = np.random.randint(0, 49, size=(50, 2))
    group_2_labels = np.full((50, 1), -1)
    # add a bias value of -1
    bias = np.full((50, 1), -1)
    # add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    # merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data
data = generate_data()
#load data, strip labels, add a -1 bias value
X = data[:, :3]
#create labels matrix
l = np.ravel(data[:, 3:])
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))
def perceptron_sgd(X, l, c, epochs):
    # initialize weights
    r = np.sqrt(2)
    w = np.random.uniform(-r, r, (3,))
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print("epoch " + str(epoch) + ": " + str(w))
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w[:-1]) + w[-1]
    print(z)
    z[z <= 0] = -1
    z[z > 0] = 1
    # return a matrix of predicted labels
    return z
w, errors = perceptron_sgd(X_trans, l, .01, 25)
# X - some data in 2dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))
# here "model" is your model's prediction (classification) function
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]), l, w) #strip the bias from weights
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
#plt.axis('off')
#Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
So I wanted to see if I could make fractal flames using matplotlib, and figured a good test would be the Sierpinski triangle. I modified a working version I had that simply performed the chaos game, normalizing the x range from [-2, 2] to [0, 400] and the y range from [0, 2] to [0, 200]. I also truncated the x and y coordinates to 2 decimal places and multiplied by 100 so that the coordinates could be put into a matrix that I could apply a color map to. Here's the code I'm working on right now (please forgive the messiness):
import numpy as np
import matplotlib.pyplot as plt
import math
import random
def f(x, y, n):
    N = np.array([[x, y]])
    M = np.array([[1/2.0, 0], [0, 1/2.0]])
    b = np.array([[.5], [0]])
    b2 = np.array([[0], [.5]])
    if n == 0:
        return np.dot(M, N.T)
    elif n == 1:
        return np.dot(M, N.T) + 2*b
    elif n == 2:
        return np.dot(M, N.T) + 2*b2
    elif n == 3:
        return np.dot(M, N.T) - 2*b

def norm_x(n, minX_1, maxX_1, minX_2, maxX_2):
    rng = maxX_1 - minX_1
    n = (n - minX_1) / rng
    rng_2 = maxX_2 - minX_2
    n = (n * rng_2) + minX_2
    return n

def norm_y(n, minY_1, maxY_1, minY_2, maxY_2):
    rng = maxY_1 - minY_1
    n = (n - minY_1) / rng
    rng_2 = maxY_2 - minY_2
    n = (n * rng_2) + minY_2
    return n
# Plot ranges
x_min, x_max = -2.0, 2.0
y_min, y_max = 0, 2.0
# Even intervals for points to compute orbits of
x_range = np.arange(x_min, x_max, (x_max - x_min) / 400.0)
y_range = np.arange(y_min, y_max, (y_max - y_min) / 200.0)
mat = np.zeros((len(x_range) + 1, len(y_range) + 1))
random.seed()
x = 1
y = 1
for i in range(0, 100000):
    n = random.randint(0, 3)
    V = f(x, y, n)
    x = V.item(0)
    y = V.item(1)
    # indices into mat must be integers
    mat[int(norm_x(x, -2, 2, 0, 400)), int(norm_y(y, 0, 2, 0, 200))] += 50
plt.xlabel('x0')
plt.ylabel('y')
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2,2, 0, 2])
plt.show()
The mathematics seem solid here so I suspect something weird is going on with how I'm handling where things should go into the 'mat' matrix and how the values in there correspond to the colormap.
If I understood your problem correctly, you need to transpose your matrix using the method .T. So just replace
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2,2, 0, 2])
plt.show()
by
fig = plt.figure(figsize=(10, 10))
ax = plt.gca()
ax.imshow(mat.T, cmap="spectral", extent=[-2, 2, 0, 2], origin="lower")
plt.show()
The argument origin="lower" tells imshow to place the origin of your matrix at the bottom of the figure.
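If you want to see the effect of origin in isolation, here is a small self-contained sketch (toy data, not the fractal) that draws the same matrix with both settings:

import numpy as np
import matplotlib.pyplot as plt

demo = np.arange(12).reshape(3, 4)      # row 0 holds the smallest values
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(demo, origin="upper")        # row 0 drawn at the top (the default)
ax1.set_title("origin='upper'")
ax2.imshow(demo, origin="lower")        # row 0 drawn at the bottom
ax2.set_title("origin='lower'")
plt.show()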
Hope it helps.
I have a set of X and Y coordinates and each point has a different pixel value - the Z quantity. I would like to plot these values using a raster or contour plot.
I am having difficulty doing this because there is no mathematical relationship between the pixel value and the X and Y coordinates.
I have created an array for the range of x and y values, and I have tried constructing a dictionary where I can look up the value of z using a concatenated x and y string. At the moment I am running into an indexing issue, and I am wondering if there is a better way of achieving this?
My code so far is:
import matplotlib.pyplot as plt
import numpy as np
XY_Zpoints = {'11':8,
'12':8,
'13':8,
'14':6,
'21':6,
'22':8,
'23':6,
'24':6,
'31':8,
'32':3,
'33':8,
'34':6,
'41':8,
'42':3,
'43':3,
'44':8,
}
x, y = np.meshgrid(np.linspace(1,4,4), np.linspace(1,4,4))
z = XY_Zpoints[str(x)+str(y)]
# Plot the grid
plt.imshow(z)
plt.spectral()
plt.show()
Thanks in advance for any help you can offer!
Instead of a dictionary, you can use a numpy array where the position of each pixel value corresponds to the x and y coordinates. For your example this array would look like:
z = np.array([[8, 8, 8, 6], [6, 8, 6, 6], [8, 3, 8, 6], [8, 3, 3, 8]])
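If you would rather not type the array out by hand, here is a minimal sketch that builds it from your XY_Zpoints dictionary, assuming the keys are a single-digit x followed by a single-digit y with 1-based coordinates:

z = np.zeros((4, 4))
for key, value in XY_Zpoints.items():
    x, y = int(key[0]), int(key[1])
    z[x - 1, y - 1] = value    # row = x - 1, column = y - 1, matching the indexing below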
To access the pixel value at x = 2 and y = 3, for example, you can do this:
x = 2
y = 3
pixel = z[x - 1][y - 1]
z can be displayed with:
plt.imshow(z)
I need to create a class with a list of sprites for the cells marked 1 in the grid list. I need to split the window into len(grid) by len(grid[0]) cells, as I'm doing in the WallTile class, and then fill the window with tiles wherever there is a 1 in grid, so that it forms a maze of tiles. I want to use sprites because I need collision detection with the wall tiles.
import sys
import pygame
from pygame.locals import *

width, height = 700, 700  # window size
size = (width, height)
grey = (128, 128, 128)  # assumed colour value; the original snippet did not define grey
grid = [[0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1]]
class WallTile(pygame.sprite.Sprite):
    def __init__(self):
        super(WallTile, self).__init__()
        self.TILE_W = width / len(grid)     # split window by len(grid) and set the size of one tile
        self.TILE_H = height / len(grid[0])
        self.tiles_x = width / self.TILE_W  # number of tiles across / down
        self.tiles_y = height / self.TILE_H
        self.imageMaster = pygame.image.load("images/wallBig.png")
        self.image = self.imageMaster
        self.pozX = 0.0
        self.pozY = 0.0
        self.image = pygame.transform.scale(self.imageMaster, (self.TILE_W, self.TILE_H))
        self.rect = self.image.get_rect()
        self.rect.center = (self.pozX, self.pozY)
        # Here I need to fill some sprite list with tiles at different coordinates.
        # I want to put a tile wherever there is a 1 in grid, and draw it to the screen.
        for y in range(self.tiles_y):
            for x in range(self.tiles_x):
                for i in range(len(grid[0])):
                    for j in range(len(grid)):
                        if grid[i][j] == 1:
                            # I need to make a new tile and put it in a sprite list
                            self.pozX, self.pozY = (x * self.TILE_W), (y * self.TILE_H)
                            #
                            # --- here I don't know what to do...
                            #
class MainClass:
    def Main(self):
        # here is how I'm drawing sprites on the screen
        pygame.init()  # initialize pygame before creating the window
        screen = pygame.display.set_mode(size)
        screen.fill(grey)
        wallTile = WallTile()
        spriteGroup_1 = pygame.sprite.Group(wallTile)
        background = pygame.Surface(screen.get_size())
        background.fill(grey)
        background.set_colorkey((60, 0, 0))
        running = True
        while running:
            for event in pygame.event.get():
                if event.type == QUIT or \
                   event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):
                    running = False
            spriteGroup_1.clear(screen, background)
            spriteGroup_1.update()
            updateScreen = spriteGroup_1.draw(screen)
            pygame.display.update(updateScreen)
            pygame.display.flip()

if __name__ == '__main__':
    mainWin = MainClass()
    sys.exit(mainWin.Main())
Thanks a lot for any advice.
I would first redefine your WallTile class so that it represents only one tile. The WallTile constructor now accepts a size and a location argument, which represent its size and position on the screen.
class WallTile(pygame.sprite.Sprite):
    def __init__(self, size, location):
        pygame.sprite.Sprite.__init__(self)
        self.size = size
        self.location = location
        self.image = pygame.image.load("images/wallBig.png")
        self.image = pygame.transform.scale(self.image, size)
        self.rect = self.image.get_rect()
        self.rect.topleft = location  # place the tile's rect at its position on the screen
Then, outside of the WallTile class, you can scan the grid for 1s and create a new tile whenever you find one. Take the indices of that element in the grid and multiply them by the tile size to determine the tile's location on the screen. Then you can put the resulting list of tiles into a sprite group.
tile_size = (20, 20)
tiles = list()
for y in range(len(grid)):
    for x in range(len(grid[y])):
        if grid[y][x] == 1:  # row y, column x
            new_tile = WallTile(tile_size, (x * tile_size[0], y * tile_size[1]))
            tiles.append(new_tile)
tile_group = pygame.sprite.Group(tiles)
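Since the reason for using sprites is collision detection, here is a rough sketch of how the group could be used inside the main loop; player and previous_position are hypothetical names, not part of the original code:

# after moving the player each frame
hit_walls = pygame.sprite.spritecollide(player, tile_group, False)
if hit_walls:
    player.rect.topleft = previous_position  # undo the move, or resolve the collision however you like

# drawing
screen.fill(grey)
tile_group.draw(screen)
pygame.display.flip()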
If you want to pull a specific tile from the group, and you don't already have a reference to that tile, you can loop over the group and look for a certain location:
search_location = (40, 80)
for tile in tile_group:
    if tile.location == search_location:
        # we've found the tile, now do something with it
        break