Dataset creator with OpenCV and Python error - python-2.7

OS: Ubuntu 17.10
I am trying this code to create a face detection dataset using Python 2.7 and OpenCV (installed with pip):
import cv2
import numpy as np

cam = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
Id = raw_input('enter your id')
sampleNum = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # incrementing sample number
        sampleNum = sampleNum + 1
        # saving the captured face in the dataset folder
        cv2.imwrite("dataSet/User." + Id + '.' + str(sampleNum) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('frame', img)
    # wait for 100 milliseconds
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
    # break if the sample number is more than 20
    elif sampleNum > 20:
        break
cam.release()
cv2.destroyAllWindows()
But I am getting the following error:
Traceback (most recent call last):
File "/home/anushi/face/datasetCreator.py", line 10, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
error: /io/opencv/modules/imgproc/src/color.cpp:10638: error: (-215) scn == 3 || scn == 4 in function cvtColor

As the comments correctly mention, one possible cause is that the image is empty (not captured properly). Another possibility is that the image is not a color image.
You can add
cv2.imshow('frame', img)
before
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
and see what the captured image looks like.
The rest of the code looks fine.
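If the frame is occasionally empty (for example while the camera is still warming up), a small guard before the conversion avoids the crash. This is just a sketch of the idea, not part of the original code:

    ret, img = cam.read()
    if not ret or img is None:
        # frame grab failed; skip this iteration instead of passing an empty image to cvtColor
        continue
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)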

Related

Region growing with the watershed transform

I am trying out the code by adfoucart for Region growing with the watershed transform but I ran into some errors when identifying the markers for the image.
from skimage.filters import rank, gaussian
from skimage.morphology import disk
from skimage.feature import peak_local_max

def get_markers(img2, indices=False):
    im_ = gaussian(img2, sigma=4)
    gradr = rank.gradient(im_[:, :, 0], disk(5)).astype('int')
    gradg = rank.gradient(im_[:, :, 1], disk(5)).astype('int')
    gradb = rank.gradient(im_[:, :, 2], disk(5)).astype('int')
    grad = gradr + gradg + gradb
    return peak_local_max(grad.max() - grad, threshold_rel=0.5,
                          min_distance=60, indices=indices), grad

markers, grad = get_markers(img2, True)
plt.figure()
plt.imshow(grad, cmap=plt.cm.gray)
plt.plot(markers[:, 1], markers[:, 0], 'b+')
plt.show()
and I am receiving this error.
IndexError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_17316/2204442073.py in <module>
12 return peak_local_max(grad.max()-grad,threshold_rel=0.5, min_distance=60,indices=indices),grad
13
---> 14 markers,grad = get_markers(img2, True)
15 plt.figure()
16 plt.imshow(grad, cmap=plt.cm.gray)
~\AppData\Local\Temp/ipykernel_17316/2204442073.py in get_markers(img2, indices)
5 def get_markers(img2, indices=False):
6 im_ = gaussian(img2, sigma=4)
----> 7 gradr = rank.gradient(im_[:,:,0],disk(5)).astype('int')
8 gradg = rank.gradient(im_[:,:,1],disk(5)).astype('int')
9 gradb = rank.gradient(im_[:,:,2],disk(5)).astype('int')
IndexError: too many indices for array: array is 2-dimensional, but 3 were indexed
Any help will be appreciated, thank you!
You are probably trying to run the code on a grayscale image, which will only have 2 dimensions (height and width), while the code was written expecting an RGB image with 3 dimensions (height, width and color channel).
On a grayscale image, the lines:
gradr = rank.gradient(im_[:,:,0],disk(5)).astype('int')
gradg = rank.gradient(im_[:,:,1],disk(5)).astype('int')
gradb = rank.gradient(im_[:,:,2],disk(5)).astype('int')
grad = gradr+gradg+gradb
could simply be replaced by:
grad = rank.gradient(im_, disk(5))
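If you want the same function to accept either kind of input, a possible sketch (my own, reusing the imports from the question and branching on the number of dimensions) is:

def get_markers(img2, indices=False):
    im_ = gaussian(img2, sigma=4)
    if im_.ndim == 2:
        # grayscale image: a single gradient map is enough
        grad = rank.gradient(im_, disk(5)).astype('int')
    else:
        # RGB image: sum the per-channel gradients as in the original code
        grad = sum(rank.gradient(im_[:, :, c], disk(5)).astype('int') for c in range(3))
    return peak_local_max(grad.max() - grad, threshold_rel=0.5,
                          min_distance=60, indices=indices), grad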

OpenCV python module error

The code is for face detection and recognition using the Haar cascade algorithm, running on OpenCV 3.2.0 and Python 2.7, and it throws a module error.
Can anybody help me out? It would be a big help for me. Thank you.
# facerec.py
import cv2, sys, numpy, os

size = 1
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'

# Part 1: Create fisherRecognizer
print('Training...')
# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)

# Get the folders containing the training data
for (subdirs, dirs, files) in os.walk(fn_dir):
    # Loop through each folder named after the subject in the photos
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        # Loop through each photo in the folder
        for filename in os.listdir(subjectpath):
            # Skip non-image formats
            f_name, f_extension = os.path.splitext(filename)
            if(f_extension.lower() not in
                    ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
                print("Skipping " + filename + ", wrong file type")
                continue
            path = subjectpath + '/' + filename
            lable = id
            # Add to training data
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
    # Loop until the camera is working
    rval = False
    while(not rval):
        # Put the image from the webcam into 'frame'
        (rval, frame) = webcam.read()
        if(not rval):
            print("Failed to open webcam. Trying again...")

    # Flip the image (optional)
    frame = cv2.flip(frame, 1, 0)
    # Convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Resize to speed up detection (optional, change size above)
    mini = cv2.resize(gray, (int(gray.shape[1] / size), int(gray.shape[0] / size)))

    # Detect faces and loop through each one
    faces = haar_cascade.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        # Coordinates of face after scaling back by `size`
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # Write the name of the recognized face
        cv2.putText(frame,
                    '%s - %.0f' % (names[prediction[0]], prediction[1]),
                    (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))

    # Show the image and check for ESC being pressed
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
It looks to me like you have not heeded the comment in your own code. The error message tells you that the module cv2 does not define face and the comment seems to warn about the same thing.
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.createFisherFaceRecognizer()
What happens if you heed the comment and code
model = cv2.createFisherFaceRecognizer()
instead?
I just solved the error by installing the opencv_contrib module from GitHub.
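For a pip-based install, the face module ships in the opencv-contrib-python package rather than opencv-python. A small hedged sketch of how you could check for it and handle the renamed factory function across OpenCV releases (the exact names below are my understanding of the 3.0-3.2 vs 3.3+ APIs):

import cv2

if not hasattr(cv2, 'face'):
    raise ImportError("cv2.face is missing - install the opencv-contrib-python package")

# OpenCV 3.0-3.2 exposes createFisherFaceRecognizer();
# OpenCV 3.3+ renamed it to FisherFaceRecognizer_create()
if hasattr(cv2.face, 'createFisherFaceRecognizer'):
    model = cv2.face.createFisherFaceRecognizer()
else:
    model = cv2.face.FisherFaceRecognizer_create()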

How to save images to a server folder via sockets using python and opencv?

I would like to know how to save images captured by OpenCV in a directory created by code, which should stay on the server...
import cv2, sys, numpy, os

haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'  # All the face data will be stored in this folder
sub_data = raw_input('enter your name')  # Sub-datasets of the folder; for my faces I've used my name
path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
    os.mkdir(path)
(width, height) = (130, 100)  # defining the size of images
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)  # '0' is my webcam; if you have another camera attached, use '1'
# The program loops until it has 100 images of the face.
count = 1
while count < 101:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        cv2.imwrite('%s/%s.png' % (path, count), face_resize)
    count += 1
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
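The thread did not get an answer, but one possible direction (a sketch of my own; the host, port and length-prefix framing are assumptions, not part of the original code) is to encode each face with cv2.imencode and send the bytes to the server over a TCP socket:

import socket
import struct
import cv2

def send_face(sock, face_img):
    # PNG-encode the image in memory, then send it with a 4-byte length prefix
    ok, buf = cv2.imencode('.png', face_img)
    if not ok:
        return
    data = buf.tobytes()
    sock.sendall(struct.pack('>I', len(data)) + data)

# client side: connect once, then call send_face(sock, face_resize) inside the capture loop
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 9999))  # hypothetical server address

On the server side you would read the 4-byte length, read that many bytes, and reconstruct the image with cv2.imdecode before writing it into the target directory.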

Tracking black objects live streaming

I'm trying to make a project that tracks black objects on the ground in a harsh, sunny desert environment, and my attempts so far haven't worked. I tried converting the black pixels in the image to red, because red pixels are easy to track (higher intensity), but it doesn't give what I want. I also tried inverting the image (converting black to white and white to black), but that doesn't give the desired result either. Any ideas?
Here is the code:
import cv2
import numpy as np
import os
from matplotlib import pyplot as plt

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    warp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ORANGE_MIN = np.array([40, 40, 60], np.uint8)
    ORANGE_MAX = np.array([50, 70, 80], np.uint8)
    hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    Conv_hsv_Gray = cv2.cvtColor(hsv_img, cv2.COLOR_BGR2GRAY)
    imf = np.float32(Conv_hsv_Gray) / 255.0  # float conversion/scale
    Desc = cv2.dct(imf)
    sub_to = Desc[0, 0]
    print sub_to
    result_frame = Desc - sub_to
    Fitr = cv2.blur(result_frame, (1, 1))
    Filter2 = cv2.GaussianBlur(Desc, (1, 1), 0)
    iddt1 = cv2.idct(Fitr)
    iddt2 = cv2.idct(Filter2)
    framethreshed = cv2.inRange(hsv_img, ORANGE_MIN, ORANGE_MAX)
    ret, mask = cv2.threshold(Conv_hsv_Gray, 100, 150, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(warp)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    res = cv2.bitwise_and(iddt1, iddt1, mask=mask)
    cv2.imwrite('output2.jpg', framethreshed)
    cv2.imshow("imgOriginal(1)", frame)  # show windows
    cv2.imshow("Masking res(3)", res)  # show windows
    cv2.imshow("mask_inv)", mask_inv)  # show windows
    if cv2.waitKey(60) & 0xFF == ord('q'):
        break
cap.release()
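No answer was posted for this one, but a possible direction (a sketch of my own; the V-channel threshold of 60 and the minimum blob area are assumptions that would need tuning for desert lighting) is to treat "black" as low brightness in HSV and track the resulting blobs with contours:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Pixels darker than the V threshold are treated as "black", regardless of hue
    mask = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([180, 255, 60]))
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] picks the contour list in both the 2-tuple and 3-tuple return variants of findContours
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if w * h > 500:  # ignore tiny specks
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow('dark objects', frame)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()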

Is there a support for BackgroundSubtractorMOG2 in python opencv

Is there a Python binding for BackgroundSubtractorMOG2? The C++ API apparently has one. I was under the impression that the Python and C++ versions of OpenCV offer the same functionality.
The OpenCV 2.x API gives you a Python binding for BackgroundSubtractorMOG; just check this page:
http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html
Here is an example:
import cv2

backsub = cv2.BackgroundSubtractorMOG()
capture = cv2.VideoCapture("Balcony4_Vis.mpg")
if capture:
    while True:
        ret, frame = capture.read()
        if ret:
            fgmask = backsub.apply(frame, None, 0.01)
            contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_NONE)
            try:
                hierarchy = hierarchy[0]
            except:
                hierarchy = []
            for contour, hier in zip(contours, hierarchy):
                (x, y, w, h) = cv2.boundingRect(contour)
                if w > 10 and h > 10:
                    # figure out id
                    best_id = 1
                    .....
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    cv2.putText(frame, str(best_id), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (255, 0, 0), 2)
            cv2.imshow("Track", frame)
            key = cv2.waitKey(10)
            if key == ord('q'):
                break
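BackgroundSubtractorMOG2 itself also has a Python binding; only the constructor name differs between OpenCV versions. A small hedged sketch of how it could be created (parameter values shown are illustrative defaults):

import cv2

if hasattr(cv2, 'createBackgroundSubtractorMOG2'):  # OpenCV 3.x / 4.x
    backsub2 = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=True)
else:                                               # OpenCV 2.4.x
    backsub2 = cv2.BackgroundSubtractorMOG2()

# usage mirrors the MOG example above: fgmask = backsub2.apply(frame)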