Apologies in advance, as I am a newbie to OpenCV-Python. I set myself the task of creating a passport-type image from a video capture.
Using a head-and-shoulders Haar cascade, I was able to create a portrait photo, but I now want to turn the background white (leaving the head-and-shoulders portrait in the foreground).
I am just not sure of the best way to do this. Any help would be welcome.
Many thanks in advance.
Here is the code:
import numpy as np
import cv2

# face cascade file
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# eye cascade file
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# head-and-shoulders cascade file
hs_cascade = cv2.CascadeClassifier('HS.xml')

cap = cv2.VideoCapture(1)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    headshoulders = hs_cascade.detectMultiScale(gray, 1.3, 3)
    # find the head and shoulders
    for (x, y, w, h) in headshoulders:
        # change the variables to make a portrait orientation
        x = int(x * 1.5)
        w = int(w / 1.5)
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # crop the image
        crop_img = img[y: y + h, x: x + w]
    # show original and crop
    cv2.imshow('crop', crop_img)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        # save out the portrait image
        cv2.imwrite('cropimage.png', crop_img)

# release the camera
cap.release()
cv2.destroyAllWindows()
I got it to work. Here is my solution.
PLEASE NOTE: This worked for HI-RES images (Nikon D7100 - JPEG). LOW-RES did NOT work when I tried a webcam (Logitech C615).
I used some of the code from a link that was suggested.
# import numpy
import numpy as np
# import cv2
import cv2
# import matplotlib
from matplotlib import pyplot as plt

# fill any holes in the thresholded foreground
def get_holes(image, thresh):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    im_bw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
    im_bw_inv = cv2.bitwise_not(im_bw)
    # OpenCV 3.x findContours returns (image, contours, hierarchy)
    im_bw_inv, contour, _ = cv2.findContours(im_bw_inv, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contour:
        cv2.drawContours(im_bw_inv, [cnt], 0, 255, -1)
    nt = cv2.bitwise_not(im_bw)
    im_bw_inv = cv2.bitwise_or(im_bw_inv, nt)
    return im_bw_inv
# remove the background
def remove_background(image, thresh, scale_factor=.25, kernel_range=range(1, 15), border=None):
    border = border or kernel_range[-1]
    holes = get_holes(image, thresh)
    small = cv2.resize(holes, None, fx=scale_factor, fy=scale_factor)
    bordered = cv2.copyMakeBorder(small, border, border, border, border, cv2.BORDER_CONSTANT)
    for i in kernel_range:
        # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*i+1, 2*i+1))
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (2*i+1, 2*i+1))
        bordered = cv2.morphologyEx(bordered, cv2.MORPH_CLOSE, kernel)
    unbordered = bordered[border: -border, border: -border]
    mask = cv2.resize(unbordered, (image.shape[1], image.shape[0]))
    fg = cv2.bitwise_and(image, image, mask=mask)
    return fg
# load the colour image
img = cv2.imread('original/11.png')

# start background removal -- parameters are <image> and <threshold level>
nb_img = remove_background(img, 180)

# change black pixels to white
nb_img[np.where((nb_img == [0, 0, 0]).all(axis=2))] = [255, 255, 255]

# resize for viewing (the images are too big for the screen)
small = cv2.resize(nb_img, (300, 400))

# show the finished image
cv2.imshow('image', small)

k = cv2.waitKey(0) & 0xFF
if k == 27:  # wait for ESC key to exit
    # if ESC pressed, close the windows
    cv2.destroyAllWindows()
elif k == ord('s'):  # wait for 's' key to save and exit
    # save the image
    cv2.imwrite('bg_removal/11.png', small)
    cv2.destroyAllWindows()
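For the low-res webcam case noted above, where a single global threshold struggles, cv2.grabCut may segment more robustly because it models foreground and background colour statistics rather than relying on one brightness cutoff. A minimal sketch, assuming the subject fills most of the cropped portrait; the 10 px border rectangle and the file name are my assumptions, not part of the original solution:

import numpy as np
import cv2

img = cv2.imread('cropimage.png')  # e.g. the cropped portrait saved by the first script
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

# treat a thin border as probable background, the rest as probable foreground
h, w = img.shape[:2]
rect = (10, 10, w - 20, h - 20)

cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# anywhere GrabCut marked definite/probable background becomes white
bg = (mask == cv2.GC_BGD) | (mask == cv2.GC_PR_BGD)
result = img.copy()
result[bg] = [255, 255, 255]

cv2.imshow('white background', result)
cv2.waitKey(0)
cv2.destroyAllWindows()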
I want to find the HSV value of a LASER dot using OpenCV and Python. I got the code from http://opencv-srf.blogspot.com.au/2010/09/object-detection-using-color-seperation.html, but it is in C++, and installing Visual Studio and OpenCV takes time, so I rewrote the code in Python:
import cv2
import numpy as np

def callback(x):
    pass

cap = cv2.VideoCapture(0)
cv2.namedWindow('image')

ilowH = 0
ihighH = 179
ilowS = 0
ihighS = 255
ilowV = 0
ihighV = 255

# create trackbars for color change
cv2.createTrackbar('lowH','image',ilowH,179,callback)
cv2.createTrackbar('highH','image',ihighH,179,callback)
cv2.createTrackbar('lowS','image',ilowS,255,callback)
cv2.createTrackbar('highS','image',ihighS,255,callback)
cv2.createTrackbar('lowV','image',ilowV,255,callback)
cv2.createTrackbar('highV','image',ihighV,255,callback)

while(1):
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    cv2.imshow('hsv', hsv)
    lower_hsv = np.array([ilowH, ilowS, ilowV])
    higher_hsv = np.array([ihighH, ihighS, ihighV])
    mask = cv2.inRange(hsv, lower_hsv, higher_hsv)
    cv2.imshow('mask', mask)
    cv2.imshow('frame', frame)
    print ilowH, ilowS, ilowV
    if(cv2.waitKey(1) & 0xFF == ord('q')):
        break

cv2.destroyAllWindows()
cap.release()
But this code does not threshold anything. It seems the trackbars I created do not change the values of ilowH, ilowS, and ilowV; I checked by printing those values inside the while loop. What could be the problem, or is there better Python code for finding the HSV values of the LASER?
Thank you, any help is appreciated.
You can grab the trackbar values with cv2.getTrackbarPos(). Also note that sometimes it puts trackbars out of order, which is annoying, but at least they're labeled.
However, I don't think these trackbars will work very well with a live video feed; there are a lot of freezing issues. You'll need a very low framerate (it works for me with cv2.waitKey(500) if you're actually trying to display the video). This is mostly down to the trackbars, not the thresholding operation, which is not that slow.
You need to add your trackbars after you create the named window. Then, for your while loop, try:
while True:
    # grab the frame
    ret, frame = cap.read()

    # get trackbar positions
    ilowH = cv2.getTrackbarPos('lowH', 'image')
    ihighH = cv2.getTrackbarPos('highH', 'image')
    ilowS = cv2.getTrackbarPos('lowS', 'image')
    ihighS = cv2.getTrackbarPos('highS', 'image')
    ilowV = cv2.getTrackbarPos('lowV', 'image')
    ihighV = cv2.getTrackbarPos('highV', 'image')

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_hsv = np.array([ilowH, ilowS, ilowV])
    higher_hsv = np.array([ihighH, ihighS, ihighV])
    mask = cv2.inRange(hsv, lower_hsv, higher_hsv)
    frame = cv2.bitwise_and(frame, frame, mask=mask)

    # show thresholded image
    cv2.imshow('image', frame)
    k = cv2.waitKey(1000) & 0xFF  # large wait time to remove freezing
    if k == 113 or k == 27:
        break
and finally end the file with a cv2.destroyAllWindows()
As an aside, the maximum H value for HSV is 180, not 179.
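On that note, red hues sit at both ends of the H axis, so a red laser dot may need two inRange() calls OR-ed together. A minimal sketch of the idea, assuming a saved frame; the bounds are starting guesses to tune with the trackbars, not values from this thread:

import cv2
import numpy as np

frame = cv2.imread('laser_frame.png')  # hypothetical saved frame from the capture
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# red wraps around the hue axis, so threshold both ends and combine;
# a bright laser dot usually has a very high V
mask_low = cv2.inRange(hsv, np.array([0, 70, 200]), np.array([10, 255, 255]))
mask_high = cv2.inRange(hsv, np.array([170, 70, 200]), np.array([180, 255, 255]))
mask = cv2.bitwise_or(mask_low, mask_high)

cv2.imshow('laser mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()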
Shameless plug: I happened to just finish a project doing precisely this, but on images. You can grab it on GitHub here. There is an example; try running it and then modifying as you need. It will let you change the colorspace and threshold inside each different colorspace, and it will print the final thresholding values that you ended on. Additionally it will return the output image from the operation for you to use, too. Hopefully it is useful for you! Feel free to send any issues or suggestions through GitHub for the project.
Here is an example of it running:
And as output it gives you:
Colorspace: HSV
Lower bound: [68.4, 0.0, 0.0]
Upper bound: [180.0, 255.0, 255.0]
as well as the binary image. I am currently working on getting this into a web application as well, but that probably won't be finished for a few days.
Use this code to find the masking range for real-time video; it might save you time. The whole code is below, so check it and run it to test.
import cv2
import numpy as np

camera = cv2.VideoCapture(0)

def nothing(x):
    pass

cv2.namedWindow('marking')
cv2.createTrackbar('H Lower','marking',0,179,nothing)
cv2.createTrackbar('H Higher','marking',179,179,nothing)
cv2.createTrackbar('S Lower','marking',0,255,nothing)
cv2.createTrackbar('S Higher','marking',255,255,nothing)
cv2.createTrackbar('V Lower','marking',0,255,nothing)
cv2.createTrackbar('V Higher','marking',255,255,nothing)

while(1):
    _, img = camera.read()
    img = cv2.flip(img, 1)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    hL = cv2.getTrackbarPos('H Lower','marking')
    hH = cv2.getTrackbarPos('H Higher','marking')
    sL = cv2.getTrackbarPos('S Lower','marking')
    sH = cv2.getTrackbarPos('S Higher','marking')
    vL = cv2.getTrackbarPos('V Lower','marking')
    vH = cv2.getTrackbarPos('V Higher','marking')

    LowerRegion = np.array([hL, sL, vL], np.uint8)
    upperRegion = np.array([hH, sH, vH], np.uint8)

    redObject = cv2.inRange(hsv, LowerRegion, upperRegion)

    # note: a 1x1 kernel leaves the mask effectively unchanged;
    # use a larger kernel (e.g. 3x3) for real noise removal
    kernel = np.ones((1, 1), "uint8")
    red = cv2.morphologyEx(redObject, cv2.MORPH_OPEN, kernel)
    red = cv2.dilate(red, kernel, iterations=1)
    res1 = cv2.bitwise_and(img, img, mask=red)

    cv2.imshow("Masking ", res1)

    if cv2.waitKey(10) & 0xFF == ord('q'):
        camera.release()
        cv2.destroyAllWindows()
        break
Thanks!
Hugs..
Hello, I am running the Python OpenCV code below on a Raspberry Pi 3 with a USB camera. The program tracks a person's face and overlays a mask image on it. My program keeps crashing with the error below. Also, the mask image over the person's face is not removing the background color to leave just the image mask. Hope you can help.
:1036: error: (-215) mask.size == src1.size in function binary_op
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('cascade_files/haarcascade_frontalface_alt.xml')
face_mask = cv2.imread('../images/mask_skull.png')
h_mask, w_mask = face_mask.shape[:2]

if face_cascade.empty():
    raise IOError('Unable to load the face cascade classifier xml file')

cap = cv2.VideoCapture(0)
scaling_factor = 0.5

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_rects = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in face_rects:
        if h > 0 and w > 0:
            h, w = int(1.4*h), int(1.0*w)
            y -= 0.1*h
            frame_roi = frame[y:y+h, x:x+w]
            face_mask_small = cv2.resize(face_mask, (w, h), interpolation=cv2.INTER_AREA)
            gray_mask = cv2.cvtColor(face_mask_small, cv2.COLOR_BGR2GRAY)
            ret, mask = cv2.threshold(gray_mask, 180, 255, cv2.THRESH_BINARY_INV)
            mask_inv = cv2.bitwise_not(mask)
            masked_face = cv2.bitwise_and(face_mask_small, face_mask_small, mask=mask)
            masked_frame = cv2.bitwise_and(frame_roi, frame_roi, mask=mask_inv)
            frame[y:y+h, x:x+w] = cv2.add(masked_face, masked_frame)
    cv2.imshow('Face Detector', frame)
    c = cv2.waitKey(1)
    if c == 27:
        break

cap.release()
cv2.destroyAllWindows()
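Not from the original post, but a likely cause of that assertion: when the enlarged box sticks out past the frame border, numpy slicing silently clips frame_roi, so its size no longer matches face_mask_small and cv2.bitwise_and raises (-215) mask.size == src1.size (y also becomes a float after y -= 0.1*h). A hedged sketch of a clamp for the loop body:

# a sketch, untested on the Pi: clamp the ROI before slicing
y = int(y - 0.1 * h)              # keep indices integral
x, y = max(x, 0), max(y, 0)       # clamp the top-left corner to the frame
h = min(h, frame.shape[0] - y)    # shrink the box so it stays inside the frame
w = min(w, frame.shape[1] - x)
frame_roi = frame[y:y + h, x:x + w]
# resize the mask to the clamped ROI so the sizes always match
face_mask_small = cv2.resize(face_mask, (w, h), interpolation=cv2.INTER_AREA)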
I want to detect circles/ellipses on a music staff using SimpleBlobDetection, but when I try to detect them it finds unrelated points in the picture.
Original Image:
After Blob Detection:
Please see the code below:
cv::SimpleBlobDetector::Params params;
//Thresholds
params.minThreshold = 10;
params.maxThreshold = 200;
// Filter by Area
params.filterByArea = true;
params.minArea = 100;
params.maxArea = 500;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = 0.1;
params.maxCircularity = 0.5;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = 0.57;
params.maxConvexity = 0.97;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = 0.01;
// set up and create the detector using the parameters
cv::SimpleBlobDetector blob_detector(params);
// detect!
vector<cv::KeyPoint> keypoints;
blob_detector.detect(tresh, keypoints);
// extract the x y coordinates of the keypoints:
for (int i = 0; i < keypoints.size(); i++){
    float X = keypoints[i].pt.x;
    float Y = keypoints[i].pt.y;
    circle(tresh, Point(X, Y), 1, Scalar(0, 255, 0), 3, CV_AA);
}
imshow("Detected Blobs", tresh);
Help me please...
Here is the solution. I copied it from the SimpleBlobDetector tutorial.
# Standard imports
import cv2
import numpy as np

# Read image
im = cv2.imread("blob.jpg", cv2.IMREAD_GRAYSCALE)
im_orig = im
_, im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY)
im = 255 - im
im = 255 - cv2.erode(im, np.ones((3,3)), iterations=2)

# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()

# Filter by Area.
params.filterByArea = True
params.minArea = 20
params.filterByConvexity = False

# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)

# Detect blobs.
keypoints = detector.detect(im)

# Draw blobs
im_with_keypoints = cv2.drawKeypoints(im_orig, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Write image
cv2.imwrite("treble_staff.jpg", im_with_keypoints)

# Show blobs
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
How about detecting circles with a Hough transform?
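A minimal sketch of that suggestion, assuming a grayscale scan of the staff; the file name and radius bounds are guesses for typical notehead sizes, not measured values:

import cv2
import numpy as np

img = cv2.imread("staff.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical filename
img = cv2.medianBlur(img, 5)  # smooth to suppress spurious circles

# dp=1 keeps the accumulator at full resolution; minDist stops neighbouring
# noteheads merging into one detection (cv2.cv.CV_HOUGH_GRADIENT on OpenCV 2.4)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=10,
                           param1=100, param2=15, minRadius=3, maxRadius=10)

out = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
if circles is not None:
    for x, y, r in np.uint16(np.around(circles[0])):
        cv2.circle(out, (x, y), r, (0, 255, 0), 2)

cv2.imshow("Hough circles", out)
cv2.waitKey(0)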
Have you tried different filter values (especially min/max area - those are squared values)? Also, what about applying filtering by color (i.e. white only)?
I am attempting to create a main function that will use the program below with a webcam to display which cards are currently being detected on the screen. What would be the best way to do this? Any help is appreciated.
"""
Usage:
./card_img.py filename, num_cards, training_image_filename, training_labels_filename, num_training_cards
Example:
./card_img.py test.JPG 4 train.png train.tsv 56
"""
import sys
import numpy as np
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import cv2
###############################################################################
# Utility code
###############################################################################
def rectify(h):
    # reorder the four corner points into a consistent order
    h = h.reshape((4,2))
    hnew = np.zeros((4,2), dtype=np.float32)
    add = h.sum(1)
    hnew[0] = h[np.argmin(add)]
    hnew[2] = h[np.argmax(add)]
    diff = np.diff(h, axis=1)
    hnew[1] = h[np.argmin(diff)]
    hnew[3] = h[np.argmax(diff)]
    return hnew
###############################################################################
# Image Matching
###############################################################################
def preprocess(img):
    # Example: test_img = cv2.imread("test.JPG")
    #          preprocess(test_img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 2)
    thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 1)
    return thresh
def imgdiff(img1, img2):
    img1 = cv2.GaussianBlur(img1, (5,5), 5)
    img2 = cv2.GaussianBlur(img2, (5,5), 5)
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (5,5), 5)
    flag, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)
    return np.sum(diff)

def find_closest_card(training, img):
    features = preprocess(img)
    return sorted(training.values(), key=lambda x: imgdiff(x[1], features))[0][0]
###############################################################################
# Card Extraction
###############################################################################
def getCards(im, numcards=4):
    # Example: test = cv2.imread("test.JPG")
    #          getCards(test, 2)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (1,1), 1000)
    flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:numcards]
    for card in contours:
        peri = cv2.arcLength(card, True)
        approx = rectify(cv2.approxPolyDP(card, 0.02*peri, True))

        # box = np.int0(approx)
        # cv2.drawContours(im,[box],0,(255,255,0),6)
        # imx = cv2.resize(im,(1000,600))
        # cv2.imshow('a',imx)

        h = np.array([ [0,0],[449,0],[449,449],[0,449] ], np.float32)
        transform = cv2.getPerspectiveTransform(approx, h)
        warp = cv2.warpPerspective(im, transform, (450,450))
        yield warp
def get_training(training_labels_filename, training_image_filename, num_training_cards, avoid_cards=None):
    # Example: get_training("train.tsv", "train.png", 4, None)
    training = {}
    labels = {}
    for line in file(training_labels_filename):
        key, num, suit = line.strip().split()
        labels[int(key)] = (num, suit)
    print "Training"
    im = cv2.imread(training_image_filename)
    for i, c in enumerate(getCards(im, num_training_cards)):
        if avoid_cards is None or (labels[i][0] not in avoid_cards[0] and labels[i][1] not in avoid_cards[1]):
            training[i] = (labels[i], preprocess(c))
    print "Done training"
    return training
if __name__ == '__main__':
    if len(sys.argv) == 6:
        filename = sys.argv[1]
        num_cards = int(sys.argv[2])
        training_image_filename = sys.argv[3]
        training_labels_filename = sys.argv[4]
        num_training_cards = int(sys.argv[5])

        training = get_training(training_labels_filename, training_image_filename, num_training_cards)

        im = cv2.imread(filename)

        width = im.shape[0]
        height = im.shape[1]
        if width < height:
            im = cv2.transpose(im)
            im = cv2.flip(im, 1)

        # Debug: uncomment to see registered images
        #for i, c in enumerate(getCards(im, num_cards)):
        #    card = find_closest_card(training, c)
        #    cv2.imshow(str(card), c)
        #    cv2.waitKey(0)

        cards = [find_closest_card(training, c) for c in getCards(im, num_cards)]
        print cards
    else:
        print __doc__
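One possible way to drive this from a webcam, as the question asks (a hedged sketch, not part of the original script: it trains once from the files named in the usage string, then classifies every frame; the window name and exception handling are my own choices):

def main_webcam(num_cards=4):
    # train once from the training image/labels named in the usage example
    training = get_training("train.tsv", "train.png", 56)

    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        try:
            # classify whatever card-like contours are visible in this frame
            cards = [find_closest_card(training, c) for c in getCards(frame, num_cards)]
            text = ', '.join('%s%s' % c for c in cards)
        except Exception:
            text = 'no cards found'  # rectify() fails when a contour is not 4-sided
        cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
        cv2.imshow('cards', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()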
Build OpenCV with the nonfree modules and use SURF features. They are awesome and can detect and separate almost everything:
http://docs.opencv.org/master/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.html
Here are some code examples (sorry, just C++, not Python): http://docs.opencv.org/doc/user_guide/ug_features2d.html
This is the important code:
// detecting keypoints
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
If speed matters, you can use
cv::FlannBasedMatcher matcher;
std::vector< cv::DMatch > matches;
matcher.match( descriptors_this, descriptors_givenImage, matches );
FLANN is a very cool, very fast matching framework that uses structures such as trees and BVHs to speed things up a lot.
Here are some answers if you need help with installing surf: OpenCV - undefined reference: SurfFeatureDetector and BruteForceMatcher
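Since the rest of this thread is Python, here is a rough Python equivalent of the C++ above: a sketch assuming an OpenCV build with the nonfree/contrib modules, with the same hessianThreshold of 400 and hypothetical file names:

import cv2

img1 = cv2.imread('card.jpg', cv2.IMREAD_GRAYSCALE)   # hypothetical filenames
img2 = cv2.imread('scene.jpg', cv2.IMREAD_GRAYSCALE)

# detect keypoints and compute descriptors in one call
surf = cv2.xfeatures2d.SURF_create(400)  # cv2.SURF(400) on OpenCV 2.4
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

# match descriptors with FLANN (kd-tree index for float descriptors)
index_params = dict(algorithm=1, trees=5)  # FLANN_INDEX_KDTREE
search_params = dict(checks=50)
matcher = cv2.FlannBasedMatcher(index_params, search_params)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)

# draw the 20 closest matches
out = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20], None)
cv2.imshow('matches', out)
cv2.waitKey(0)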