Is there support for BackgroundSubtractorMOG2 in Python OpenCV?

Is there a Python binding for BackgroundSubtractorMOG2? The C++ API apparently has one.
I was under the impression that the Python version of OpenCV mirrors the C++ version.

The OpenCV 2.x API provides a Python binding for BackgroundSubtractorMOG; see
http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html
Here is an example:
import cv2

backsub = cv2.BackgroundSubtractorMOG()
capture = cv2.VideoCapture("Balcony4_Vis.mpg")
if capture.isOpened():
    while True:
        ret, frame = capture.read()
        if ret:
            fgmask = backsub.apply(frame, None, 0.01)
            contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_NONE)
            try:
                hierarchy = hierarchy[0]
            except TypeError:
                hierarchy = []
            for contour, hier in zip(contours, hierarchy):
                (x, y, w, h) = cv2.boundingRect(contour)
                if w > 10 and h > 10:
                    # figure out id
                    best_id = 1
                    # .....
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    cv2.putText(frame, str(best_id), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (255, 0, 0), 2)
            cv2.imshow("Track", frame)
            key = cv2.waitKey(10)
            if key == ord('q'):
                break
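If your cv2 build exposes it (recent OpenCV 2.4.x Python releases add the binding), BackgroundSubtractorMOG2 can be dropped in the same way. A minimal sketch, assuming the constructor is available as cv2.BackgroundSubtractorMOG2:

import cv2

backsub = cv2.BackgroundSubtractorMOG2()
capture = cv2.VideoCapture("Balcony4_Vis.mpg")
while True:
    ret, frame = capture.read()
    if not ret:
        break
    # same apply() call as with MOG; the third argument is the learning rate
    fgmask = backsub.apply(frame, None, 0.01)
    cv2.imshow("Foreground", fgmask)
    if cv2.waitKey(10) == ord('q'):
        break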


Create UV Texture map from DensePose Output

I am trying to generate a single UV-texture map in the format of the SURREAL dataset. There is a notebook in the original DensePose repository that discusses how to apply texture transfer using an image from SMPL: github.com/facebookresearch/DensePose/blob/master/notebooks/DensePose-RCNN-Texture-Transfer.ipynb
However, in this case I am trying to use the outputs we get from DensePose directly:
In dump mode, I get the UV coordinates in data[0]['pred_densepose'][0].uv, with dimensions torch.Size([2, 1098, 529]).
I overlaid the output from running inference on an image with the dp_u,dp_v visualization on a black background. Here is the link to the image: https://densepose.s3.amazonaws.com/test1uv.0001.png
This is the command I used to get this inference: python3 apply_net.py show configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml model_final_de6e7a.pkl input.jpg dp_u,dp_v -v --output output.png
This is the link to the original image: https://densepose.s3.amazonaws.com/02_1_front.jpg
Using these components, I am trying to generate the 24-part UV texture map in the same format as SMPL:
https://densepose.s3.amazonaws.com/extracted_smpl_texture_apprearance.png
https://densepose.s3.amazonaws.com/texture_from_SURREAL.png
It would be extremely helpful if someone can share how to solve this problem. Please let me know if additional information is needed.
I don't know whether the problem still persists or you were able to find a solution. In case anyone else runs into the same issue, here is my solution. I put together several different pieces of code and ideas from the official GitHub issue page for DensePose (https://github.com/facebookresearch/DensePose/issues/68).
I assume that we already have the output of the apply_net.py utility from the GitHub DensePose repository, i.e. the data object from your post (the one you read data[0]['pred_densepose'][0].uv from).
Let's do some coding:
import copy

import cv2
import matplotlib
matplotlib.use('TkAgg')  # select the backend before pyplot is imported
import numpy as np
from matplotlib import pyplot as plt

# I assume the data are stored in a pickle, and you are able to read them
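# (hedged sketch) one way to obtain `data`: assuming apply_net.py was run in
# dump mode and wrote its results with `--output dump.pkl` (the file name is
# an assumption), the dump is a pickle containing torch tensors, so torch has
# to be importable when unpickling
import pickle
import torch  # noqa: F401 -- needed so the pickled tensors can be deserialized

with open('dump.pkl', 'rb') as f:
    data = pickle.load(f)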
results = data[0]
IMAGE_FILE = 'path/to/image.png'
def parse_iuv(result):
    i = result['pred_densepose'][0].labels.cpu().numpy().astype(float)
    uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float)
    iuv = np.stack((uv[1, :, :], uv[0, :, :], i))
    iuv = np.transpose(iuv, (1, 2, 0))
    return iuv


def parse_bbox(result):
    return result["pred_boxes_XYXY"][0].cpu().numpy()


def concat_textures(array):
    texture = []
    for i in range(4):
        tmp = array[6 * i]
        for j in range(6 * i + 1, 6 * i + 6):
            tmp = np.concatenate((tmp, array[j]), axis=1)
        texture = tmp if len(texture) == 0 else np.concatenate((texture, tmp), axis=0)
    return texture


def interpolate_tex(tex):
    # code is adopted from https://github.com/facebookresearch/DensePose/issues/68
    valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
    radius_increase = 10
    kernel = np.ones((radius_increase, radius_increase), np.uint8)
    dilated_mask = cv2.dilate(valid_mask, kernel, iterations=1)
    region_to_fill = dilated_mask - valid_mask
    invalid_region = 1 - valid_mask
    actual_part_max = tex.max()
    actual_part_min = tex.min()
    actual_part_uint = np.array((tex - actual_part_min) / (actual_part_max - actual_part_min) * 255,
                                dtype='uint8')
    actual_part_uint = cv2.inpaint(actual_part_uint.transpose((1, 2, 0)), invalid_region, 1,
                                   cv2.INPAINT_TELEA).transpose((2, 0, 1))
    actual_part = (actual_part_uint / 255.0) * (actual_part_max - actual_part_min) + actual_part_min
    # only use the dilated part
    actual_part = actual_part * dilated_mask
    return actual_part


def get_texture(im, iuv, bbox, tex_part_size=200):
    # this part of the code creates an IUV image that matches the size of the
    # original image (the IUV from DensePose is placed within the pose bounding box)
    im = im.transpose(2, 1, 0) / 255
    image_w, image_h = im.shape[1], im.shape[2]
    bbox[2] = bbox[2] - bbox[0]
    bbox[3] = bbox[3] - bbox[1]
    x, y, w, h = [int(v) for v in bbox]
    bg = np.zeros((image_h, image_w, 3))
    bg[y:y + h, x:x + w, :] = iuv
    iuv = bg
    iuv = iuv.transpose((2, 1, 0))
    i, u, v = iuv[2], iuv[1], iuv[0]

    # the following part iterates over the parts and creates textures
    # of size `tex_part_size x tex_part_size`
    n_parts = 24
    texture = np.zeros((n_parts, 3, tex_part_size, tex_part_size))
    for part_id in range(1, n_parts + 1):
        generated = np.zeros((3, tex_part_size, tex_part_size))
        x, y = u[i == part_id], v[i == part_id]
        # transform uv coordinates to the current UV texture coordinates:
        tex_u_coo = (x * (tex_part_size - 1) / 255).astype(int)
        tex_v_coo = (y * (tex_part_size - 1) / 255).astype(int)
        # clipping due to issues encountered in the DensePose output;
        # for an unknown reason, some `uv` coordinates fall outside the [0, 1] bound
        tex_u_coo = np.clip(tex_u_coo, 0, tex_part_size - 1)
        tex_v_coo = np.clip(tex_v_coo, 0, tex_part_size - 1)
        # write the corresponding pixels from the original image to the UV texture;
        # iterate over range(3) because of the 3 channels
        for channel in range(3):
            generated[channel][tex_v_coo, tex_u_coo] = im[channel][i == part_id]
        # this part is not crucial, but gives you better results
        # (the texture comes out smoother)
        if np.sum(generated) > 0:
            generated = interpolate_tex(generated)
        # assign the part to the final texture carrier
        texture[part_id - 1] = generated[:, ::-1, :]

    # concatenate the textures and create the 2D (UV) plane
    tex_concat = np.zeros((24, tex_part_size, tex_part_size, 3))
    for i in range(texture.shape[0]):
        tex_concat[i] = texture[i].transpose(2, 1, 0)
    tex = concat_textures(tex_concat)
    return tex


iuv = parse_iuv(results)
bbox = parse_bbox(results)
image = cv2.imread(IMAGE_FILE)[:, :, ::-1]
uv_texture = get_texture(image, iuv, bbox)

# plot the texture or do whatever you like
plt.imshow(uv_texture)
plt.show()
Enjoy

Dataset creator with OpenCV and Python error

OS: Ubuntu 17.10
I am trying this code to create a face-detection dataset, using Python 2.7 and OpenCV (installed with pip):
import cv2
import numpy as np

cam = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
Id = raw_input('enter your id')
sampleNum = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # incrementing sample number
        sampleNum = sampleNum + 1
        # saving the captured face in the dataset folder
        cv2.imwrite("dataSet/User." + Id + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])
        cv2.imshow('frame', img)
    # wait for 100 milliseconds
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
    # break if the sample number is more than 20
    elif sampleNum > 20:
        break
cam.release()
cv2.destroyAllWindows()
But I am getting the following error:
Traceback (most recent call last):
File "/home/anushi/face/datasetCreator.py", line 10, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
error: /io/opencv/modules/imgproc/src/color.cpp:10638: error: (-215) scn == 3 || scn == 4 in function cvtColor
As the comments correctly mention, one possible cause is that the image is empty (not captured properly). Another possibility is that the image is not a color image.
You can add
cv2.imshow('frame', img)
before
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
and see what the captured image looks like.
The rest of the code looks fine.
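If the capture is indeed returning empty frames, a minimal guard, assuming the failure shows up as ret being False (or img being None) from cam.read(), is to skip those iterations instead of crashing inside cvtColor:

ret, img = cam.read()
if not ret or img is None:
    # frame grab failed; skip this iteration instead of crashing in cvtColor
    continue
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)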

OpenCV python module error

The code is for face detection and recognition using the Haar cascade algorithm, running on OpenCV 3.2.0 and Python 2.7. It throws a module error.
Is there anybody who can help me out? It would be a big help. Thank you.
# facerec.py
import cv2, sys, numpy, os

size = 1
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'

# Part 1: Create fisherRecognizer
print('Training...')
# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)
# Get the folders containing the training data
for (subdirs, dirs, files) in os.walk(fn_dir):
    # Loop through each folder named after the subject in the photos
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        # Loop through each photo in the folder
        for filename in os.listdir(subjectpath):
            # Skip non-image formats
            f_name, f_extension = os.path.splitext(filename)
            if (f_extension.lower() not in
                    ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
                print("Skipping " + filename + ", wrong file type")
                continue
            path = subjectpath + '/' + filename
            lable = id
            # Add to training data
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
    # Loop until the camera is working
    rval = False
    while not rval:
        # Put the image from the webcam into 'frame'
        (rval, frame) = webcam.read()
        if not rval:
            print("Failed to open webcam. Trying again...")
    # Flip the image (optional)
    frame = cv2.flip(frame, 1, 0)
    # Convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Resize to speed up detection (optional, change size above)
    mini = cv2.resize(gray, (int(gray.shape[1] / size), int(gray.shape[0] / size)))
    # Detect faces and loop through each one
    faces = haar_cascade.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        # Coordinates of face after scaling back by `size`
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # Write the name of the recognized face
        cv2.putText(frame,
                    '%s - %.0f' % (names[prediction[0]], prediction[1]),
                    (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    # Show the image and check for ESC being pressed
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
It looks to me like you have not heeded the comment in your own code. The error message tells you that the module cv2 does not define face and the comment seems to warn about the same thing.
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.createFisherFaceRecognizer()
What happens if you heed the comment and write
model = cv2.createFisherFaceRecognizer()
instead?
I just solved the error by installing the opencv_contrib zip from GitHub.
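For pip-based installs, the contrib modules (which include cv2.face) ship in a separate package. A hedged sketch, noting that the factory function was renamed in OpenCV 3.3+:

# install the contrib build first, e.g.:  pip install opencv-contrib-python
import cv2

# OpenCV 3.0-3.2 contrib exposes:
model = cv2.face.createFisherFaceRecognizer()
# OpenCV 3.3+ renames the same factory:
# model = cv2.face.FisherFaceRecognizer_create()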

How to save images to a server folder via sockets using python and opencv?

I would like to know how to save images captured by OpenCV into a directory created by code, where the directory should live on the server.
import cv2, sys, numpy, os

haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'  # All the face data will be present in this folder
sub_data = raw_input('enter your name')  # sub-dataset folder; for my faces I used my name
path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
    os.mkdir(path)
(width, height) = (130, 100)  # defining the size of images
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)  # '0' is my webcam; if you have another camera attached, use '1'
# The program loops until it has 100 images of the face.
count = 1
while count < 101:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        cv2.imwrite('%s/%s.png' % (path, count), face_resize)
        count += 1
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
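One hedged way to move the crops to the server: encode each face crop in memory with cv2.imencode and push the bytes over a TCP connection; a matching process on the server writes them into its own directory. SERVER_HOST, SERVER_PORT, and the 4-byte length-prefix framing below are assumptions of this sketch, not an established protocol.

import socket
import struct

import cv2

SERVER_HOST = '127.0.0.1'  # assumed server address
SERVER_PORT = 5001         # assumed port

def send_image(sock, image):
    # encode the crop in memory and ship it with a 4-byte length prefix
    ok, buf = cv2.imencode('.png', image)
    if ok:
        payload = buf.tobytes()
        sock.sendall(struct.pack('>I', len(payload)) + payload)

# in the capture loop, replace cv2.imwrite(...) with send_image(sock, face_resize)
sock = socket.create_connection((SERVER_HOST, SERVER_PORT))

On the server, the matching receive loop creates the directory and writes the files:

import os
import socket
import struct

srv = socket.socket()
srv.bind(('', 5001))
srv.listen(1)
conn, _ = srv.accept()
if not os.path.isdir('received'):
    os.mkdir('received')
count = 0
while True:
    header = conn.recv(4)
    if len(header) < 4:
        break
    size = struct.unpack('>I', header)[0]
    payload = b''
    while len(payload) < size:
        chunk = conn.recv(size - len(payload))
        if not chunk:
            break
        payload += chunk
    count += 1
    with open('received/%d.png' % count, 'wb') as f:
        f.write(payload)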

Tracking black objects live streaming

I'm trying to build a project that tracks black objects on the ground in a harsh, sunny desert environment, and my attempts so far have not worked. I tried converting the black pixels in the image to red, because red pixels are easy to track (higher intensity), but it did not give what I want. I also tried inverting the image (converting black to white and white to black), but that did not give the desired result either. Any ideas?
Here is the code:
import cv2
import numpy as np
import os
from matplotlib import pyplot as plt

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    warp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ORANGE_MIN = np.array([40, 40, 60], np.uint8)
    ORANGE_MAX = np.array([50, 70, 80], np.uint8)
    hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    Conv_hsv_Gray = cv2.cvtColor(hsv_img, cv2.COLOR_BGR2GRAY)
    imf = np.float32(Conv_hsv_Gray) / 255.0  # float conversion/scale
    Desc = cv2.dct(imf)
    sub_to = Desc[0, 0]
    print sub_to
    result_frame = Desc - sub_to
    Fitr = cv2.blur(result_frame, (1, 1))
    Filter2 = cv2.GaussianBlur(Desc, (1, 1), 0)
    iddt1 = cv2.idct(Fitr)
    iddt2 = cv2.idct(Filter2)
    framethreshed = cv2.inRange(hsv_img, ORANGE_MIN, ORANGE_MAX)
    ret, mask = cv2.threshold(Conv_hsv_Gray, 100, 150, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(warp)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    res = cv2.bitwise_and(iddt1, iddt1, mask=mask)
    cv2.imwrite('output2.jpg', framethreshed)
    cv2.imshow("imgOriginal(1)", frame)  # show windows
    cv2.imshow("Masking res(3)", res)    # show windows
    cv2.imshow("mask_inv", mask_inv)     # show windows
    if cv2.waitKey(60) & 0xFF == ord('q'):
        break
cap.release()
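Since black has low brightness rather than a distinctive hue, one approach worth trying is to threshold the HSV value channel directly instead of remapping or inverting colors, then track the surviving blobs with contours. A minimal sketch; the upper V bound of 60 and the minimum blob size are assumed starting points you would tune for the desert lighting:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # "black" = any hue and saturation, but low brightness (the V channel)
    dark_mask = cv2.inRange(hsv, np.array([0, 0, 0], np.uint8),
                            np.array([180, 255, 60], np.uint8))
    # clean up speckle from the harsh lighting
    dark_mask = cv2.erode(dark_mask, None, iterations=2)
    dark_mask = cv2.dilate(dark_mask, None, iterations=2)
    # [-2] picks the contour list on OpenCV 2, 3, and 4 alike
    contours = cv2.findContours(dark_mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if w > 10 and h > 10:  # ignore tiny blobs
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow("tracked", frame)
    if cv2.waitKey(60) & 0xFF == ord('q'):
        break
cap.release()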