Some background: I'm reading frames from a video capture device (webcam) and writing them to a video file (out.avi) with OpenCV in python. The script I've written is meant to be called from a Node.js process so I can start recording video in response to whatever occurs in node.
I'm using Python 2.7, and OpenCV is installed on Ubuntu from the aptitude package repos, so I'm not sure what version that is or whether it's important.
Here is the script I wrote:
#!/usr/bin/env python
# adapted from https://stackoverflow.com/questions/32943227/python-opencv-capture-images-from-webcam
from __future__ import print_function
from datetime import datetime
from cvDebug import CvDebug
import argparse
import json
import sys
import cv2
# TODO allow storing to a directory (prepended with date or not) --- TWW
parser = argparse.ArgumentParser(description='record and save a camera video')
parser.add_argument('-d', '--debug', action='store_true', help='turn on debugging')
parser.add_argument('-c', '--camera', type=int, default=0, help='camera number for recording the video')
parser.add_argument('-o', '--out', type=str, default='out.avi', help='name of the output-file')
parser.add_argument('-f', '--fps', type=int, default=10, help='frames per second for output video')
parser.add_argument('-l', '--length', type=int, default=1, help='length of time to record video in seconds')
parser.add_argument('-W', '--width', type=int, default=640, help='width of the image')
parser.add_argument('-H', '--height', type=int, default=480, help='height of the image')
parser.add_argument('-D', '--prepend-date', action='store_true')
parser.add_argument('-T', '--prepend-time', action='store_true')
parser.add_argument('--codec', type=str, default='XVID', help='codec to use when writing video')
# TODO argument to separate out image capture --- TWW
args = parser.parse_args(sys.argv[1:])
now = datetime.now()
if args.prepend_time is True:
args.out = '{0}_{1}'.format(now.time().strftime('%H%M%S'), args.out)
if args.prepend_date is True:
args.out = '{0}_{1}'.format(now.today().strftime('%Y%m%d'), args.out)
d = CvDebug(args.debug)
def main():
    """Record args.fps * args.length frames from camera args.camera into args.out.

    On success prints a JSON object {"file": <output name>} on stdout so the
    parent Node.js process can parse it.  Exits non-zero if the camera or the
    video container cannot be opened, or if any frame read fails.
    """
    d.time('opening camera', args.camera)
    cap = cv2.VideoCapture(args.camera)
    if not cap.isOpened():
        print('opening camera failed')
        cap.release()
        # sys.exit instead of the builtin exit(): exit() is provided by the
        # site module for interactive use and may be absent when run with -S.
        sys.exit(1)

    d.time('setting width and height')
    # cv2.cv.CV_CAP_PROP_* constants: this targets the OpenCV 2.x bindings
    # (they moved to cv2.CAP_PROP_* in OpenCV 3+).
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, args.width)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, args.height)

    # Read back what the driver actually accepted -- the camera may clamp
    # the requested geometry, and the writer must match the real frame size.
    height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    d.log('Height:', height)
    width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
    d.log('Width:', width)
    brightness = cap.get(cv2.cv.CV_CAP_PROP_BRIGHTNESS)
    d.log('Brightness:', brightness)
    contrast = cap.get(cv2.cv.CV_CAP_PROP_CONTRAST)
    d.log('Contrast:', contrast)
    saturation = cap.get(cv2.cv.CV_CAP_PROP_SATURATION)
    d.log('Saturation:', saturation)

    d.time('opening video container', args.out)
    d.log('codec {0}, fps {1}, geo {2}x{3}'.format(args.codec, args.fps, width, height))
    vid = cv2.VideoWriter(args.out, cv2.cv.CV_FOURCC(*args.codec), args.fps,
                          (int(width), int(height)), True)
    d.time('container opened')
    if not vid.isOpened():
        print('opening video container failed')
        cap.release()
        vid.release()
        d.destroy_image_windows()
        sys.exit(1)

    # Capture exactly fps * length frames; a failed read is logged but does
    # not abort the recording, it only flips the final exit code.
    exit_code = 0
    total_frames = args.fps * args.length
    for frame_no in range(total_frames):
        ret, img = cap.read()
        if ret:
            vid.write(img)
            d.time('frame', frame_no, 'written')
        else:
            exit_code = 1
            print('frame', frame_no, 'failed')

    print(json.dumps({"file": args.out}))
    d.time('releasing capture & video container')
    cap.release()
    vid.release()
    d.time('released')
    d.destroy_image_windows()
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
And when I run it with python script.py -d using my Samsung R580 built-in webcam I get the following output:
[0.000 s][Δ 0.000 s] opening camera 0
[0.151 s][Δ 0.151 s] setting width and height
Height: 480.0
Width: 640.0
Brightness: 0.850000023842
Contrast: 0.649999976158
Saturation: 0.600000023842
[0.173 s][Δ 0.022 s] opening video container out.avi
codec XVID, fps 10, geo 640.0x480.0
[0.189 s][Δ 0.016 s] container opened
[2.353 s][Δ 2.163 s] frame 0 written
[2.437 s][Δ 0.085 s] frame 1 written
[2.528 s][Δ 0.091 s] frame 2 written
[2.618 s][Δ 0.090 s] frame 3 written
[2.713 s][Δ 0.095 s] frame 4 written
[2.804 s][Δ 0.091 s] frame 5 written
[2.892 s][Δ 0.088 s] frame 6 written
[2.985 s][Δ 0.094 s] frame 7 written
[3.076 s][Δ 0.091 s] frame 8 written
[3.168 s][Δ 0.092 s] frame 9 written
{"file": "out.avi"}
[3.169 s][Δ 0.000 s] releasing capture & video container
[3.196 s][Δ 0.028 s] released
Notice how the first frame takes over 2 seconds to retrieve! But then subsequent frames come after about every 0.091 seconds. I am not certain how to explain this behavior.
Also, I purchased a different webcam (Logitech C920, as recommended by Derek Molloy) and it certainly speeds up the capture of the first frame. On my laptop it's acceptably fast, but on a BeagleBoard it still takes 0.7-1.0 seconds for that first frame (and about 0.2 seconds for every subsequent frame). The frame-rate is acceptable, but I essentially want to minimize the amount of time it takes to start recording after the start of the script.
I tried to use the constant discussed in this stackoverflow (cv2.cv.CV_CAP_PROP_BUFFERSIZE) but the constant doesn't seem to exist in python-opencv that I installed.
Does anyone have experience with this that can lend some insight into what is going on? Does anyone have resources they can recommend reading up on?
Related
Since I am very much new to this language, with whatever little knowledge I have, I have written code.
The code is getting executed thrice, but the three images are being overwritten and at the end there is just one image that is available instead of 3 different images (which is my goal).
import cv2

# helps in turning on the camera
cap = cv2.VideoCapture(0)

# The camera clicks the image 3 times.  Each capture is saved under a
# DISTINCT filename -- the original reused one constant path, so the three
# writes overwrote each other and only the last image survived.
for shot in range(3):
    # creating a frame
    check, frame = cap.read()
    print(check)
    print(frame)
    if not check:
        # read failed; do not pass an invalid frame to cvtColor
        break
    # conversion of image to grayscale
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # shows the frame
    cv2.imshow("capturing", image)
    # saving of image, one file per iteration
    status = cv2.imwrite('capture_{0}.jpg'.format(shot), image)
    print("Image written to file-system : ", status)
    cv2.waitKey(0)

# Turn off the camera once, after the loop.  Note the parentheses:
# the original wrote `cap.release`, which references the method but
# never calls it, so the camera was never released.
cap.release()
cv2.destroyAllWindows()
Question about framerates on the picamera v2:
According to the documentation of picamera , the following framerates are feasible for this hardware:
Resolution Aspect Ratio Framerates Video Image FoV Binning
1 1920x1080 16:9 0.1-30fps x Partial None
2 3280x2464 4:3 0.1-15fps x x Full None
3 3280x2464 4:3 0.1-15fps x x Full None
4 1640x1232 4:3 0.1-40fps x Full 2x2
5 1640x922 16:9 0.1-40fps x Full 2x2
6 1280x720 16:9 40-90fps x Partial 2x2
7 640x480 4:3 40-90fps x Partial 2x2
However, when gathering images with the capture_sequence method (which in the documentation is referred to as the fastest method) I don't get close to these numbers.
For the 1280x720 rate it maxes out at 25 fps, at 640x480 it maxes out close to 60.
The calculations I'm performing are irrelevant i.e. commenting them out doesn't make a difference (calculations are fast enough to not be the cause of the issue).
If somebody sees any flaws in what I'm trying to do whose removal would increase the framerate, please let me know.
import io
import time
import picamera
#import multiprocessing
from multiprocessing.pool import ThreadPool
#import threading
import cv2
#from PIL import Image
from referenceimage import ReferenceImage
from detectobject_stream import detectobject_stream
from collections import deque
from datawriter import DataWriter
# Shared state consumed by outputs() below: the reference image used for
# object detection, one worker thread per CPU, and a queue of in-flight
# detection tasks.
backgroundimage=ReferenceImage()
threadn = cv2.getNumberOfCPUs()
pool = ThreadPool(processes = threadn)
pending = deque()
Numberofimages=500  # how many frames capture_sequence will request
starttime=time.time()
#datawrite=DataWriter()
#datawrite.start()
def outputs():
    """Generator of capture streams for picamera.capture_sequence.

    Yields the same BytesIO buffer for every frame; after each capture the
    camera resumes here, where the freshly captured JPEG/BGR bytes are handed
    to the thread pool for detection and the buffer is rewound for reuse.
    NOTE(review): indentation was lost in the paste -- the placement of the
    timing print relative to the if-block is reconstructed; confirm against
    the original script.
    """
    stream = io.BytesIO()
    Start=True
    global backgroundimage
    for i in range(Numberofimages):
        # Hand the (reused) buffer to the camera; execution pauses until the
        # next frame has been written into it.
        yield stream
        #print time.time()-starttime
        #start = time.time()
        # Drain results of any finished detection tasks first.
        while len(pending) > 0 and pending[0].ready():
            timestamps = pending.popleft().get()
            #print timestamps
        # Only submit new work when a worker slot is free; otherwise the
        # frame is simply dropped, which keeps capture from stalling.
        if len(pending)<threadn:
            stream.seek(0)
            task = pool.apply_async(detectobject_stream, (stream.getvalue(),backgroundimage,Start,0))
            pending.append(task)
            Start=False
        stoptime = time.time()
        print stoptime-start
        # Rewind and empty the buffer so the next capture starts clean.
        stream.seek(0)
        stream.truncate()
        #print i
with picamera.PiCamera() as camera:
    #camera.resolution = (640, 480)
    camera.resolution = (1280, 720)
    camera.framerate = 60
    camera.start_preview()
    # Give the sensor time to settle (auto-gain / white balance) before the
    # timed capture begins.
    time.sleep(2)
    start = time.time()
    # use_video_port=True uses the video splitter port: lower quality but
    # much faster capture than the still port.
    camera.capture_sequence(outputs(), format='bgr', use_video_port=True)
    finish = time.time()
    # Effective throughput over the whole run, not the sensor framerate.
    print('Captured images at %.2ffps' % (Numberofimages / (finish - start)))
thanks in advance
Assume that you have temperature data with a sampling rate of 512 samples per second. I want to record this data synchronized with the camera images. The resulting record is going to be just a video file.
I can plot this data with matplotlib and pyqtgraph.
I did it with matplotlib but video sampling rate is decreasing. Here is the code with random incoming data.
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Webcam in, XviD-encoded output.avi out, at 1 fps.
cap = cv2.VideoCapture(0)
fourcc = cv2.cv.CV_FOURCC(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 1, (800, 597))

# Sample time axis: 512 points.
t = np.arange(0, 512, 1)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Fresh random data sequence standing in for the sensor samples.
    nse = np.random.randn(len(t))
    # Left subplot: the data trace; right subplot: the camera frame.
    plt.subplot(1, 2, 1)
    plt.plot(t, nse)
    plt.subplot(1, 2, 2)
    plt.imshow(frame)
    # Rasterise the composed figure by round-tripping through a PNG on
    # disk, then append that raster to the output video.
    plt.savefig("last.png")
    plt.clf()
    img = cv2.imread("last.png")
    out.write(img)
    cv2.imshow('frame', img)
    # Press q in the preview window to stop recording.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()      # release the webcam
out.release()      # finalise the video file
cv2.destroyAllWindows()
import cv2
import numpy as np  # was missing in the original, which used np.* throughout

# Draw the sample sequence as a polyline on a blank 480x640 canvas.
canvas = np.zeros((480, 640))
t = np.arange(0, 512, 1)  # sample time axis, 512 points
nse = np.random.randn(len(t))

# Normalise both axes to the canvas dimensions.
t = 640 * t / 512
nse = 480 * nse / nse.max()

# np.int was removed from NumPy; plain int is the equivalent dtype here.
pts = np.vstack((t, nse)).T.astype(int)
cv2.polylines(canvas, [pts], False, 255)

# The original called a bare imshow(canvas, 'gray'), which is not defined
# in cv2; cv2.imshow takes (window_name, image) and needs a waitKey to
# actually display the window.
cv2.imshow('gray', canvas)
cv2.waitKey(0)
This creates the plot in a new zero array (480 x 640). t and nse should be normalized to the canvas dimensions as you like.
If your capture frame has 480x640 dimensions too, then you can prepare cv2.VideoWriter for the combined size and stack frame and canvas using np.vstack (giving a 960x640 array) or np.hstack (giving a 480x1280 array), which can be used as the buffer to send to VideoWriter.
So I am starting to get very confused by the openCV libraries ability to write out video to disk, because even the openCV documentation is not terribly clear as to how the video actually gets written in this case. The code I have below seems to collect the data just fine but the video file it tries to write has no data in it. All I want to do is take a video that I know I can, change the data within it to a ramp between 0 and 255, and then write that data back out to disk. However, the final I/O step is not cooperating for reasons I don't understand. Can anyone help? Find the code below:
import numpy as np
import cv2
import cv2.cv as cv
cap = cv2.VideoCapture("/Users/Steve/Documents/TestVideo.avi")  # the input video

# Query properties of the input so the output matches it.
height = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
width = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)
fps = cap.get(cv.CV_CAP_PROP_FPS)
# Many .avi files report fps as 0, which makes VideoWriter silently
# produce an empty file -- fall back to a sane default.
if fps <= 0:
    fps = 30

fourcc = cv2.cv.CV_FOURCC(*'PDVC')  # This is essential for testing
out = cv2.VideoWriter('output.avi', fourcc, int(fps), (int(width), int(height)))
# Fail loudly instead of writing nothing: an unsupported fourcc is the
# other common cause of an empty output file on some platforms.
if not out.isOpened():
    raise IOError("VideoWriter failed to open; try another fourcc, e.g. 'mp4v'")

xaxis = np.arange(width, dtype='int')
yaxis = np.arange(height, dtype='int')
xx, yy = np.meshgrid(xaxis, yaxis)
# Horizontal ramp image that scales from 0-255 across the width.
ramp = 256 * xx / int(width)

i = 0
while cap.isOpened():
    if i % 100 == 0:
        print(i)  # progress marker every 100 frames
    i += 1
    ret, frame = cap.read()  # grab a frame
    if not ret:
        break
    # Replace the frame data with the ramp; the camera is B/W so all three
    # channels get the same values.
    frame[:, :, 0] = ramp
    frame[:, :, 1] = ramp
    frame[:, :, 2] = ramp
    out.write(frame)  # append the modified frame to output.avi
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()  # clear windows and close both files
out.release()
cv2.destroyAllWindows()
Your code is generally correct, but is likely silently failing at some step along the way.
try adding some debug lines:
out = cv2.VideoWriter('output2.avi',fourcc, int(fps), (int(width),int(height)))
or
else:
print "frame %d is false" % i
break
When I was testing your code locally I found the fps was set to 0 for most .avi files I read. Manually setting it to 15 or 30 worked.
I also didn't have any luck getting your fourcc to work on my machine (osx), but this one worked fine.
fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
So I need to get web camera fps rate in OpenCV. Which function can do such thing for?
int cvGetCaptureProperty( CvCapture* capture, int property_id);
with property_id = CV_CAP_PROP_FPS
It seems that for live webcam capture, you can set an arbitrary fps and read back that same fps, which has nothing to do with the real fps from webcam. Is it a bug?
For example:
cvSetCaptureProperty(capture,CV_CAP_PROP_FPS,500);
and later
double rates = cvGetCaptureProperty(capture,CV_CAP_PROP_FPS);
printf("%f\n",rates);
will give you 500.
But if I timed it using web cam fps link, it's around the normal 30fps.
In my case, fps = video.get(cv2.CAP_PROP_FPS) did not work.
So, I found this code in this link:
https://www.learnopencv.com/how-to-find-frame-rate-or-frames-per-second-fps-in-opencv-python-cpp/
import cv2
import time
if __name__ == '__main__':
    video = cv2.VideoCapture(1)

    # Find OpenCV version: the capture-property constants moved between
    # OpenCV 2.x (cv2.cv.CV_CAP_PROP_*) and 3.x+ (cv2.CAP_PROP_*).
    (major_ver, _, _) = (cv2.__version__).split('.')

    # With webcam get(CV_CAP_PROP_FPS) does not work.
    # Let's see for ourselves.
    if int(major_ver) < 3:
        fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
        print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
    else:
        fps = video.get(cv2.CAP_PROP_FPS)
        print "Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)

    # Number of frames to capture
    num_frames = 120
    print "Capturing {0} frames".format(num_frames)

    # Start time
    start = time.time()

    # Grab a few frames and time how long the reads actually take -- this
    # measured rate is the trustworthy number for a live webcam.
    for i in xrange(0, num_frames):
        ret, frame = video.read()

    # End time
    end = time.time()

    # Time elapsed
    seconds = end - start
    print "Time taken : {0} seconds".format(seconds)

    # Calculate frames per second
    fps = num_frames / seconds
    print "Estimated frames per second : {0}".format(fps);

    # Release video
    video.release()
OpenCV 2 solution:
C++: double VideoCapture::get(int propId)
E.g.
VideoCapture myvid("video.mpg");
int fps=myvid.get(CV_CAP_PROP_FPS);