I just finished writing a face recognition program in Python on an Ubuntu system.
But when I move the work to a Raspberry Pi, it gives this error.
This is the full error:
AttributeError: 'module' object has no attribute 'createLBPHFaceRecognizer'
What is the solution?
Thank you.
import cv2
import sys
import cv
import glob
import numpy as np
import os

labeltest = []
Images = []
Len = 0
model = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 70.0)
Labels = []
textsay = ""

# *********** Read *****************\\
def read():
    arr = {}
    with open("csv.ext") as f:
        for line in f:
            arr = line.split("%", 2)
            labeltest.append(arr[1])
            Images.append(cv2.imread(arr[0], cv2.IMREAD_GRAYSCALE))
    label = range(0, len(labeltest))
    for i in range(0, len(labeltest)):
        label[i] = int(labeltest[i])
    print (label)
    model.train(np.asarray(Images), np.asarray(label))
    model.save("mezo.xml")
    model.load("mezo.xml")
# //*********** Read *****************

def writetofile(key):
    fo = open("csv.ext", "a+")
    fo.write(key)
    fo.write("\n")

def searchName(key):
    lines = tuple(open("Names.txt", "r"))
    for i in range(0, len(lines)):
        test = lines[i].split("\n")
        print test[0]
        if str(key.lower()) == str(test[0].lower()):
            return i
    return -1

def readName():
    lines = tuple(open("Names.txt", "r"))
    for i in range(0, len(lines)):
        Labels.append(lines[i])
    print Labels

def AddName(key):
    fo = open("Names.txt", "a+")
    fo.write(key)
    fo.write("\n")
    readName()

# *********** Add *****************\\
def Add(faces, gray):
    count = Len + 100
    for (x, y, w, h) in faces:
        filename = "/home/mohammad/Desktop/traning/%03d" % count + ".pgm"
        f = gray[y:y+h, x:x+w]
        f = cv2.resize(f, (92, 112), interpolation=cv2.INTER_LANCZOS4)
        newName = raw_input("Enter the Name : ")
        index = searchName(newName)
        if index == -1:
            index = len(Labels)
            AddName(newName)
        filenameIn = filename + "%" + str(index)
        writetofile(filenameIn)
        cv2.imwrite(filename, f)
        count += 1
    read()
# //*********** Add *****************

path = glob.glob("/home/mohammad/Desktop/traning/*.pgm")
Len = len(path) - 1

cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

count = 0
video_capture = cv2.VideoCapture(0)
read()
readName()

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    cv2.waitKey(10)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        frame,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        f = gray[y:y+h, x:x+w]
        f = cv2.resize(f, (92, 112), interpolation=cv2.INTER_LANCZOS4)
        cv2.imwrite("11.pgm", f)
        label, confidence = model.predict(f)
        print "Threshold : ", model.getDouble("threshold")
        if label > -1:
            if Labels[label] != textsay:
                cmd = 'espeak "{0}" 2>/dev/null'.format(Labels[label])
                os.system(cmd)
                textsay = Labels[label]
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, Labels[label], (x, y-10), font, 1.0, (255, 255, 255))
            print "\n" + str(Labels[label]) + " | " + str(confidence)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    k = cv2.waitKey(5) & 0xFF
    if k == 97:  # 'a' adds the current face to the training set
        Add(faces, gray)
    if k == 27:  # Esc quits
        exit()
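For reference: this error usually means the OpenCV build on the Pi exposes the face recognizers under a different name. OpenCV 2.4 has cv2.createLBPHFaceRecognizer, while OpenCV 3.x moves the factory into the cv2.face submodule (part of the opencv-contrib modules). A version-tolerant sketch, assuming the Pi has the contrib modules installed:

import cv2

# Pick whichever factory the installed OpenCV version actually exposes.
if hasattr(cv2, "createLBPHFaceRecognizer"):
    # OpenCV 2.4
    model = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 70.0)
elif hasattr(cv2.face, "LBPHFaceRecognizer_create"):
    # OpenCV 3.3+
    model = cv2.face.LBPHFaceRecognizer_create(1, 8, 8, 8, 70.0)
else:
    # OpenCV 3.0-3.2
    model = cv2.face.createLBPHFaceRecognizer(1, 8, 8, 8, 70.0)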
From the MSCOCO dataset segmentation annotations, how can I extract just the segmented objects themselves? For example, given an image of a person standing with a house in the background, how can I extract just the person themselves?
If your data is already in FiftyOne, then you can write a simple function using OpenCV and NumPy to crop the segmentations in your FiftyOne labels. It could look something like this:
import os
import cv2
import numpy as np
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F

def extract_classwise_instances(samples, output_dir, label_field, ext=".png"):
    print("Extracting object instances...")
    for sample in samples.iter_samples(progress=True):
        img = cv2.imread(sample.filepath)
        img_h, img_w, c = img.shape
        for det in sample[label_field].detections:
            mask = det.mask
            [x, y, w, h] = det.bounding_box
            x = int(x * img_w)
            y = int(y * img_h)
            h, w = mask.shape
            mask_img = img[y:y+h, x:x+w, :]
            alpha = mask.astype(np.uint8) * 255
            alpha = np.expand_dims(alpha, 2)
            mask_img = np.concatenate((mask_img, alpha), axis=2)
            label = det.label
            label_dir = os.path.join(output_dir, label)
            if not os.path.exists(label_dir):
                os.mkdir(label_dir)
            output_filepath = os.path.join(label_dir, det.id + ext)
            cv2.imwrite(output_filepath, mask_img)

label_field = "ground_truth"
classes = ["person"]

dataset = foz.load_zoo_dataset(
    "coco-2017",
    split="validation",
    label_types=["segmentations"],
    classes=classes,
    max_samples=20,
    label_field=label_field,
    dataset_name=fo.get_default_dataset_name(),
)

view = dataset.filter_labels(label_field, F("label").is_in(classes))

output_dir = "/tmp/coco-segmentations"
os.makedirs(output_dir, exist_ok=True)

extract_classwise_instances(view, output_dir, label_field)
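After running this, output_dir contains one subdirectory per class, and each detection is saved as a four-channel PNG named by its detection ID; the alpha channel holds the instance mask, so everything outside the segmented object is transparent.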
I am working on cropping images in a Django application, using this tutorial: Crop Images in Django.
My form:
class UploadImageForm(forms.ModelForm):
    x = forms.FloatField(widget=forms.HiddenInput())
    y = forms.FloatField(widget=forms.HiddenInput())
    width = forms.FloatField(widget=forms.HiddenInput())
    height = forms.FloatField(widget=forms.HiddenInput())
    primaryphoto = forms.ImageField(required=False,
        error_messages={'invalid': _("Image files only")}, widget=forms.FileInput)

    class Meta:
        model = User
        fields = ['primaryphoto', 'x', 'y', 'width', 'height']

    def save(self):
        user = super(UploadImageForm, self).save()
        x = self.cleaned_data.get('x')
        y = self.cleaned_data.get('y')
        w = self.cleaned_data.get('width')
        h = self.cleaned_data.get('height')
        image = Image.open(user.primaryphoto)
        cropped_image = image.crop((x, y, w + x, h + y))
        resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
        resized_image.save(user.primaryphoto.path)
        return user
My view:
def upload_image(request):
    if request.method == 'POST':
        form = UploadImageForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/profile')
    else:
        form = UploadImageForm(instance=request.user)
    return render(request, 'student/uploadimageform.html', {'form': form})
storage_backend.py:
from storages.backends.s3boto3 import S3Boto3Storage

class MediaStorage(S3Boto3Storage):
    location = 'media'
    file_overwrite = False
However, when I uploaded it to run on AWS, I got the error message that the backend does not support absolute paths (in reference to primaryphoto.path in the form where the photo is cropped). I was wondering what I have to change to get it working with S3. I've found some resources that say to change primaryphoto.path to primaryphoto.name, but that hasn't worked for me. Do you have any recommendations to solve this problem?
The image is uploaded to the S3 bucket, but the code still throws the error above. Please, can anyone help me? Thanks in advance.
Edit: my updated save() method:
def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    try:
        image = Image.open(user.primaryphoto)
        cropped_image = image.crop((x, y, w + x, h + y))
        resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
        resized_image.save(user.primaryphoto.path)
    except:
        pass
    return user
The issue here is that the code throws the error, yet the image uploads properly both locally and to the S3 bucket; to handle the path error I am using a try/except block.
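For background, a likely cause, assuming standard django-storages behavior: remote backends such as S3Boto3Storage have no local filesystem path, so accessing FieldFile.path raises exactly this error; the file is only reachable through its .name (the key within the bucket) plus the storage API. A minimal sketch of the distinction, reusing the primaryphoto field from the question:

from django.core.files.storage import default_storage as storage

# user.primaryphoto.path -> raises "backend doesn't support absolute
#     paths" on remote storages such as S3
# user.primaryphoto.name -> the storage key, available on any backend
with storage.open(user.primaryphoto.name, "rb") as fh:
    data = fh.read()  # works the same for local disk and S3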
For me, default_storage.write() did not work and image.save() did not work, but this one worked. See this code if anyone is still interested. I apologize for the indentation. My project was a small Django project using Cloudinary.
from io import BytesIO
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage

def save(self, *args, **kargs):
    super(User, self).save(*args, **kargs)
    # After save, read the file
    image_read = storage.open(self.profile_image.name, "r")
    image = Image.open(image_read)
    if image.height > 200 or image.width > 200:
        size = 200, 200
        # Create a buffer to hold the bytes
        imageBuffer = BytesIO()
        # Resize
        image.thumbnail(size, Image.ANTIALIAS)
        # Save the image as jpeg to the buffer
        image.save(imageBuffer, image.format)
        # Check whether it is resized
        image.show()
        # Save the modified image
        user = User.objects.get(pk=self.pk)
        user.profile_image.save(self.profile_image.name, ContentFile(imageBuffer.getvalue()))
        image_read = storage.open(user.profile_image.name, "r")
        image = Image.open(image_read)
        image.show()
    image_read.close()
Try the following instead.
I haven't tested it, but some explanation should help: we write the cropped image into an in-memory buffer and use it to create a Django InMemoryUploadedFile, so the storage path is never used. Try it and let me know if you face any other errors.
import os
from io import BytesIO as StringIO  # python3
from django.core.files.uploadedfile import InMemoryUploadedFile

def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    image = Image.open(user.primaryphoto)
    cropped_image = image.crop((x, y, w + x, h + y))
    resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
    # A cropped/resized PIL image carries no name of its own, so take
    # the base name from the original file field
    filename = os.path.splitext(user.primaryphoto.name)[0]
    output = StringIO()
    resized_image.save(output, format='JPEG', quality=95)
    output.seek(0)  # change the stream position back to the start
    new_image = InMemoryUploadedFile(
        output, 'ImageField', "%s.jpg" % filename, 'image/jpeg',
        output.getbuffer().nbytes, None)  # size of the buffered content
    user.primaryphoto = new_image
    user.save()
    return user
This one worked for me. Just replace the save() method in your form.
from django.core.files.storage import default_storage as storage

def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    image = Image.open(user.primaryphoto)
    cropped_image = image.crop((x, y, w + x, h + y))
    resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
    fh = storage.open(user.primaryphoto.name, "w")
    picture_format = 'png'
    resized_image.save(fh, picture_format)
    fh.close()
    resized_image.save(user.primaryphoto.path)
    return user
For those redirected here from Google, here's code tested and working on AWS, based on the answer from @Alexandar Dimitro and other comments on this page.
from django.core.files.storage import default_storage as storage

def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    image = Image.open(user.primaryphoto)
    cropped_image = image.crop((x, y, w + x, h + y))
    resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
    fh = storage.open(user.primaryphoto.name, "wb")
    picture_format = 'png'
    resized_image.save(fh, picture_format)
    fh.close()
    return user
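Compared with the earlier answer, two details matter here: the storage file is opened in binary mode ("wb"), since PIL writes bytes, and the final resized_image.save(user.primaryphoto.path) call is gone, because .path is exactly what a remote backend like S3 cannot provide.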
I want to use wxPython to show the trajectory of a random walk in real time. However, the panel is updated only once at the end, showing the entire random walk, instead of updating step by step and showing the time course.
My first idea was to use wx.ClientDC().DrawPoint(), but the result was as described above: I did not see single points being drawn; only the final result was shown.
So instead I thought about using wx.MemoryDC to draw the trajectory to a bitmap stored in memory and then using wx.ClientDC.DrawBitmap() to copy the buffered image to the screen at set time intervals, in case flipping the image was the bottleneck. The result is still the same, so I am hoping for your help.
The purpose of this exercise is to replace the random walk with positional data coming from an eye tracker with a sampling rate of 1000 Hz, and I would like to visualize the trajectory in as close to real time as possible (the monitor's refresh rate is 120 Hz).
This is my code (most of it comes from here):
import wx
import random
import time
from time import asctime

#-------------------------------------------------------------------

def jmtime():
    return '[' + asctime()[11:19] + '] '

#-------------------------------------------------------------------

class MyDrawingArea(wx.Window):

    def __init__(self, parent, id):
        sty = wx.NO_BORDER
        wx.Window.__init__(self, parent, id, style=sty)
        self.parent = parent
        self.SetBackgroundColour(wx.WHITE)
        self.SetCursor(wx.CROSS_CURSOR)
        # Some initialisation, just to remind the user that a variable
        # called self.BufferBmp exists. See self.OnSize().
        self.BufferBmp = None
        wx.EVT_SIZE(self, self.OnSize)
        wx.EVT_PAINT(self, self.OnPaint)
        wx.EVT_LEFT_DOWN(self, self.OnClick)

    def OnSize(self, event):
        print jmtime() + 'OnSize in MyDrawingArea'
        # Get the size of the drawing area in pixels.
        self.wi, self.he = self.GetSizeTuple()
        # Create BufferBmp and set the same size as the drawing area.
        self.BufferBmp = wx.EmptyBitmap(self.wi, self.he)
        memdc = wx.MemoryDC()
        memdc.SelectObject(self.BufferBmp)
        # Drawing job
        ret = self.DoSomeDrawing(memdc)
        if not ret:  # error
            self.BufferBmp = None
            wx.MessageBox('Error in drawing', 'CommentedDrawing', wx.OK | wx.ICON_EXCLAMATION)

    def OnPaint(self, event):
        print jmtime() + 'OnPaint in MyDrawingArea'
        dc = wx.PaintDC(self)
        dc.BeginDrawing()
        if self.BufferBmp != None:
            print jmtime() + '...drawing'
            dc.DrawBitmap(self.BufferBmp, 0, 0, True)
        else:
            print jmtime() + '...nothing to draw'
        dc.EndDrawing()

    def OnClick(self, event):
        pos = event.GetPosition()
        dc = wx.ClientDC(self)
        dc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
        dcwi, dche = dc.GetSizeTuple()
        x = pos.x
        y = pos.y
        time_start = time.time()
        memdc = wx.MemoryDC()
        memdc.SelectObject(self.BufferBmp)
        memdc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
        count = 1
        runtime = 5
        while (time.time() - time_start) < runtime:
            x, y = random_walk(x, y, dcwi, dche)
            memdc.DrawPoint(x, y)
            if (time.time() - time_start) > count * runtime * 0.1:
                print jmtime() + 'Random walk in MyDrawingArea'
                count += 1
                dc.BeginDrawing()
                dc.DrawBitmap(self.BufferBmp, 0, 0, True)
                dc.EndDrawing()
        dc.BeginDrawing()
        dc.DrawBitmap(self.BufferBmp, 0, 0, True)
        dc.EndDrawing()
    # End of def OnClick

    def DoSomeDrawing(self, dc):
        try:
            print jmtime() + 'DoSomeDrawing in MyDrawingArea'
            dc.BeginDrawing()
            #~ raise OverflowError  # for test
            # Clear everything
            dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
            dc.Clear()
            dc.EndDrawing()
            return True
        except:
            return False

#-------------------------------------------------------------------

class MyPanel(wx.Panel):

    def __init__(self, parent, id):
        wx.Panel.__init__(self, parent, id, wx.DefaultPosition, wx.DefaultSize)
        self.drawingarea = MyDrawingArea(self, -1)
        self.SetAutoLayout(True)
        gap = 30  # in pixels
        lc = wx.LayoutConstraints()
        lc.top.SameAs(self, wx.Top, gap)
        lc.left.SameAs(self, wx.Left, gap)
        lc.right.SameAs(self, wx.Width, gap)
        lc.bottom.SameAs(self, wx.Bottom, gap)
        self.drawingarea.SetConstraints(lc)

#-------------------------------------------------------------------
# Usual frame. Can be resized, maximized and minimized.
# The frame contains one panel.

class MyFrame(wx.Frame):

    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, 'CommentedDrawing', wx.Point(0, 0), wx.Size(500, 400))
        self.panel = MyPanel(self, -1)
        wx.EVT_CLOSE(self, self.OnCloseWindow)

    def OnCloseWindow(self, event):
        print jmtime() + 'OnCloseWindow in MyFrame'
        self.Destroy()

#-------------------------------------------------------------------

class MyApp(wx.App):

    def OnInit(self):
        frame = MyFrame(None, -1)
        frame.Show(True)
        self.SetTopWindow(frame)
        return True

#-------------------------------------------------------------------

def random_walk(x, y, sizex=250, sizey=200):
    rn = random.randrange(0, 2)
    x_new = x + (1 - rn) - rn
    while x_new < 0 or x_new > sizex:
        rn = random.randrange(0, 2)
        x_new = x + (1 - rn) - rn
    rn = random.randrange(0, 2)
    y_new = y + (1 - rn) - rn
    while y_new < 0 or y_new > sizey:  # bound y by the height, not the width
        rn = random.randrange(0, 2)
        y_new = y + (1 - rn) - rn
    return x_new, y_new
# end of def random_walk

#-------------------------------------------------------------------

def main():
    print 'main is running...'
    app = MyApp(0)
    app.MainLoop()

#-------------------------------------------------------------------

if __name__ == "__main__":
    main()

#eof-------------------------------------------------------------------
This is the solution I came up with. Instead of using dc.DrawBitmap() to copy the buffered image to the screen, I use Update() and Refresh() to trigger a paint event. However, what I still don't understand is why I cannot use DrawBitmap() to accomplish the same.
The only difference is that OnPaint() uses PaintDC(), while in OnClick() I use ClientDC().
Anyway, this is my current code for OnClick():
def OnClick(self, event):
    pos = event.GetPosition()
    x = pos.x
    y = pos.y
    time_start = time.time()
    memdc = wx.MemoryDC()
    memdc.SelectObject(self.BufferBmp)
    dcwi, dche = memdc.GetSizeTuple()
    memdc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
    runtime = 10
    while (time.time() - time_start) < runtime:
        x, y = random_walk(x, y, dcwi, dche)
        memdc.SelectObject(self.BufferBmp)
        memdc.DrawPoint(x, y)
        memdc.SelectObject(wx.NullBitmap)
        self.Update()
        self.Refresh()
    print jmtime() + 'Random walk in MyDrawingArea done'
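A plausible explanation, though I have not verified it: while the while loop runs, the wx event loop never gets control, and drawing through a wx.ClientDC outside a paint event is not reliable on all platforms, whereas Refresh() plus Update() pushes the redraw through the regular OnPaint() path immediately. A sketch of a hypothetical wx.Timer variant that keeps the event loop running between steps (attribute and method names beyond those in the code above are my own):

def OnClick(self, event):
    # Start stepping the walk from a timer instead of a blocking loop
    pos = event.GetPosition()
    self.walk_x, self.walk_y = pos.x, pos.y
    self.timer = wx.Timer(self)
    self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
    self.timer.Start(10)  # step interval in milliseconds

def OnTimer(self, event):
    # Draw one step into the buffer, then repaint via the normal path
    memdc = wx.MemoryDC()
    memdc.SelectObject(self.BufferBmp)
    memdc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
    self.walk_x, self.walk_y = random_walk(self.walk_x, self.walk_y,
                                           self.wi, self.he)
    memdc.DrawPoint(self.walk_x, self.walk_y)
    memdc.SelectObject(wx.NullBitmap)
    self.Refresh()  # mark the window dirty
    self.Update()   # force an immediate repaint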
I copied some code from a web page into Python 2.7, but I didn't succeed in running it.
The code is:
# Raspberry Pi Color Tracking Project
# Code written by Oscar Liang
# 30 Jun 2013

import cv2.cv as cv
import smbus

bus = smbus.SMBus(1)
address = 0x04

def sendData(value):
    bus.write_byte(address, value)
    # bus.write_byte_data(address, 0, value)
    return -1

def readData():
    state = bus.read_byte(address)
    # number = bus.read_byte_data(address, 1)
    return state

def ColorProcess(img):
    # returns thresholded image
    imgHSV = cv.CreateImage(cv.GetSize(img), 8, 3)
    # converts BGR image to HSV
    cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)
    imgProcessed = cv.CreateImage(cv.GetSize(img), 8, 1)
    # converts the pixel values lying within the range to 255 and stores it in the destination
    cv.InRangeS(imgHSV, (100, 94, 84), (109, 171, 143), imgProcessed)
    return imgProcessed

def main():
    # captured image size, change to whatever you want
    width = 320
    height = 240
    capture = cv.CreateCameraCapture(0)
    # Over-write default captured image size
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    cv.NamedWindow( “output”, 1 )
    cv.NamedWindow( “processed”, 1 )
    while True:
        frame = cv.QueryFrame(capture)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        imgColorProcessed = ColorProcess(frame)
        mat = cv.GetMat(imgColorProcessed)
        # Calculating the moments
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        # Finding a big enough blob
        if (area > 60000):
            # Calculating the center position of the blob
            posX = int(moment10 / area)
            posY = int(moment01 / area)
            # check slave status and send coordinates
            state = readData()
            if state == 1:
                sendData(posX)
                sendData(posY)
                print ‘x: ‘ + str(posX) + ‘ y: ‘ + str(posY)
        # update video windows
        cv.ShowImage(“processed”, imgColorProcessed)
        cv.ShowImage(“output”, frame)
        if cv.WaitKey(10) >= 0:
            break
    return

if __name__ == “__main__”:
    main()
I solved it. The problem was that the code copied from the web page used curly quotation marks (“ ” and ‘ ’), which are invalid syntax in Python. The right code, with plain ASCII quotes, is:
import cv2.cv as cv
import smbus
import cv2

bus = smbus.SMBus(1)
address = 0x04

def sendData(value):
    bus.write_byte(address, value)
    return -1

def readData():
    state = bus.read_byte(address)
    return state

def ColorProcess(img):
    imgHSV = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)
    imgProcessed = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.InRangeS(imgHSV, (100, 94, 84), (109, 171, 143), imgProcessed)
    return imgProcessed

def main():
    width = 320
    height = 240
    capture = cv.CreateCameraCapture(0)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    cv.NamedWindow("output", 1)
    cv.NamedWindow("processed", 1)
    while True:
        frame = cv.QueryFrame(capture)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        imgColorProcessed = ColorProcess(frame)
        mat = cv.GetMat(imgColorProcessed)
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        if (area > 60000):
            posX = int(moment10/area)
            posY = int(moment01/area)
            ali = long(2000000)
            state = readData()
            if state == 1:
                sendData(posX)
                sendData(posY)
                print 'x: ' + str(posX) + 'y: ' + str(posY)
        cv.ShowImage("processed", imgColorProcessed)
        cv.ShowImage("output", frame)
        if cv.WaitKey(10) >= 0:
            break
    return

if __name__ == "__main__":
    main()
I have the code above, the corrected color-tracking script, and I want to modify it in several places: how can I use the Raspberry Pi camera instead of a USB camera?
I will be grateful to anyone who gives me a hint or writes the right code.
I will highly appreciate any help.
Thanks.
Run this command in LXTerminal on the Pi. It will take care of the drivers:
sudo modprobe bcm2835-v4l2
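For context, and assuming a standard Raspbian setup with the camera enabled in raspi-config: loading the bcm2835-v4l2 kernel module exposes the Raspberry Pi camera as a regular V4L2 device (/dev/video0), so the existing cv.CreateCameraCapture(0) call picks it up without any code changes. To load the driver automatically at every boot, it can be added to /etc/modules:

# /etc/modules — kernel modules to load at boot time
bcm2835-v4l2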