I am working on cropping images in a Django application, following this tutorial: Crop Images in Django.
My form:
from django import forms
from django.contrib.auth.models import User  # assumption: the project's user model
from django.utils.translation import ugettext_lazy as _  # gettext_lazy on newer Django
from PIL import Image

class UploadImageForm(forms.ModelForm):
    x = forms.FloatField(widget=forms.HiddenInput())
    y = forms.FloatField(widget=forms.HiddenInput())
    width = forms.FloatField(widget=forms.HiddenInput())
    height = forms.FloatField(widget=forms.HiddenInput())
    primaryphoto = forms.ImageField(
        required=False,
        error_messages={'invalid': _("Image files only")},
        widget=forms.FileInput)

    class Meta:
        model = User
        fields = ['primaryphoto', 'x', 'y', 'width', 'height']

    def save(self):
        user = super(UploadImageForm, self).save()
        x = self.cleaned_data.get('x')
        y = self.cleaned_data.get('y')
        w = self.cleaned_data.get('width')
        h = self.cleaned_data.get('height')
        image = Image.open(user.primaryphoto)
        cropped_image = image.crop((x, y, w + x, h + y))
        resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
        resized_image.save(user.primaryphoto.path)
        return user
My view:
def upload_image(request):
    if request.method == 'POST':
        form = UploadImageForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/profile')
    else:
        form = UploadImageForm(instance=request.user)
    return render(request, 'student/uploadimageform.html', {'form': form})
storage_backend.py:
from storages.backends.s3boto3 import S3Boto3Storage

class MediaStorage(S3Boto3Storage):
    location = 'media'
    file_overwrite = False
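For context, the backend is hooked up in settings.py roughly like this (the module path and bucket values below are placeholders, not my real settings):

# settings.py -- placeholders only
DEFAULT_FILE_STORAGE = 'myproject.storage_backend.MediaStorage'
AWS_STORAGE_BUCKET_NAME = 'my-bucket'
AWS_ACCESS_KEY_ID = '...'
AWS_SECRET_ACCESS_KEY = '...'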
However, when I deployed it to run on AWS, I got an error saying that the backend does not support absolute paths (in reference to primaryphoto.path in the form where the photo is cropped). I was wondering what I have to change to get it working with S3. I've found some resources that say to change primaryphoto.path to primaryphoto.name, but that hasn't worked for me. Do you have any recommendations to solve this problem?
The image is uploaded to the S3 bucket, but the crop step throws the error above. Thanks in advance.
Edit:
def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    try:
        image = Image.open(user.primaryphoto)
        cropped_image = image.crop((x, y, w + x, h + y))
        resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
        resized_image.save(user.primaryphoto.path)
    except NotImplementedError:
        # .path is not supported by remote storage backends, so on S3
        # the crop/resize step is silently skipped.
        pass
    return user
Here the error is still thrown, but the image uploads properly both locally and to the S3 bucket; to get past the path error I am wrapping the crop in a try/except block.
For me default.storage.write() did not work and image.save() did not work, but this one worked. See this code if anyone is still interested; apologies for the indentation. My project was a small Django project using Cloudinary.
from io import BytesIO
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from PIL import Image

def save(self, *args, **kargs):
    super(User, self).save(*args, **kargs)
    # After save, read the file back through the storage backend
    # (binary mode so PIL can parse it)
    image_read = storage.open(self.profile_image.name, "rb")
    image = Image.open(image_read)
    if image.height > 200 or image.width > 200:
        size = 200, 200
        # Create a buffer to hold the bytes
        imageBuffer = BytesIO()
        # Resize in place, preserving the aspect ratio
        image.thumbnail(size, Image.ANTIALIAS)
        # Save the image to the buffer in its original format
        image.save(imageBuffer, image.format)
        # Check whether it is resized (debug only)
        image.show()
        # Save the modified image back through the storage backend
        user = User.objects.get(pk=self.pk)
        user.profile_image.save(self.profile_image.name, ContentFile(imageBuffer.getvalue()))
        image_read = storage.open(user.profile_image.name, "rb")
        image = Image.open(image_read)
        image.show()
    image_read.close()
Try the following instead.
I haven't tested it, but some explanation may help: we write the cropped image into an in-memory buffer and wrap that in a Django InMemoryUploadedFile, so no filesystem path is needed. Try it and let me know if you face any other errors.
import os
from io import BytesIO as StringIO  # python3
from django.core.files.uploadedfile import InMemoryUploadedFile
from PIL import Image

def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    image = Image.open(user.primaryphoto)
    cropped_image = image.crop((x, y, w + x, h + y))
    resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
    # Take the base name from the uploaded file (a PIL image has no .name)
    filename = os.path.splitext(os.path.basename(user.primaryphoto.name))[0]
    output = StringIO()
    resized_image.save(output, format='JPEG', quality=95)
    output.seek(0)  # change the stream position back to the start
    new_image = InMemoryUploadedFile(
        output, 'ImageField', "%s.jpg" % filename, 'image/jpeg',
        output.getbuffer().nbytes,  # content length, not output.__sizeof__()
        None)
    user.primaryphoto = new_image
    user.save()
    return user
This one worked for me. Just replace the save() method in your form.
from django.core.files.storage import default_storage as storage

def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    image = Image.open(user.primaryphoto)
    cropped_image = image.crop((x, y, w + x, h + y))
    resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
    # Write back through the storage backend instead of a filesystem path
    fh = storage.open(user.primaryphoto.name, "w")
    picture_format = 'png'
    resized_image.save(fh, picture_format)
    fh.close()
    return user
For those redirected here from Google, here's code tested and working on AWS, based on the answer from @Alexandar Dimitro and other comments on this page.
from django.core.files.storage import default_storage as storage

def save(self):
    user = super(UploadImageForm, self).save()
    x = self.cleaned_data.get('x')
    y = self.cleaned_data.get('y')
    w = self.cleaned_data.get('width')
    h = self.cleaned_data.get('height')
    image = Image.open(user.primaryphoto)
    cropped_image = image.crop((x, y, w + x, h + y))
    resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
    # Open in binary mode ("wb") -- PIL writes bytes, not text
    fh = storage.open(user.primaryphoto.name, "wb")
    picture_format = 'png'
    resized_image.save(fh, picture_format)
    fh.close()
    return user
I'm able to successfully save and deploy a TF2 image segmentation model to AI Platform with the following code:
@tf.function(input_signature=[tf.TensorSpec(shape=(None), dtype=tf.string)])
def serving(input_image):
    # Convert bytes of jpeg input to float32 tensor for model
    def _input_to_feature(image_bytes):
        img = tf.image.decode_jpeg(image_bytes, channels=3)
        img = tf.image.convert_image_dtype(img, tf.float32) / 255.0
        img = tf.image.resize_with_pad(img, 256, 256)
        return img
    img = tf.map_fn(_input_to_feature, input_image, dtype=tf.float32)

    # Predict
    pred = model(img)

    def _pred_to_image(pred):
        pred = tf.cast(pred * 255, dtype=tf.uint8)
        img_str = tf.image.encode_png(pred, compression=-1, name=None)
        return img_str
    img_str = tf.map_fn(_pred_to_image, pred, dtype=tf.string)
    return img_str

tf.saved_model.save(model, export_dir=checkpoint_dir+'/saved_model', signatures=serving)
However, I get this error when sending a request like this:
img_str = base64.b64encode(open('sample_372.jpg', "rb").read()).decode()
response = service.projects().predict(name=name,body={'instances': [img_str]}).execute()
HttpError: <HttpError 400 when requesting https://ml.googleapis.com/v1/projects/nerveblox-268109/models/femoral/versions/v6:predict?alt=json returned "{ "error": "Expected image (JPEG, PNG, or GIF), got unknown format starting with \'/9j/4AAQSkZJRgAB\'\n\t [[{{node DecodeJpeg}}]]" }">
Has anybody had a similar issue? It seems like a problem with tf.image.decode_jpeg. I also tried tf.image.decode_image and got a similar error. I can use tf.image.decode_jpeg on my local Base64 encoding, so the function itself works, but somehow it's not receiving the same input on the server.
After a lot of experimentation (due to the limited and outdated TensorFlow documentation), I realized that for the serving function to decode Base64, the request should be sent like this: {'instances': [{'b64': image_base64}]}. Also, convert_image_dtype already scales the data to [0, 1] by itself, so the extra /255.0 should not be done. Also, map_fn only works on CPU, so it should be wrapped in with tf.device('/cpu:0'):. Finally, the most annoying part is the encoding to Base64. tf.io.encode_base64 is the only way I found in TensorFlow to encode to Base64, but it produces the web-safe alphabet, replacing / and + with _ and - so the result works in URLs. The Google API client, however, only accepts normal Base64 encoding, so I had to reverse this with regular expressions. Here's the updated serving function:
@tf.function(input_signature=[tf.TensorSpec(shape=(None), dtype=tf.string)])
def serving(input_image):
    # Convert bytes of jpeg input to float32 tensor for model
    def _input_to_feature(img_bytes):
        img = tf.image.decode_image(img_bytes, channels=3)
        img = tf.image.convert_image_dtype(img, tf.float32)
        img = tf.image.resize_with_pad(img, 256, 256)
        return img

    # Preprocessing
    with tf.device('/cpu:0'):
        img = tf.map_fn(_input_to_feature, input_image, dtype=tf.float32)

    # Prediction
    with tf.device('/gpu:0'):
        pred = model(img)
        colors = tf.constant([[0.2, 0.3, 0.4]])
        pred_rgb = tf.tensordot(pred, colors, axes=1)

    def _pred_to_image(pred):
        pred = tf.image.convert_image_dtype(pred, dtype=tf.uint8)
        pred_str = tf.image.encode_png(pred, compression=4)
        pred_encoded = tf.io.encode_base64(pred_str, pad=True)
        pred_encoded = tf.strings.regex_replace(pred_encoded, '_', '/')
        pred_encoded = tf.strings.regex_replace(pred_encoded, '-', '+')
        return pred_encoded

    # Postprocessing
    with tf.device('/cpu:0'):
        img_str = tf.map_fn(_pred_to_image, pred_rgb, dtype=tf.string)
    return img_str

tf.saved_model.save(model, export_dir=checkpoint_dir+'/saved_model', signatures=serving)
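For reference, the matching client-side call would then look something like this (a sketch reusing the service and name objects from the question):

import base64

with open('sample_372.jpg', 'rb') as f:
    image_base64 = base64.b64encode(f.read()).decode()

# The {'b64': ...} wrapper is what tells AI Platform to pass raw bytes
# to the serving signature.
response = service.projects().predict(
    name=name,
    body={'instances': [{'b64': image_base64}]}
).execute()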
I realized it also works without tf.map_fn, like this:
# Prediction
with tf.device('/gpu'):
    img = tf.image.decode_image(input_image[0], channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    img = tf.expand_dims(img, axis=0)
    pred = model(img)[0]
    colors = tf.constant([[0, 0, 0],
                          [1., 0, 0],
                          [0, 1., 0],
                          [0, 0, 1.],
                          [1., 1., 0]])
    pred_rgb = tf.tensordot(pred, colors, axes=1)
    not_background = pred[..., 0][..., None] < 0.9
    transparency = tf.cast(not_background, dtype=tf.float32) * 0.3
    rgba = tf.concat((pred_rgb, transparency), axis=-1)
    rgba = tf.image.convert_image_dtype(rgba, dtype=tf.uint8)
    pred_str = tf.image.encode_png(rgba, compression=5)
    pred_encoded = tf.io.encode_base64(pred_str, pad=True)
    pred_encoded = tf.strings.regex_replace(pred_encoded, '_', '/')
    pred_encoded = tf.strings.regex_replace(pred_encoded, '-', '+')
    pred_encoded = tf.expand_dims(pred_encoded, axis=0)
return pred_encoded
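On the client side, the returned prediction can then be decoded back into a PNG along these lines (a sketch; AI Platform wraps outputs in a {'predictions': [...]} dict):

import base64

# 'response' is the dict returned by service.projects().predict(...).execute()
png_bytes = base64.b64decode(response['predictions'][0])
with open('prediction.png', 'wb') as f:
    f.write(png_bytes)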
Here is my sample program. I need to draw a line for the x axis and a label for the y axis, so can anyone please help me add a line and labels to the scene? I tried different ways but didn't get the proper output. Any suggestions to solve this task are welcome. Thank you in advance.
Given below is the code I have tried:
import sys
from pyface.qt import QtGui, QtCore

# class ScanView(QtGui.QGraphicsView):
#     def __init__(self, X=5, Y=5, parent=None):
#         super(ScanView, self).__init__(parent)

class DemoApp(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(DemoApp, self).__init__()
        self.w = QtGui.QGridLayout()
        self.v = QtGui.QGraphicsView()
        self.w.addWidget(self.v)
        self.widget = QtGui.QWidget()
        self.widget.setLayout(self.w)
        self.setCentralWidget(self.widget)
        self._squares = []
        n_rows, n_cols = 3, 2
        squareLB = 50
        label = QtGui.QLabel("xaxis")
        label1 = QtGui.QLabel("yaxis")
        self._scene = QtGui.QGraphicsScene()
        mytext1 = QtGui.QGraphicsSimpleTextItem('label')
        self._scene.addItem(mytext1)
        mytext2 = QtGui.QGraphicsSimpleTextItem('label1')
        self._scene.addItem(mytext2)
        self.v.setScene(self._scene)
        self.pen = QtGui.QPen(QtCore.Qt.DotLine)
        self.pen.setColor(QtCore.Qt.red)
        width, height = (2 + 2)*squareLB, (3 + 2)*squareLB
        self._scene = QtGui.QGraphicsScene(0, 0, max(708, width), height)
        p = squareLB if width > 708 else (708.0 - 2*squareLB)/2.0
        for i in range(n_rows):
            for j in range(n_cols):
                it = self._scene.addRect(QtCore.QRectF(0, 0, squareLB, squareLB), self.pen)
                it.setPos(p + j*squareLB, i*squareLB)
                self._squares.append(it)
        self.v.setScene(self._scene)

class Settings(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(Settings, self).__init__(parent)
        self.folder = QtGui.QPushButton("Folder", clicked=self.showSettings)
        central_widget = QtGui.QWidget()
        self.setCentralWidget(central_widget)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.folder)
        self.scrollArea = QtGui.QScrollArea(widgetResizable=True)
        self.scrollArea.setBackgroundRole(QtGui.QPalette.Light)
        hlay = QtGui.QHBoxLayout(central_widget)
        hlay.addLayout(vbox)
        hlay.addWidget(self.scrollArea)
        self.setGeometry(200, 100, 300, 300)

    def showSettings(self):
        self.view = DemoApp()
        self.newwidget = QtGui.QWidget()
        hlay = QtGui.QHBoxLayout(self.newwidget)
        hlay.addWidget(self.view)
        self.scrollArea.setWidget(self.newwidget)

def main():
    app = QtGui.QApplication(sys.argv)
    ex = Settings()
    ex.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
You can use QPainterPath to draw the arrow and a little math to set the position:
class DemoApp(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(DemoApp, self).__init__()
        self._squares = []
        n_rows, n_cols = 5, 4
        squareLB = 50
        self.widget = QtGui.QWidget()
        grid = QtGui.QGridLayout(self.widget)
        self.v = QtGui.QGraphicsView()
        grid.addWidget(self.v)
        self.setCentralWidget(self.widget)
        width, height = (2 + 2)*squareLB, (3 + 2)*squareLB
        self._scene = QtGui.QGraphicsScene(0, 0, max(708, width), height)
        self.v.setScene(self._scene)
        pen = QtGui.QPen(QtCore.Qt.red, 0.0, QtCore.Qt.DotLine)
        p = squareLB if width > 708 else (708.0 - 2*squareLB)/2.0
        for i in range(n_rows):
            for j in range(n_cols):
                it = self._scene.addRect(QtCore.QRectF(0, 0, squareLB, squareLB), pen)
                it.setPos(p + j*squareLB, i*squareLB)
                self._squares.append(it)

        # X axis: a horizontal line with an arrowhead at its right end
        path_x = QtGui.QPainterPath()
        path_x.lineTo(squareLB*n_cols/2, 0)
        path_x.lineTo(squareLB*n_cols/2 - 0.2*squareLB, -0.2*squareLB)
        path_x.lineTo(squareLB*n_cols/2, 0)
        path_x.lineTo(squareLB*n_cols/2 - 0.2*squareLB, +0.2*squareLB)
        pen = QtGui.QPen("green")
        pen.setWidth(2)
        item_path = self._scene.addPath(path_x, pen)
        item_path.setPos(p, (i + 1.2)*squareLB)
        mytext1 = self._scene.addText("X")
        mytext1.setPos(p + j*squareLB/2, (i + 1.2)*squareLB)

        # Y axis: a vertical line with an arrowhead at its top end
        path_y = QtGui.QPainterPath()
        path_y.lineTo(0, -squareLB*n_rows/2)
        path_y.lineTo(-0.2*squareLB, -squareLB*n_rows/2 + 0.2*squareLB)
        path_y.lineTo(0, -squareLB*n_rows/2)
        path_y.lineTo(+0.2*squareLB, -squareLB*n_rows/2 + 0.2*squareLB)
        pen = QtGui.QPen("red")
        pen.setWidth(2)
        item_path = self._scene.addPath(path_y, pen)
        item_path.setPos(p - 0.2*squareLB, (i + 1)*squareLB)
        mytext1 = self._scene.addText("Y")
        mytext1.setPos(p - 0.7*squareLB, (i + 1)*squareLB/2)
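If you only need plain axis lines without arrowheads, QGraphicsScene.addLine() is a simpler alternative. A minimal sketch, meant to sit at the end of the same __init__ and reusing its p, i, n_cols and squareLB values:

        # Plain axis lines: one below the grid, one to its left
        axis_pen = QtGui.QPen(QtCore.Qt.black)
        axis_pen.setWidth(2)
        self._scene.addLine(QtCore.QLineF(p, (i + 1.2)*squareLB,
                                          p + n_cols*squareLB, (i + 1.2)*squareLB), axis_pen)
        self._scene.addLine(QtCore.QLineF(p - 0.2*squareLB, (i + 1)*squareLB,
                                          p - 0.2*squareLB, 0), axis_pen)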
I want to use wxPython to show the trajectory of a random walk in real time. However, the panel is updated only once at the end, showing the entire random walk, instead of updating step by step and showing the time course.
My first idea was to use wx.ClientDC().DrawPoint(), but the result was as described above: I did not see single points being drawn; only the final result was shown.
So instead I thought about using wx.MemoryDC to draw the trajectory to a bitmap stored in memory and then use wx.ClientDC.DrawBitmap() to copy the buffered image to the screen at set time intervals, in case flipping the image was the bottleneck. The result is still the same, so I am hoping for your help.
The purpose of this exercise is to replace the random walk with positional data coming from an eye tracker with a sampling rate of 1000 Hz, and I would like to visualize the trajectory as close to real time as possible (the monitor's refresh rate is 120 Hz).
This is my code (most of it comes from here):
import wx
import random
import time
from time import asctime

#-------------------------------------------------------------------
def jmtime():
    return '[' + asctime()[11:19] + '] '

#-------------------------------------------------------------------
class MyDrawingArea(wx.Window):
    def __init__(self, parent, id):
        sty = wx.NO_BORDER
        wx.Window.__init__(self, parent, id, style=sty)
        self.parent = parent
        self.SetBackgroundColour(wx.WHITE)
        self.SetCursor(wx.CROSS_CURSOR)
        # Some initialisation, just to remind the user that a variable
        # called self.BufferBmp exists. See self.OnSize().
        self.BufferBmp = None
        wx.EVT_SIZE(self, self.OnSize)
        wx.EVT_PAINT(self, self.OnPaint)
        wx.EVT_LEFT_DOWN(self, self.OnClick)

    def OnSize(self, event):
        print jmtime() + 'OnSize in MyDrawingArea'
        # Get the size of the drawing area in pixels.
        self.wi, self.he = self.GetSizeTuple()
        # Create BufferBmp and set the same size as the drawing area.
        self.BufferBmp = wx.EmptyBitmap(self.wi, self.he)
        memdc = wx.MemoryDC()
        memdc.SelectObject(self.BufferBmp)
        # Drawing job
        ret = self.DoSomeDrawing(memdc)
        if not ret:  # error
            self.BufferBmp = None
            wx.MessageBox('Error in drawing', 'CommentedDrawing', wx.OK | wx.ICON_EXCLAMATION)

    def OnPaint(self, event):
        print jmtime() + 'OnPaint in MyDrawingArea'
        dc = wx.PaintDC(self)
        dc.BeginDrawing()
        if self.BufferBmp != None:
            print jmtime() + '...drawing'
            dc.DrawBitmap(self.BufferBmp, 0, 0, True)
        else:
            print jmtime() + '...nothing to draw'
        dc.EndDrawing()

    def OnClick(self, event):
        pos = event.GetPosition()
        dc = wx.ClientDC(self)
        dc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
        dcwi, dche = dc.GetSizeTuple()
        x = pos.x
        y = pos.y
        time_start = time.time()
        memdc = wx.MemoryDC()
        memdc.SelectObject(self.BufferBmp)
        memdc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
        count = 1
        runtime = 5
        while (time.time() - time_start) < runtime:
            x, y = random_walk(x, y, dcwi, dche)
            memdc.DrawPoint(x, y)
            if (time.time() - time_start) > count * runtime * 0.1:
                print jmtime() + 'Random walk in MyDrawingArea'
                count += 1
                dc.BeginDrawing()
                dc.DrawBitmap(self.BufferBmp, 0, 0, True)
                dc.EndDrawing()
        dc.BeginDrawing()
        dc.DrawBitmap(self.BufferBmp, 0, 0, True)
        dc.EndDrawing()
    # End of def OnClick

    def DoSomeDrawing(self, dc):
        try:
            print jmtime() + 'DoSomeDrawing in MyDrawingArea'
            dc.BeginDrawing()
            #~ raise OverflowError #for test
            # Clear everything
            dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
            dc.Clear()
            dc.EndDrawing()
            return True
        except:
            return False
#-------------------------------------------------------------------
class MyPanel(wx.Panel):
    def __init__(self, parent, id):
        wx.Panel.__init__(self, parent, id, wx.DefaultPosition, wx.DefaultSize)
        self.drawingarea = MyDrawingArea(self, -1)
        self.SetAutoLayout(True)
        gap = 30  # in pixels
        lc = wx.LayoutConstraints()
        lc.top.SameAs(self, wx.Top, gap)
        lc.left.SameAs(self, wx.Left, gap)
        lc.right.SameAs(self, wx.Width, gap)
        lc.bottom.SameAs(self, wx.Bottom, gap)
        self.drawingarea.SetConstraints(lc)
#-------------------------------------------------------------------
# Usual frame. Can be resized, maximized and minimized.
# The frame contains one panel.
class MyFrame(wx.Frame):
    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, 'CommentedDrawing', wx.Point(0, 0), wx.Size(500, 400))
        self.panel = MyPanel(self, -1)
        wx.EVT_CLOSE(self, self.OnCloseWindow)

    def OnCloseWindow(self, event):
        print jmtime() + 'OnCloseWindow in MyFrame'
        self.Destroy()

#-------------------------------------------------------------------
class MyApp(wx.App):
    def OnInit(self):
        frame = MyFrame(None, -1)
        frame.Show(True)
        self.SetTopWindow(frame)
        return True
#-------------------------------------------------------------------
def random_walk(x, y, sizex=250, sizey=200):
    rn = random.randrange(0, 2)
    x_new = x + (1 - rn) - rn
    while x_new < 0 or x_new > sizex:
        rn = random.randrange(0, 2)
        x_new = x + (1 - rn) - rn
    rn = random.randrange(0, 2)
    y_new = y + (1 - rn) - rn
    while y_new < 0 or y_new > sizey:  # keep y within the vertical bound
        rn = random.randrange(0, 2)
        y_new = y + (1 - rn) - rn
    return x_new, y_new
# end of def random_walk
#-------------------------------------------------------------------
def main():
    print 'main is running...'
    app = MyApp(0)
    app.MainLoop()

#-------------------------------------------------------------------
if __name__ == "__main__":
    main()
#eof-------------------------------------------------------------------
This is the solution I came up with. Instead of using dc.DrawBitmap() to copy the buffered image to the screen, I use Refresh() and Update() to trigger a paint event. What I still don't understand is why I cannot use DrawBitmap() to accomplish the same; the only difference is that OnPaint() uses a PaintDC() while OnClick() used a ClientDC().
Anyway, this is my current code for OnClick():
def OnClick(self, event):
    pos = event.GetPosition()
    x = pos.x
    y = pos.y
    time_start = time.time()
    memdc = wx.MemoryDC()
    memdc.SelectObject(self.BufferBmp)
    dcwi, dche = memdc.GetSizeTuple()
    memdc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
    runtime = 10
    while (time.time() - time_start) < runtime:
        x, y = random_walk(x, y, dcwi, dche)
        memdc.SelectObject(self.BufferBmp)
        memdc.DrawPoint(x, y)
        memdc.SelectObject(wx.NullBitmap)
        # Refresh() invalidates the window; Update() forces the repaint
        # to happen now instead of on the next pass of the event loop.
        self.Refresh()
        self.Update()
    print jmtime() + 'Random walk in MyDrawingArea done'
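As a side note, the more idiomatic way to animate at a fixed interval in wxPython is to let a wx.Timer drive the repaint instead of blocking inside the click handler. A rough, untested sketch in the same classic wxPython style as the code above:

class TimerDrawingArea(wx.Window):
    # Sketch: a timer ticks ~120 times a second; each tick would add the
    # newest points to self.BufferBmp and then repaint, so the event loop
    # stays responsive between frames.
    def __init__(self, parent, id):
        wx.Window.__init__(self, parent, id)
        self.BufferBmp = None
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
        self.timer.Start(1000 // 120)  # roughly the monitor's refresh rate

    def OnTimer(self, event):
        # ... draw the latest points into self.BufferBmp with a wx.MemoryDC ...
        self.Refresh()
        self.Update()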
I'm trying to create a chat box using tkinter for real-time chatting, but there is a problem in the GUI part (below) where I am getting this error:
NameError: global name 'action' is not defined
My code:
from Tkinter import *
from PIL import ImageTk, Image

class LoginFrame(Frame):
    def action(event):
        global EntryBox
        global ChatLog
        EntryBox.config(state=NORMAL)
        EntryText = (EntryBox.get("0.0", END))
        LoadMyEntry(ChatLog, EntryText)
        EntryBox.delete("0.0", END)

    def __init__(self, parent):
        Frame.__init__(self, parent, background=("lavender blush"))
        self.parent = parent
        self.parent.title("Lets Gossip")
        self.pack(fill=BOTH, expand=1)
        w = 400
        h = 500
        sw = self.parent.winfo_screenwidth()
        sh = self.parent.winfo_screenheight()
        x = (sw - w)/2
        y = (sh - h)/2
        self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
        ChatLog = Text(self, bd=0, bg="white", height="8", width="50", font="Arial")
        scrollbar = Scrollbar(self, command=ChatLog.yview, cursor="heart")
        ChatLog['yscrollcommand'] = scrollbar.set
        EntryBox = Text(self, bd=0, bg="white", width="29", height="5", font="Arial")
        EntryBox.bind("<Return>", action)
        EntryBox.bind("<KeyRelease-Return>")
        scrollbar1 = Scrollbar(self, command=ChatLog.yview, cursor="heart")
        EntryBox['yscrollcommand'] = scrollbar1.set
        scrollbar.place(x=376, y=6, height=386)
        ChatLog.place(x=6, y=6, height=386, width=370)
        scrollbar1.place(x=376, y=401, height=90)
        EntryBox.place(x=6, y=401, height=90, width=370)
        self.pack()

root = Tk()
lf = LoginFrame(root)
root.mainloop()
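For anyone hitting the same NameError: inside __init__, a bare name like action is looked up in the local and then global scope, never the class scope, so the handler has to take self and be referenced as self.action. A minimal runnable sketch of just those two changes:

from Tkinter import *

class LoginFrame(Frame):
    def action(self, event):  # 1) take self as the first parameter
        print "action fired"  # placeholder for the real handler body

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.pack(fill=BOTH, expand=1)
        entry = Text(self, height="5", width="29")
        entry.bind("<Return>", self.action)  # 2) reference the method via self
        entry.pack()

root = Tk()
LoginFrame(root)
root.mainloop()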
I just finished a face recognition program in Python on an Ubuntu system, but when I move the work to a Raspberry Pi it gives this error. This is the full error:
AttributeError: 'module' object has no attribute 'createLBPHFaceRecognizer'
What is the solution? Thank you.
import cv2
import sys
import cv
import glob
import numpy as np
import os

labeltest = []
Images = []
Len = 0
model = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 70.0)
Labels = []
textsay = ""
# *********** Read *****************\\
def read():
    arr = {}
    with open("csv.ext") as f:
        for line in f:
            arr = line.split("%", 2)
            labeltest.append(arr[1])
            Images.append(cv2.imread(arr[0], cv2.IMREAD_GRAYSCALE))
    label = range(0, len(labeltest))
    for i in range(0, len(labeltest)):
        label[i] = int(labeltest[i])
    print (label)
    model.train(np.asarray(Images), np.asarray(label))
    model.save("mezo.xml")
    model.load("mezo.xml")
# //*********** Read *****************
def writetofile(key):
    fo = open("csv.ext", "a+")
    fo.write(key)
    fo.write("\n")

def searchName(key):
    lines = tuple(open("Names.txt", "r"))
    for i in range(0, len(lines)):
        test = lines[i].split("\n")
        print test[0]
        if str(key.lower()) == str(test[0].lower()):
            return i
    return -1

def readName():
    lines = tuple(open("Names.txt", "r"))
    for i in range(0, len(lines)):
        Labels.append(lines[i])
    print Labels

def AddName(key):
    fo = open("Names.txt", "a+")
    fo.write(key)
    fo.write("\n")
    readName()
# *********** Add *****************\\
def Add(faces, gray):
    count = Len + 100
    for (x, y, w, h) in faces:
        filename = "/home/mohammad/Desktop/traning/%03d" % count + ".pgm"
        f = gray[y:y+h, x:x+w]
        f = cv2.resize(f, (92, 112), interpolation=cv2.INTER_LANCZOS4)
        newName = raw_input("Enter the Name : ")
        index = searchName(newName)
        if index == -1:
            index = len(Labels)
            AddName(newName)
        filenameIn = filename + "%" + str(index)
        writetofile(filenameIn)
        cv2.imwrite(filename, f)
        count += 1
    read()
# //*********** Add *****************
path = {}
path = glob.glob("/home/mohammad/Desktop/traning/*.pgm")
Len = len(path) - 1
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
count = 0
video_capture = cv2.VideoCapture(0)
read()
readName()
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    cv2.waitKey(10)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        frame,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        f = gray[y:y+h, x:x+w]
        f = cv2.resize(f, (92, 112), interpolation=cv2.INTER_LANCZOS4)
        cv2.imwrite("11.pgm", f)
        label, confidence = model.predict(f)
        print "Threshold : ", model.getDouble("threshold")
        if label > -1:
            if Labels[label] != textsay:
                cmd = 'espeak "{0}" 2>/dev/null'.format(Labels[label])
                os.system(cmd)
                textsay = Labels[label]
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, Labels[label], (x, y-10), font, 1.0, (255, 255, 255))
            print "\n" + str(Labels[label]) + " | " + str(confidence)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    k = cv2.waitKey(5) & 0xFF
    if k == 97:
        Add(faces, gray)
    if k == 27:
        exit()
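For reference, the name of this constructor changed between OpenCV versions, which is the usual cause of this AttributeError when the Raspberry Pi has a different OpenCV build than the desktop. A guarded sketch, assuming the Pi has OpenCV 3.3+ with the contrib modules installed:

import cv2

# The LBPH recognizer lives in different places depending on the version.
try:
    model = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 70.0)        # OpenCV 2.4.x
except AttributeError:
    model = cv2.face.LBPHFaceRecognizer_create(1, 8, 8, 8, 70.0)  # OpenCV 3.3+ (opencv-contrib)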