I have implemented Celery in a Django project, using Redis as the broker and django_celery_results as the result backend. Everything works fine, but I want to get rid of the endless loading in Postman.
I want to return a message like "task processing" immediately, and once the task is done, show the result when the page is reloaded or the request is sent again.
For example: when we hit Send, it should immediately respond with a message like "task processing", and if we hit Send again after the task has completed, it should return the result.
Code
celery.py
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "coutoEditor.settings")
app = Celery("coutoEditor")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
tasks.py
import os
import uuid
from datetime import datetime

import requests
from celery import shared_task
from django.core.files import File
from rest_framework import status
from rest_framework.response import Response
# project-level imports (BASE_DIR, BASE_URL, TemporaryFiles, convert_to_sec,
# editor_speedUp_temp_dir, editor_speedUp_output_dir) are assumed to exist
@shared_task()
def speed_up_vid_task(input_path, speed_factor, start, end):
'''
Method Params:
input_path: The video file url or directory path file name
speed_factor: The factor for the speed up process
start: start of the video part that needs to be sped up (in secs)
end: end of the video part that needs to be sped up (in secs)
'''
start = convert_to_sec(start)
end = convert_to_sec(end)
filename = str(uuid.uuid4())
print(filename, "new")
temporary_dir = BASE_DIR + '/' + editor_speedUp_temp_dir  # editor_speedUp_temp_dir = "media/editor/speed_vid/temp/"
output_dir = BASE_DIR + '/' + editor_speedUp_output_dir  # editor_speedUp_output_dir = "media/editor/speed_vid/"
# Check for broken url
r = requests.get(input_path, stream=True)
if not r.status_code == 200:
return Response({
'message': "media file is corrupted",
'data': "broken url process could not be completed",
'status': False
}, status=status.HTTP_400_BAD_REQUEST)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.exists(temporary_dir):
os.mkdir(temporary_dir)
# probe for an audio stream; empty output means the video has no audio track
stream = os.popen(
    "ffprobe -loglevel error -select_streams a -show_entries stream=codec_type -of csv=p=0 '{}'".format(
        input_path))
output = stream.read()
if len(output) == 0:
input_path_vid = os.path.join(BASE_DIR, temporary_dir) + filename + "_temp_video.mp4"
cmd = "ffmpeg -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 -i '{}' -c:v copy -c:a aac -shortest {}".format(
input_path, input_path_vid)
os.system(cmd)
else:
# check if it's a directory or a url
if(os.path.isfile(input_path)):
input_path_vid = BASE_DIR + input_path
pass
else:
ext_name = filename + '_temp_video.mp4'
ext_path = temporary_dir + ext_name
r = requests.get(input_path)
with open(ext_path, 'wb') as outfile:
outfile.write(r.content)
outfile.close()
input_path_vid = ext_path
output_path = os.path.join(BASE_DIR, editor_speedUp_output_dir + filename + ".mp4")
cmd = 'ffmpeg -i ' + input_path_vid + ' \
-filter_complex \
"[0:v]trim=0:' + str(start) + ',setpts=PTS-STARTPTS[v1]; \
[0:v]trim=' + str(start) + ':' + str(end) + ',setpts=1/' + str(speed_factor) + '*(PTS-STARTPTS)[v2]; \
[0:v]trim=' + str(end) + ',setpts=PTS-STARTPTS[v3]; \
[0:a]atrim=0:' + str(start) + ',asetpts=PTS-STARTPTS[a1]; \
[0:a]atrim=' + str(start) + ':' + str(end) + ',asetpts=PTS-STARTPTS,atempo=' + str(speed_factor) + '[a2]; \
[0:a]atrim=' + str(end) + ',asetpts=PTS-STARTPTS[a3]; \
[v1][a1][v2][a2][v3][a3]concat=n=3:v=1:a=1" \
-preset superfast -profile:v baseline ' + output_path
os.system(cmd)
generated_video = open(output_path, "rb")
generated_video_file = TemporaryFiles.objects.create(temp_file=File(generated_video, name=filename + ".mp4"),
created_at=datetime.utcnow())
generated_video.close()
if os.path.exists(input_path_vid):
os.remove(input_path_vid)
if os.path.exists(output_path):
os.remove(output_path)
res_dict = {}
res_dict["video_url"] = os.path.join(BASE_URL, generated_video_file.temp_file.url[1:])
return res_dict
views.py
class speed_up_video(APIView):
def post(self,request):
video_url = request.data["video_url"]
speed_factor = request.data["speed_factor"]
start = request.data["start"]
end = request.data["end"]
result_vid = speed_up_vid_task.delay(video_url, speed_factor, start, end)
return Response(result_vid.get())
Thanks.
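One way to get that behaviour is to return the task id immediately instead of blocking on result_vid.get(), and poll the task state on subsequent requests. A minimal sketch, assuming the same task; the task_status view (and its URL wiring, which is omitted) is a hypothetical name:
from celery.result import AsyncResult
from rest_framework.views import APIView
from rest_framework.response import Response

class speed_up_video(APIView):
    def post(self, request):
        result = speed_up_vid_task.delay(
            request.data["video_url"], request.data["speed_factor"],
            request.data["start"], request.data["end"])
        # respond immediately instead of blocking on result.get()
        return Response({"task_id": result.id, "message": "task processing"})

class task_status(APIView):  # hypothetical polling endpoint
    def get(self, request, task_id):
        result = AsyncResult(task_id)
        if result.ready():
            # the task has finished, so get() returns without blocking
            return Response({"status": "done", "result": result.get()})
        return Response({"status": "task processing"})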
I am using Python 2 with Asterisk. There is a section where the code listens to the caller's audio and processes it; during that processing there is a silence of 15 seconds before the next audio is played. I want to play music while the audio is being processed. Is there a way to do this?
The extensions.conf is like this:
[autoattendant2]
exten => 5555,1,Answer()
same => n,AGI(/root/code_base/Queue/code2.py)
same => n,Hangup()
Below is the Python code:
#!/usr/bin/env python2
import sys
import re
import time
import random
import subprocess
import requests
import json
from datetime import datetime
env = {}
tests = 0;
while 1:
line = sys.stdin.readline().strip()
if line == '':
break
key,data = line.split(':')
if key[:4] <> 'agi_':
#skip input that doesn't begin with agi_
sys.stderr.write("Did not work!\n");
sys.stderr.flush()
continue
key = key.strip()
data = data.strip()
if key <> '':
env[key] = data
sys.stderr.write("AGI Environment Dump:\n");
sys.stderr.flush()
for key in env.keys():
sys.stderr.write(" -- %s = %s\n" % (key, env[key]))
sys.stderr.flush()
def checkresult (params):
params = params.rstrip()
if re.search('^200',params):
result = re.search('result=(\d+)',params)
if (not result):
sys.stderr.write("FAIL ('%s')\n" % params)
sys.stderr.flush()
return -1
else:
result = result.group(1)
#debug("Result:%s Params:%s" % (result, params))
sys.stderr.write("PASS (%s)\n" % result)
sys.stderr.flush()
return result
else:
sys.stderr.write("FAIL (unexpected result '%s')\n" % params)
sys.stderr.flush()
return -2
def change_file(path, cid):
# one of the process example
filename = 'complain{0}'.format(cid)
#filename =
input_file = path + '/' + filename + '.gsm'
output_file = path + '/' + filename + '.wav'
#command = "sox {} -r 8000 -c 1 {}".format(input_file, output_file)
command = "sox {} -b 16 -r 44100 -c 1 {} trim 0 7 vol 2".format(input_file, output_file)
subprocess.call(command, shell=True)
pbcheck = requests.get("http://127.0.0.1:8000/STT_complaint/", params = {"address" : output_file, "lang" : language, "cid":cid, "phone":callerid, "start_time":now})
res = pbcheck.content
res2 = res.replace('"', "")
return res2
def run_cmd(cmd):
#This runs the general command
sys.stderr.write(cmd)
sys.stderr.flush()
sys.stdout.write(cmd)
sys.stdout.flush()
result = sys.stdin.readline().strip()
checkresult(result)
#language = "ben"
# asking problem recorded audio
cmd_streaming = "STREAM FILE /root/code_base/recorded_voices/{0}/plz_tell_problem \"\"\n".format(language, language)
run_cmd(cmd_streaming)
# listening to caller / recording caller voice
cmd_record = "RECORD FILE {0}/complain{1} gsm 1234 {2} s=3 \"\"\n".format(fixed_path, cid, 15)
run_cmd(cmd_record)
#processing audio
processed_output = change_file(path , cid) # while this is executing ad giving output i want to play wait_music and then stop to run
# while processing play this
cmd_streaming = "STREAM FILE /root/code_base/recorded_voices/wait_music \"\"\n".format(language, language)
run_cmd(cmd_streaming)
# once output received (processed_output) play next audio
cmd_streaming = "STREAM FILE /root/code_base/recorded_voices/next_instruction \"\"\n")
run_cmd(cmd_streaming)
For that, Asterisk AGI has the special command "SET MUSIC ON":
https://wiki.asterisk.org/wiki/display/AST/Asterisk+18+AGICommand_set+music
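A sketch of how it might slot into the script above, reusing the run_cmd helper; the music-on-hold class name "default" is an assumption and must exist in musiconhold.conf:
run_cmd("SET MUSIC ON default\n")            # start music-on-hold (class "default" assumed)
processed_output = change_file(path, cid)    # long-running processing happens here
run_cmd("SET MUSIC OFF\n")                   # stop the music before playing the next prompt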
Background
For our research we currently need to download ~15,000 files. While the database has its own command line tool to support "bulk" download, it is totally unfeasible to run it for 15,000 runs sequentially (as the command line tool currently does).
Simple math
I used the currently available command line tool to download several runs and took the average runtime, which is ~20 minutes per file (if not more). To do so for all 15,000 files would take 15,000 * 20 / 60 / 24 = 208 days, which would be nice only if you got paid per hour of script runtime ;)
The bulk command line script
This is the script which allows users to download bulk data (NOT MADE BY ME):
NOTE: I changed it a little, so I can run it from the IDE directly (to avoid having to start it from the command line for every little change).
'''
Created on 27/10/2015
@author: Maxim Scheremetjew
amended 07/11/2016 by Maxim Scheremetjew
version: 1.1
'''
import sys
import argparse
import csv
import os
import urllib.request, urllib.parse, urllib.error
from urllib.error import URLError
from io import StringIO
def _download_resource_by_url(url, output_file_name):
"""Kicks off a download and stores the file at the given path.
Arguments:
'url' -- Resource location.
'output_file_name' -- Path of the output file.
"""
print("Starting the download of the following file...")
print(url)
print("Saving file in:\n" + output_file_name)
try:
urllib.request.urlretrieve(url, output_file_name)
except URLError as url_error:
print(url_error)
raise
except IOError as io_error:
print(io_error)
raise
print("Download finished.")
def _get_number_of_chunks(url_template, study_id, sample_id, run_id, version, domain, file_type):
"""
Returns the number of chunks for the given set of parameters (study, sample and run identifier).
"""
print("Getting the number of chunks from the following URL...")
url_get_number_of_chunks = url_template % (
study_id, sample_id, run_id, version, domain, file_type)
print(url_get_number_of_chunks)
try:
file_stream_handler = urllib.request.urlopen(url_get_number_of_chunks)
result = int(file_stream_handler.read())
print("Retrieved " + str(result) + " chunks.")
return result
except URLError as url_error:
print(url_error)
raise
except IOError as io_error:
print(io_error)
raise
except ValueError as e:
print(e)
print("Skipping this run! Could not retrieve the number of chunks for this URL. " \
"Check the version number in the URL and check if the run is available online.")
return 0
def _get_file_stream_handler(url_template, study_id):
"""
Returns a file stream handler for the given URL.
"""
print("Getting the list of project runs...")
url_get_project_runs = url_template % (study_id)
try:
req = urllib.request.Request(url=url_get_project_runs, headers={'Content-Type': 'text/plain'})
res = urllib.request.urlopen(req)
dec_res = res.read().decode()
sys.stderr.write(str(dec_res))
return dec_res
except URLError as url_error:
print(url_error)
raise
except IOError as io_error:
print(io_error)
raise
except ValueError as e:
print(e)
print("Could not retrieve any runs. Open the retrieval URL further down in your browser and see if you get any results back. Program will exit now.")
print(url_get_project_runs)
raise
def _print_program_settings(project_id, version, selected_file_types_list, output_path, root_url):
print("Running the program with the following setting...")
print("Project: " + project_id)
print("Pipeline version: " + version)
print("Selected file types: " + ",".join(selected_file_types_list))
print("Root URL: " + root_url)
print("Writing result to: " + output_path)
def start(args):
function_file_type_list = ["InterProScan", "GOAnnotations", "GOSlimAnnotations"]
sequences_file_type_list = ["ProcessedReads", "ReadsWithPredictedCDS", "ReadsWithMatches", "ReadsWithoutMatches",
"PredictedCDS", "PredictedCDSWithoutAnnotation", "PredictedCDSWithAnnotation",
"PredictedORFWithoutAnnotation", "ncRNA-tRNA-FASTA"]
taxonomy_file_type_list = ["5S-rRNA-FASTA", "16S-rRNA-FASTA", "23S-rRNA-FASTA", "OTU-TSV", "OTU-BIOM",
"OTU-table-HDF5-BIOM", "OTU-table-JSON-BIOM", "NewickTree", "NewickPrunedTree"]
# Default list of available file types
default_file_type_list = sequences_file_type_list + function_file_type_list + taxonomy_file_type_list
# Parse script parameters
# Parse the project accession
study_id = args['project_id']
# Parse the values for the file type parameter
selected_file_types_list = []
if not args['file_type']:
# If not specified use the default set of file types
selected_file_types_list = default_file_type_list
else:
# Remove whitespaces
selected_file_types_str = args['file_type'].replace(" ", "")
# Set all functional result file types
if selected_file_types_str == "AllFunction":
selected_file_types_list = function_file_type_list
elif selected_file_types_str == "AllTaxonomy":
selected_file_types_list = taxonomy_file_type_list
elif selected_file_types_str == "AllSequences":
selected_file_types_list = sequences_file_type_list
# Set defined file types
elif len(selected_file_types_str.split(",")) > 1:
selected_file_types_list = selected_file_types_str.split(",")
# Set single file type
else:
selected_file_types_list.append(selected_file_types_str)
# Parse the analysis version
version = args['version']
root_url = "https://www.ebi.ac.uk"
study_url_template = root_url + "/metagenomics/projects/%s/runs"
number_of_chunks_url_template = root_url + "/metagenomics/projects/%s/samples/%s/runs/%s/results/versions/%s/%s/%s/chunks"
chunk_url_template = root_url + "/metagenomics/projects/%s/samples/%s/runs/%s/results/versions/%s/%s/%s/chunks/%s"
download_url_template = root_url + "/metagenomics/projects/%s/samples/%s/runs/%s/results/versions/%s/%s/%s"
# Print out the program settings
_print_program_settings(study_id, version, selected_file_types_list, args['output_path'], root_url)
# Iterating over all file types
for file_type in selected_file_types_list:
domain = None
fileExtension = None
# Boolean flag to indicate if a file type is chunked or not
is_chunked = True
# Set the result file domain (sequences, function or taxonomy) dependent on the file type
# Set output file extension (tsv, faa or fasta) dependent on the file type
if file_type == 'InterProScan':
domain = "function"
fileExtension = ".tsv.gz"
elif file_type == 'GOSlimAnnotations' or file_type == 'GOAnnotations':
domain = "function"
fileExtension = ".csv"
is_chunked = False
# PredictedCDS is version 1.0 and 2.0 only, from version 3.0 on this file type was replaced by
# PredictedCDSWithAnnotation (PredictedCDS can be gained by concatenation of the 2 sequence file types now)
elif file_type == 'PredictedCDS' or file_type == 'PredictedCDSWithoutAnnotation' or file_type == \
        'PredictedCDSWithAnnotation':
if file_type == 'PredictedCDSWithAnnotation' and (version == '1.0' or version == '2.0'):
print("File type '" + file_type + "' is not available for version " + version + "!")
continue
elif file_type == 'PredictedCDS' and version == '3.0':
print("File type '" + file_type + "' is not available for version " + version + "!")
continue
domain = "sequences"
fileExtension = ".faa.gz"
elif file_type == 'ncRNA-tRNA-FASTA':
domain = "sequences"
fileExtension = ".fasta"
is_chunked = False
elif file_type == '5S-rRNA-FASTA' or file_type == '16S-rRNA-FASTA' or file_type == '23S-rRNA-FASTA':
is_chunked = False
domain = "taxonomy"
fileExtension = ".fasta"
# NewickPrunedTree is version 2 only
# NewickTree is version 1 only
elif file_type == 'NewickPrunedTree' or file_type == 'NewickTree':
if file_type == 'NewickPrunedTree' and version == '1.0':
print("File type '" + file_type + "' is not available for version " + version + "!")
continue
if file_type == 'NewickTree' and version == '2.0':
print("File type '" + file_type + "' is not available for version " + version + "!")
continue
is_chunked = False
domain = "taxonomy"
fileExtension = ".tree"
elif file_type == 'OTU-TSV':
is_chunked = False
domain = "taxonomy"
fileExtension = ".tsv"
# OTU-BIOM is version 1 only
# OTU-table-HDF5-BIOM and OTU-table-JSON-BIOM are version 2 only
elif file_type == 'OTU-BIOM' or file_type == 'OTU-table-HDF5-BIOM' or file_type == 'OTU-table-JSON-BIOM':
if file_type == 'OTU-BIOM' and version == '2.0':
print("File type '" + file_type + "' is not available for version " + version + "!")
continue
if (file_type == 'OTU-table-HDF5-BIOM' or file_type == 'OTU-table-JSON-BIOM') and version == '1.0':
print("File type '" + file_type + "' is not available for version " + version + "!")
continue
is_chunked = False
domain = "taxonomy"
fileExtension = ".biom"
else:
domain = "sequences"
fileExtension = ".fasta.gz"
# Retrieve a file stream handler from the given URL and iterate over each line (each run) and build the download link using the variables from above
file_stream_handler = _get_file_stream_handler(study_url_template, study_id)
reader = csv.reader(StringIO(file_stream_handler), delimiter=',')
for study_id, sample_id, run_id in reader:
print(study_id + ", " + sample_id + ", " + run_id)
output_path = args['output_path'] + "/" + study_id + "/" + file_type
if not os.path.exists(output_path):
os.makedirs(output_path)
if is_chunked:
number_of_chunks = _get_number_of_chunks(number_of_chunks_url_template, study_id, sample_id, run_id,
version, domain, file_type)
for chunk in range(1, number_of_chunks + 1):
output_file_name = output_path + "/" + run_id.replace(" ", "").replace(",",
"-") + "_" + file_type + "_" + str(
chunk) + fileExtension
rootUrl = chunk_url_template % (study_id, sample_id, run_id, version, domain, file_type, chunk)
_download_resource_by_url(rootUrl, output_file_name)
else:
output_file_name = output_path + "/" + run_id.replace(" ", "").replace(",",
"-") + "_" + file_type + fileExtension
rootUrl = download_url_template % (study_id, sample_id, run_id, version, domain, file_type)
_download_resource_by_url(rootUrl, output_file_name)
print("Program finished.")
start({'project_id':'ERP001736',
'file_type': 'ProcessedReads,16S-rRNA-FASTA,OTU-TSV',
'version': '2.0',
'output_path':''})
What am I thinking of
I have (a little) experience with multithreading / multiprocessing / asynchronous requests, but can't figure out what I should do in this case. I have 20 CPUs on my Linux server, so I could do some multiprocessing (~208 / 20 = 10+ days), but based on my previous experience with doing this, the CPUs would only be used at ~1-5%, which seems a waste of capacity. I haven't used the other two methods for this kind of problem; I have only used them for simple HTTP requests (just asking for a page and getting the result, not downloading files in chunks).
The real question
What would be the fastest method to download all these 15,000 files (sequentially is definitely not an option)?
If it's not too time-consuming, please provide a code example (or a reference) of what you mean.
Updates
I used nload to measure the bandwidth usage while running the script, downloading one file (of course there were background processes, but these seem to be negligible, only several MBs). I did this at 4 points in time and averaged the numbers:
Curr: 110 Mbit/s
Min: 30 Mbit/s
Avg: 90 Mbit/s
Max: 159.75 Mbit/s
Ttl: 752.41 GByte
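Since the work is I/O-bound rather than CPU-bound, threads are a better fit than multiprocessing here. A minimal sketch, assuming the _download_resource_by_url helper above and a hypothetical pre-built list of (url, output_file_name) pairs:
from concurrent.futures import ThreadPoolExecutor, as_completed

def download_all(jobs, max_workers=20):
    # jobs: iterable of (url, output_file_name) tuples, built from the same
    # URL templates the script above uses
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(_download_resource_by_url, url, path): url
                   for url, path in jobs}
        for future in as_completed(futures):
            try:
                future.result()  # re-raises any exception from that download
            except Exception as e:
                # one failed download should not abort the remaining thousands
                print("Download failed:", futures[future], e)
The worker count is a knob to tune against the ~110 Mbit/s link, not against the 20 CPUs.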
I have been trying to get my script to loop in such a way that it will load the outputs into output file 1, and then when it's done loading everything, move the values to output file 2, erase the values in output file 1, and start reloading them; when those are done, move the values into output file 2 (overwriting the old ones), and repeat.
I have been pretty successful so far but don't know what else to add to my script, and am hoping someone here knows why I keep getting "UnboundLocalError: local variable 'val' referenced before assignment" randomly midway through the loading process. With a very small input file, the script performs how I want.
Does anyone know how I can change my script to fix that error? I have tried to understand why it is happening but cannot.
I have tried to research it thoroughly, but none of the suggestions I have found have worked (or I implemented them incorrectly). I have attached my script. Thanks!
import urllib2,re,urllib,urlparse,csv,sys,time,threading,codecs,shutil
from bs4 import BeautifulSoup
def extract(url):
try:
sys.stdout.write('0')
# global file
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
product = soup.find("div", {"class": "js-product-price"})
price = product.findNext('div',{'class': 'js-price-display'}).getText().strip()
oos = product.findNext('p', attrs={'class': "price-oos"})
if oos is None:
oos = 'In Stock'
else:
oos = oos.getText()
val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
# ifile.write(val)
sys.stdout.write('1')
except Exception as e:
print e
return val
while True:
ifile = open('output.csv', "w", 0)
inputs = csv.reader(open('input.csv'))
# inputs = csv.reader(codecs.open('input.csv', 'rU', 'utf-16'))
ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
for i in inputs:
ifile.write(extract(i[0]))
ifile.close()
Update:
Thanks for the help guys! This is my new script:
import urllib2,re,urllib,urlparse,csv,sys,time,threading,codecs,shutil
from bs4 import BeautifulSoup
def extract(url):
try:
sys.stdout.write('0')
# global file
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
product = soup.find("div", {"class": "js-product-price"})
price = product.findNext('div',{'class': 'js-price-display'}).getText().strip()
oos = product.findNext('p', attrs={'class': "price-oos"})
if oos is None:
oos = 'In Stock'
else:
oos = oos.getText()
val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
# ifile.write(val)
sys.stdout.write('1')
except Exception as e:
print e
else:
return val
while True:
ifile = open('output.csv', "w", 0)
inputs = csv.reader(open('input.csv'))
# inputs = csv.reader(codecs.open('input.csv', 'rU', 'utf-16'))
ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
for i in inputs:
val_to_write = extract(i[0])
if val_to_write:
ifile.write(val_to_write)
ifile.close()
shutil.copy('output.csv', 'output2.csv')
print("finished")
With the above script I am now getting the error: "ValueError: I/O operation on closed file". Thanks
Use try-except-else as you would only want to return val if no exception was raised (if an exception was raised then val wouldn't be assigned to when you try to return it). Another suggestion is not to use a "catch-em-all" except block.
def extract(url):
try:
sys.stdout.write('0')
# global file
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
product = soup.find("div", {"class": "js-product-price"})
price = product.findNext('div',{'class': 'js-price-display'}).getText().strip()
oos = product.findNext('p', attrs={'class': "price-oos"})
if oos is None:
oos = 'In Stock'
else:
oos = oos.getText()
val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
# ifile.write(val)
sys.stdout.write('1')
except Exception as e:
print e
else:
return val
But be warned: if an exception does occur then extract will return None, and the calling code will have to account for that, for example:
for i in inputs:
val_to_write = extract(i[0])
if val_to_write:
ifile.write(val_to_write)
ifile.close()
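As for the follow-up "ValueError: I/O operation on closed file": that is what you would see if ifile.close() (and the shutil.copy) ended up indented inside the for loop, so the second row tries to write to an already-closed file. A sketch of the intended shape, under that assumption:
while True:
    ifile = open('output.csv', "w", 0)
    inputs = csv.reader(open('input.csv'))
    ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
    for i in inputs:
        val_to_write = extract(i[0])
        if val_to_write:
            ifile.write(val_to_write)
    # close and copy once per pass, after the whole input file is processed
    ifile.close()
    shutil.copy('output.csv', 'output2.csv')
    print("finished")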
I am launching the Rails server (rails -v reports 4.2.0) with the following command:
rails server -b $IP -p $port
and getting the following error:
/usr/local/rvm/gems/ruby-2.1.5#rails4/gems/railties-4.2.0/lib/rails/commands/server.rb:12:in `parse!': missing argument: -p (OptionParser::MissingArgument)
Can someone please explain the missing argument and how to fix it?
Thanks.
Try rails s -b $IP -p $PORT. Case is important. Bonus: you can use s as a shortcut for server.
I have the haarcascade_frontalface_default.xml file on cPanel; the file is present there, but it cannot detect faces. It works fine locally, but when I upload it to cPanel it does not detect faces properly.
face_detector = cv2.CascadeClassifier('/home/khblpkn3ru9o/public_html/media/haarcascade_frontalface_default.xml')
I also tried this:
cv2.CascadeClassifier('http://theangrynerds.com/media/haarcascade_frontalface_default.xml')
You can check the file at http://www.theangrynerds.com/media/haarcascade_frontalface_default.xml
My complete module code is here:
#login_required(login_url='login')
def imgDelete(request,id):
# if request.method== 'POST':
Image.objects.filter(name=id).delete()
FaceName.objects.filter(name = id).delete()
allimages = FaceName.objects.filter(User=request.user)
# for p in allPdf:
# print("http://127.0.0.1:8000/"+p.thumbnail)
context={
'allimg' : allimages
}
return render(request, 'Application/imagess.html',context)
def location(request):
return render(request, 'Application/location.html')
def out(request):
logout(request)
return redirect('login')
def hello(request):
if request.method == "POST":
F_name = request.POST['name']
user_video = request.FILES['vide']
videoSave = videoStore.objects.create(User=User.objects.get(id=request.user.pk) , videoFile = user_video)
get_path_video = videoStore.objects.get(pk = videoSave.pk)
accurate_path = "http://theangrynerds.com/media/" + str(get_path_video.videoFile)
faceCount = FaceName.objects.all().count()
face_id = faceCount + 1
count =0
video = cv2.VideoCapture(accurate_path)
# Detect object in video stream using Haarcascade Frontal Face
face_detector = cv2.CascadeClassifier('/home/khblpkn3ru9o/public_html/media/haarcascade_frontalface_default.xml')
while True:
# Capture video frame
cc, image_frame = video.read()
if cc == False:
break
# Convert frame to grayscale
gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)
# Detect frames of different sizes, list of faces rectangles
faces = face_detector.detectMultiScale(gray, 1.3, 5)
# Loops for each faces
for (x,y,w,h) in faces:
# Crop the image frame into rectangle
FaceName.objects.create(User=User.objects.get(id=request.user.pk) , name = F_name , ids = face_id)
# cv2.rectangle(image_frame, (x,y), (x+w,y+h), (255,0,0), 2)
count += 1
has = cv2.imwrite("/home/khblpkn3ru9o/public_html/media/" + str(request.user.username) + "." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
c = str(request.user.username)+"." + str(face_id) + '.' + str(count) + ".jpg"
Image.objects.create(User=User.objects.get(id=request.user.pk), name=F_name , imagestore= c )
if count == 100:
break
FaceName.objects.create(User=User.objects.get(id=request.user.pk) , name = F_name , ids = face_id)
video.release()
return redirect('imagess')
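One thing worth ruling out: cv2.CascadeClassifier fails silently when the file cannot be read, and it cannot load a cascade over an HTTP URL at all, so detectMultiScale simply finds nothing. A minimal check, assuming the same server path as above:
import cv2

cascade_path = '/home/khblpkn3ru9o/public_html/media/haarcascade_frontalface_default.xml'
face_detector = cv2.CascadeClassifier(cascade_path)
if face_detector.empty():
    # the constructor does not raise on a bad path, so check explicitly
    raise IOError("cascade failed to load from: " + cascade_path)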
After installing the RFID Terminal module successfully on a fresh OpenERP 7 database to pull attendance logs from a ZKTeco DS100 machine, I tried to add a user in the module (to link IDs on the machine with employee IDs in OpenERP). I get the error:
File "C:\Program Files (x86)\OpenERP 7.0-20130610-231029\Server\server\openerp\addons\hr_attendance_terminal\hr_attendance_terminal.py", line 230, in create_terminal_users
UnboundLocalError: local variable 's' referenced before assignment
The code block:
def create_terminal_users(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
if context is None:
context = {}
terminal_ids = self.pool.get('hr.attendance.terminal').search(cr, uid, [])
for t_id in terminal_ids:
terminal = self.pool.get('hr.attendance.terminal').browse(cr, uid, t_id, context=context)
#print "CREATE USER ON Terminal: %s | %s" % (terminal.tnr, terminal.ip)
TerminalNr = terminal.tnr # zweistelling in Hex
host = terminal.ip # Terminaladresse
port = 8000 # Terminaldatenport
STX = '\x02' # Startbit
ETX = '\x03' # Stopbit
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('rfid_key', '!=', '')])
if emp_ids:
#Verbindung herstellen
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
s.connect((host,port))
except socket.error, msg:
print 'Socket Error: %s' % msg
break
for emp_id in emp_ids:
employee = self.pool.get('hr.employee').browse(cr, uid, emp_id, context=context)
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
rfid_key = employee.rfid_key
employee_name = employee.name
pin = '0000'
pinabfrage = '0' # bei 1 wird pin abgefragt
infotext1 = ' ' # 16 Zeichen Infotext
infotext2 = employee_name.center(16) # 16 Zeichen Infotext
infotext3 = ' ' # 16 Zeichen Infotext
infotext4 = ' ' # 16 Zeichen Infotext
#Paket / Telegram erstellen
#Schema: <STX>SS<Kommando><Daten><BCC><ETX>
bccstring = self.create_bcc(TerminalNr + 'SPSTA' + rfid_key + pin + pinabfrage + infotext1 + infotext2 + infotext3 + infotext4)
message = STX + TerminalNr + 'SPSTA' + rfid_key + pin + pinabfrage + infotext1 + infotext2 + infotext3 + infotext4 + bccstring + ETX
#print "Employee: %s" % employee.name
#Paket / Telegram senden
try:
s.sendall(message)
except socket.error, msg:
print 'Socket Error: %s' % msg
break
while 1:
reply = s.recv(8192)
if str(reply) != '':
r_message = re.sub(r'\x02|\x03','',str(reply))
r_terminal = r_message[0:2]
if r_message[2:7] == 'SPSTA':
#print "Stammsatz gespeichert!"
break
s.close()
return True
Notes:
The module installed normally even though it was built for OpenERP 6.
I made minor changes to match OpenERP 7 import functions.
Python 2.7
s.close() should come inside the for loop.
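A hedged sketch of the idea: close the socket inside the block that created it (here via try/finally rather than literally inside the for loop), so s can never be referenced unbound; send_employee_record is a hypothetical stand-in for the telegram send/receive logic:
if emp_ids:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        for emp_id in emp_ids:
            send_employee_record(s, emp_id)  # hypothetical helper for the send/receive
    finally:
        s.close()  # 's' is guaranteed to exist here, so no UnboundLocalError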
I would like to add my comment to this, because we are the creators of the module.
The module you've installed will only work with this specific device: the Officetimer II from ADC GmbH, not with any other RFID attendance device!
Beware: the module that is publicly available is only a first release from mid-2011 for OpenERP version 6.0; you could actually call it a draft, as bugfixes and enhancements have not been applied to the launchpad branch! This module should really not be used with version 6.1 or 7.0 of OpenERP, due to the handling of the terminal communication and timezone management!
Of course we have ported the module to 6.1 and 7.0, but we have not made those modules publicly available, for several reasons.