CelebA dataset: identities or classes - computer-vision

I want to use the CelebA dataset for a face recognition project, but when I downloaded it I found that all images sit in one folder, with no identity labels or sub-folders assigned to classes. The downloaded information files also give no clue about the classes or identities. Is there any information about the 10,000 identities mentioned in its paper and on its website?

import os
import shutil

import numpy as np

def read_identity(identities_filename):
    """Read identity_CelebA.txt: one '<image_name> <identity>' pair per line."""
    identities = []
    with open(identities_filename, 'r') as f:
        for line in f:
            pair = line.strip().split()
            if len(pair) == 2:
                identities.append(pair)
    return np.array(identities)

def create_identity(celebA_dir, identity_path, output_dir):
    """Copy every image into a sub-folder of output_dir named after its identity."""
    img_identity = read_identity(os.path.expanduser(identity_path))
    # Build a filename -> identity lookup so the result does not depend on
    # os.listdir() returning the images in the same order as the identity file.
    identity_of = {name: identity for name, identity in img_identity}
    for filename in os.listdir(celebA_dir):
        if filename not in identity_of:
            continue
        # ----create the identity sub-folder in the output folder
        save_dir = os.path.join(output_dir, identity_of[filename])
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # ----copy the image to its identity destination
        source = os.path.join(celebA_dir, filename)
        shutil.copy2(source, os.path.join(save_dir, filename))

if __name__ == '__main__':
    celebA_dir = r"...."   # Add the directory where the dataset images exist
    output_dir = r"...."   # Add the directory where you want the images grouped by identity
    identity_path = r"identity_CelebA.txt"
    create_identity(celebA_dir, identity_path, output_dir)
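If you just want to confirm how many identities the annotation file actually contains (the roughly 10k classes the paper mentions), counting the second column of identity_CelebA.txt is enough. A small sketch, assuming the same '<image_name> <identity>' per-line format used above:
from collections import Counter

counts = Counter()
with open('identity_CelebA.txt') as f:
    for line in f:
        parts = line.split()
        if len(parts) == 2:              # expected format: "<image_name> <identity>"
            counts[parts[1]] += 1

print('annotated images  :', sum(counts.values()))
print('distinct identities:', len(counts))
print('largest classes   :', counts.most_common(5))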


How to save or upload an image from a local directory to the ImageField of a database object in Django

I was trying to create some products in an e-commerce project in Django. I had the data file ready and just wanted to loop through the data and save it to the database with Product.objects.create(image='', ...), but I couldn't get the images from the local directory into the database.
I tried these ways:
1.
with open('IMAGE_PATH', 'rb') as f:
    image = f.read()
Product.objects.create(image=image)
2.
image = open('IMAGE_PATH', 'rb')
Product.objects.create(image=image)
3.
module_dir = dir_path = os.path.dirname(os.path.realpath(__file__))
for p in products:
    file_path = os.path.join(module_dir, p['image'])
    Product.objects.create()
    product.image.save(
        file_path,
        File(open(file_path, 'rb'))
    )
    product.save()
None of these worked for me.
After some searching, I got the answer. The code to use would be like this:
from django.core.files import File

for p in products:
    product = Product.objects.create()
    FILE_PATH = p['image']
    local_file = open(f'./APP_NAME/{FILE_PATH}', "rb")
    djangofile = File(local_file)
    product.image.save('FILE_NAME.jpg', djangofile)
    local_file.close()
A related approach, for an image fetched from a URL instead of the local disk:
from django.core.files import File
import urllib

result = urllib.urlretrieve(image_url)  # image_url is a URL to an image
model_instance.photo.save(
    os.path.basename(self.url),
    File(open(result[0], 'rb'))
)
self.save()
Got the answer from here.
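If you would rather not manage the open file handle yourself, the same thing can be done by wrapping the raw bytes in Django's ContentFile. A minimal sketch, reusing the hypothetical APP_NAME path, products list and Product model from the question:
from pathlib import Path

from django.core.files.base import ContentFile

for p in products:
    product = Product.objects.create()
    image_path = Path(f'./APP_NAME/{p["image"]}')          # same layout as above
    # ContentFile takes the bytes directly, so no explicit open()/close() is needed
    product.image.save(image_path.name, ContentFile(image_path.read_bytes()))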

Android app crashes when I run this code from the example

I'm running this code with Buildozer, but every time I tap "take picture" on my Android device the app crashes. The main screen comes up fine and the build is okay. I've added extra permissions and put in the requirement for Python 2 as well. I have also added android to the requirements, but it still doesn't work.
'''
Basic camera example
Default picture is saved as
/sdcard/org.test.cameraexample/enter_file_name_here.jpg
'''
from os import getcwd
from os.path import exists
from os.path import splitext

import kivy
kivy.require('1.8.0')

from kivy.app import App
from kivy.properties import ObjectProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.logger import Logger
from plyer import camera

class CameraDemo(FloatLayout):
    def __init__(self):
        super(CameraDemo, self).__init__()
        self.cwd = getcwd() + "/"
        self.ids.path_label.text = self.cwd

    def do_capture(self):
        filepath = self.cwd + self.ids.filename_text.text
        ext = splitext(filepath)[-1].lower()

        if(exists(filepath)):
            popup = MsgPopup("Picture with this name already exists!")
            popup.open()
            return False

        try:
            camera.take_picture(filename=filepath,
                                on_complete=self.camera_callback)
        except NotImplementedError:
            popup = MsgPopup(
                "This feature has not yet been implemented for this platform.")
            popup.open()

    def camera_callback(self, filepath):
        if(exists(filepath)):
            popup = MsgPopup("Picture saved!")
            popup.open()
        else:
            popup = MsgPopup("Could not save your picture!")
            popup.open()

class CameraDemoApp(App):
    def __init__(self):
        super(CameraDemoApp, self).__init__()
        self.demo = None

    def build(self):
        self.demo = CameraDemo()
        return self.demo

    def on_pause(self):
        return True

    def on_resume(self):
        pass

class MsgPopup(Popup):
    def __init__(self, msg):
        super(MsgPopup, self).__init__()
        self.ids.message_label.text = msg

if __name__ == '__main__':
    CameraDemoApp().run()
My buildozer.spec file is as follows, but I'm not sure what my permissions and related settings should look like.
[app]
# (str) Title of your application
title = Kivy Camera Example
# (str) Package name
package.name = cameraexample
# (str) Package domain (needed for android/ios packaging)
package.domain = org.test
# (str) Source code where the main.py live
source.dir = .
# (list) Source files to include (let empty to include all the files)
source.include_exts = py,png,jpg,kv,atlas
# (list) Source files to exclude (let empty to not exclude anything)
#source.exclude_exts = spec
# (list) List of directory to exclude (let empty to not exclude anything)
#source.exclude_dirs = tests, bin
# (list) List of exclusions using pattern matching
#source.exclude_patterns = license,images/*/*.jpg
# (str) Application versioning (method 1)
# version.regex = __version__ = '(.*)'
# version.filename = %(source.dir)s/main.py
# (str) Application versioning (method 2)
version = 1.0
# (list) Application requirements
# android library is also required to run this app on Android platform
# for android device -> requirements = plyer,kivy,android
requirements = plyer,kivy,python2
# (str) Presplash of the application
#presplash.filename = %(source.dir)s/data/presplash.png
# (str) Icon of the application
#icon.filename = %(source.dir)s/data/icon.png
# (str) Supported orientation (one of landscape, portrait or all)
orientation = portrait
# (bool) Indicate if the application should be fullscreen or not
fullscreen = 0
#
# Android specific
#
# (list) Permissions
# android.permissions = WRITE_EXTERNAL_STORAGE
# (int) Android API to use
#android.api = 14
# (int) Minimum API required (8 = Android 2.2 devices)
#android.minapi = 8
# (int) Android SDK version to use
#android.sdk = 21
# (str) Android NDK version to use
#android.ndk = 9
# (bool) Use --private data storage (True) or --dir public storage (False)
#android.private_storage = True
# (str) Android NDK directory (if empty, it will be automatically downloaded.)
#android.ndk_path =
# (str) Android SDK directory (if empty, it will be automatically downloaded.)
#android.sdk_path =
# (str) Android entry point, default is ok for Kivy-based app
#android.entrypoint = org.renpy.android.PythonActivity
# (list) List of Java .jar files to add to the libs so that pyjnius can access
# their classes. Don't add jars that you do not need, since extra jars can slow
# down the build process. Allows wildcards matching, for example:
# OUYA-ODK/libs/*.jar
#android.add_jars = foo.jar,bar.jar,path/to/more/*.jar
# (list) List of Java files to add to the android project (can be java or a
# directory containing the files)
#android.add_src =
# (str) python-for-android branch to use, if not master, useful to try
# not yet merged features.
#android.branch = master
# (str) OUYA Console category. Should be one of GAME or APP
# If you leave this blank, OUYA support will not be enabled
#android.ouya.category = GAME
# (str) Filename of OUYA Console icon. It must be a 732x412 png image.
#android.ouya.icon.filename = %(source.dir)s/data/ouya_icon.png
# (str) XML file to include as an intent filters in <activity> tag
#android.manifest.intent_filters =
# (list) Android additionnal libraries to copy into libs/armeabi
#android.add_libs_armeabi = libs/android/*.so
# (bool) Indicate whether the screen should stay on
# Don't forget to add the WAKE_LOCK permission if you set this to True
#android.wakelock = False
# (list) Android application meta-data to set (key=value format)
#android.meta_data =
# (list) Android library project to add (will be added in the
# project.properties automatically.)
#android.library_references =
#
# iOS specific
#
# (str) Name of the certificate to use for signing the debug version
# Get a list of available identities: buildozer ios list_identities
#ios.codesign.debug = "iPhone Developer: <lastname> <firstname> (<hexstring>)"
# (str) Name of the certificate to use for signing the release version
#ios.codesign.release = %(ios.codesign.debug)s
[buildozer]
# (int) Log level (0 = error only, 1 = info, 2 = debug (with command output))
log_level = 2
# -----------------------------------------------------------------------------
# List as sections
#
# You can define all the "list" as [section:key].
# Each line will be considered as a option to the list.
# Let's take [app] / source.exclude_patterns.
# Instead of doing:
#
# [app]
# source.exclude_patterns = license,data/audio/*.wav,data/images/original/*
#
# This can be translated into:
#
# [app:source.exclude_patterns]
# license
# data/audio/*.wav
# data/images/original/*
#
# -----------------------------------------------------------------------------
# Profiles
#
# You can extend section / key with a profile
# For example, you want to deploy a demo version of your application without
# HD content. You could first change the title to add "(demo)" in the name
# and extend the excluded directories to remove the HD content.
#
# [app#demo]
# title = My Application (demo)
#
# [app:source.exclude_patterns#demo]
# images/hd/*
#
# Then, invoke the command line with the "demo" profile:
#
# buildozer --profile demo android debug
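One thing worth double-checking, since the crash happens exactly when the camera is invoked: in the spec above the android.permissions line is still commented out, so the APK may be built without the storage (and possibly CAMERA) permission the picture capture needs; uncommenting it, for example as android.permissions = CAMERA,WRITE_EXTERNAL_STORAGE, would cover the declaration side. On newer python-for-android builds targeting Android 6.0+ (not the Python 2 toolchain used here) permissions also have to be requested at runtime. A hedged sketch of that, assuming the android.permissions helper module that python-for-android provides on the device:
# Runtime permission request: only relevant for newer python-for-android builds
# targeting Android 6.0+; with the Python 2 toolchain above, declaring the
# permissions in buildozer.spec is what matters.
try:
    from android.permissions import request_permissions, Permission

    request_permissions([Permission.CAMERA,
                         Permission.WRITE_EXTERNAL_STORAGE])
except ImportError:
    # Not running on Android (e.g. testing on the desktop), nothing to request.
    pass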

Python 2.7 and GCP Google BigQuery: extracts - compression not working

I'm using Python 2.7 (I can't change that right now) and v0.28 of the Google Python client library google.cloud.bigquery, and the compression="GZIP" (or "NONE") argument/setting doesn't appear to be working for me. Can someone else try this out and let me know if it works for them?
In the code below you can see I've been playing with this, but every time my files land on GCS uncompressed, no matter what I use for the compression setting.
Note: my imports are for a larger body of code; not all of them are needed for this snippet.
from pandas.io import gbq
import google.auth
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
from google.cloud.bigquery import LoadJobConfig
from google.cloud.bigquery import Table
import json
import re
from google.cloud import storage

bigquery_client = bigquery.Client(project=project)
dataset_ref = bigquery_client.dataset(dataset_name)
table_ref = dataset_ref.table(table_name)
job_id_prefix = "bqTools_export_job"

job_config = bigquery.LoadJobConfig()

# default is ","
if field_delimiter:
    job_config.field_delimiter = field_delimiter

# default is true
if print_header:
    job_config.print_header = print_header

# CSV, NEWLINE_DELIMITED_JSON, or AVRO
if destination_format:
    job_config.destination_format = destination_format

# GZIP or NONE
if compression:
    job_config.compression = compression

job_config.Compression = "GZIP"
job_config.compression = "GZIP"

job = bigquery_client.extract_table(table_ref, destination, job_config=job_config, job_id_prefix=job_id_prefix)

# job.begin()
job.result()  # Wait for job to complete

returnMsg = 'Exported {}:{} to {}'.format(dataset_name, table_name, destination)
Related links:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.compression
https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/job.html
https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/bigquery/api/export_data_to_cloud_storage.py
I'm sure I'm doing something stupid, thank you for your help...Rich
EDIT BELOW
In the interest of sharing, here is what I think our final code will be...Rich
# export a table from bq into a file on gcs,
# the destination should look like the following, with no brackets {}
# gs://{bucket-name-here}/{file-name-here}
def export_data_to_gcs(dataset_name, table_name, destination,
                       field_delimiter=",", print_header=None,
                       destination_format="CSV", compression="GZIP", project=None):
    try:
        bigquery_client = bigquery.Client(project=project)
        dataset_ref = bigquery_client.dataset(dataset_name)
        table_ref = dataset_ref.table(table_name)
        job_id_prefix = "bqTools_export_job"

        job_config = bigquery.ExtractJobConfig()

        # default is ","
        if field_delimiter:
            job_config.field_delimiter = field_delimiter

        # default is true
        if print_header:
            job_config.print_header = print_header

        # CSV, NEWLINE_DELIMITED_JSON, or AVRO
        if destination_format:
            job_config.destination_format = destination_format

        # GZIP or NONE
        if compression:
            job_config.compression = compression

        # if it should be compressed, make sure there is a .gz on the filename, add if needed
        if compression == "GZIP":
            if destination.lower()[-3:] != ".gz":
                destination = str(destination) + ".gz"

        job = bigquery_client.extract_table(table_ref, destination, job_config=job_config, job_id_prefix=job_id_prefix)

        # job.begin()
        job.result()  # Wait for job to complete

        returnMsg = 'Exported {}:{} to {}'.format(dataset_name, table_name, destination)

        return returnMsg

    except Exception as e:
        errorStr = 'ERROR (export_data_to_gcs): ' + str(e)
        print(errorStr)
        raise
For a table extract you should use ExtractJobConfig, not LoadJobConfig; that is why the compression setting in the original snippet never took effect.
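A minimal sketch of that fix on its own, assuming the 0.28-style google-cloud-bigquery client used in the question; the project, dataset, table and bucket names are placeholders:
from google.cloud import bigquery

client = bigquery.Client(project='my-project')               # placeholder project id
table_ref = client.dataset('my_dataset').table('my_table')   # placeholder dataset/table

job_config = bigquery.ExtractJobConfig()   # extract jobs take ExtractJobConfig
job_config.destination_format = 'CSV'
job_config.compression = 'GZIP'            # produces gzip-compressed files on GCS

job = client.extract_table(table_ref, 'gs://my-bucket/my_table.csv.gz',
                           job_config=job_config)
job.result()  # wait for the extract job to finish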

How to classify images in real time using TensorFlow?

I'm trying to use the Raspberry Pi camera to capture an image and classify it in real time into one of three classes. What I did is use the code below. It can predict on the first iteration; the problem is that it runs out of memory on the second iteration. Is there any way to fix this?
import numpy as np
import tensorflow as tf
import argparse
import os
import sys

def create_graph(model_file):
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(model_file, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')

def run_inference(images, out_file, labels, model_file, k=5):
    # Creates graph from saved GraphDef.
    create_graph(model_file)

    if out_file:
        out_file = open(out_file, 'wb', 1)

    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        for img in images:
            if not tf.gfile.Exists(img):
                tf.logging.fatal('File does not exist %s', img)
                continue
            image_data = tf.gfile.FastGFile(img, 'rb').read()

            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)

            top_k = predictions.argsort()[-k:][::-1]  # Getting top k predictions
            vals = []
            for node_id in top_k:
                human_string = labels[node_id]
                score = predictions[node_id]
                vals.append('%s=%.5f' % (human_string, score))
            rec = "%s\t %s" % (img, ", ".join(vals))
            if out_file:
                out_file.write(rec)
                out_file.write("\n")
            else:
                print(rec)

    if out_file:
        print("Output stored to a file")
        out_file.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Classify Image(s)')
    parser.add_argument('-i', '--in', help='Input Image file ')
    parser.add_argument('-li', '--list', help='List File having input image paths')
    parser.add_argument('-o', '--out', help='Output file for storing the content')
    parser.add_argument('-m', '--model', help='model file path (protobuf)', required=True)
    parser.add_argument('-l', '--labels', help='labels text file', required=True)
    parser.add_argument('-r', '--root', help='path to root directory of input data')
    args = vars(parser.parse_args())

    # Read input
    if not args['in'] and not args['list']:
        print("Either -in or -list option is required.")
        sys.exit(1)
    if args['in']:
        images = [args['in']]
    else:  # list must be given
        with open(args['list']) as ff:
            images = filter(lambda x: x, map(lambda y: y.strip(), ff.readlines()))

    # if a separate root directory given then make a new path
    if args['root']:
        print("Input data from : %s" % args['root'])
        images = map(lambda p: os.path.join(args['root'], p), images)

    with open(args['labels'], 'rb') as f:
        labels = [str(w).replace("\n", "") for w in f.readlines()]

    while True:
        imagename = '/home/pi/Desktop/camerasnap.jpg'
        images = raspi.capture(imagename)
        run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])
The problem is that you are creating the graph in every run_inference method call:
while True:
    imagename = '/home/pi/Desktop/camerasnap.jpg'
    images = raspi.capture(imagename)
    run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])

def run_inference(images, out_file, labels, model_file, k=5):
    # Creates graph from saved GraphDef.
    create_graph(model_file)
    ...
Since the graph probably uses almost all of the available memory, the second iteration fails when the code tries to import a second copy of the graph on top of the first one. You should create the graph only once for the whole lifetime of the program (and remove the create_graph call from inside run_inference, otherwise the graph is still rebuilt on every call).
Try this:
create_graph(model_file)
while True:
    imagename = '/home/pi/Desktop/camerasnap.jpg'
    images = raspi.capture(imagename)
    run_inference(images=images, out_file=args['out'], labels=labels, model_file=args['model'])
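A slightly fuller sketch of the same idea, reusing one graph and one session for every captured frame; it assumes the 'final_result:0' and 'DecodeJpeg/contents:0' tensor names, the create_graph helper, args, labels and the raspi.capture call from the question:
import numpy as np
import tensorflow as tf

create_graph(args['model'])                    # build the graph exactly once

with tf.Session() as sess:                     # one session reused for every frame
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    while True:
        imagename = '/home/pi/Desktop/camerasnap.jpg'
        raspi.capture(imagename)               # capture a fresh frame to the same path
        image_data = tf.gfile.FastGFile(imagename, 'rb').read()
        predictions = np.squeeze(
            sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data}))
        top_k = predictions.argsort()[-5:][::-1]
        print([(labels[i], float(predictions[i])) for i in top_k])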

Serving a zip file from Django

I'm following this solution (Serving dynamically generated ZIP archives in Django) to serve some zip files from Django.
The idea is to select the files from a database using some checkboxes, but for now I'm just trying to make the example work with two images.
import os
import zipfile
import StringIO

from django.http import HttpResponse

def getfiles(request):
    # Files (local path) to put in the .zip
    # FIXME: Change this (get paths from DB etc)
    filenames = ["/home/../image1.png", "/home/../image2.png"]

    # Folder name in ZIP archive which contains the above files
    # E.g [thearchive.zip]/somefiles/file2.txt
    # FIXME: Set this to something better
    zip_subdir = "somefiles"
    zip_filename = "%s.zip" % zip_subdir

    # Open StringIO to grab in-memory ZIP contents
    s = StringIO.StringIO()

    # The zip compressor
    zf = zipfile.ZipFile(s, "w")

    for fpath in filenames:
        # Calculate path for file in zip
        fdir, fname = os.path.split(fpath)
        zip_path = os.path.join(zip_subdir, fname)

        # Add file, at correct path
        zf.write(fpath, zip_path)

    # Must close zip for all contents to be written
    zf.close()

    # Grab ZIP file from in-memory, make response with correct MIME-type
    resp = HttpResponse(s.getvalue(), mimetype="application/x-zip-compressed")
    # ..and correct content-disposition
    resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename

    return resp
I wrote getfiles(request) in my views.py and I call it from the index view:
def index(request):
    if request.method == 'POST':  # If the form has been submitted...
        resp = getfiles(request)
        form = FilterForm(request.POST)  # A form bound to the POST data
    # do some validation and get latest_events from database
    context = {'latest_events_list': latest_events_list, 'form': form}
    return render(request, 'db_interface/index.html', context)
I know getfiles() is being called, because if I put in the names of nonexistent files I get an error, but with correct filenames I get neither a download nor an error (I used the full path /home/myuser/xxx/yyy/Project/app/static/app/image1.png).
I tried with the Django development server and with the Apache2/Nginx setup I have for production.
I also tried using content_type = 'application/force-download'.
Thanks
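One detail worth checking in the index view as posted: the HttpResponse produced by getfiles is assigned to resp but never returned, so the browser never receives the attachment even though the zip is built without errors. A minimal sketch of returning it on POST, keeping the rest of the view as in the question (FilterForm and latest_events_list come from the question's own code):
def index(request):
    if request.method == 'POST':
        # Return the zip response directly so the browser receives the attachment
        return getfiles(request)
    form = FilterForm()
    # do some validation and get latest_events from database, as in the original view
    context = {'latest_events_list': latest_events_list, 'form': form}
    return render(request, 'db_interface/index.html', context)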