Django default data throws an error during migration

I'm using Django 1.7
class MyModel(models.Model):
    my_random_field_1 = models.ForeignKey(
        MyOtherModel, null=True, blank=True, related_name="random_1", default=get_random_1
    )
    my_random_field_2 = models.ForeignKey(
        MyOtherModel, null=True, blank=True, related_name="random_2", default=get_random_2
    )
And the 'random' functions:
def get_random_1():
    ob = MyOtherModel.objects.filter(...some filtering...)
    try:
        x = ob[0]
        return x
    except:
        return None

def get_random_2():
    ob = MyOtherModel.objects.filter(...some other filtering...)
    try:
        x = ob[1]
        return x
    except:
        return None
And when I try to migrate I get this error:
TypeError: int() argument must be a string, a bytes-like object or a number, not 'MyOtherModel'
Sentry is attempting to send 2 pending error messages
Waiting up to 10 seconds
But after that, when I open the admin panel and go to MyOtherModel, the random fields are there and they are properly initialized with 'ob[0]' and 'ob[1]'.

To make this code work you should return the instance's primary key as the default, not the instance itself.
def get_random_1():
    ob = MyOtherModel.objects.filter(...some filtering...)
    try:
        x = ob[0]
        return x.pk
    except:
        return None

def get_random_2():
    ob = MyOtherModel.objects.filter(...some other filtering...)
    try:
        x = ob[1]
        return x.pk
    except:
        return None
But mind you that this value will stay "baked" into your migration file, and all instances that are in your db at the time of the migration (old data, for instance) will get that one single value, so maybe this is not what you want.
Newer versions of Django don't even allow baking an object instance into a migration file :D
ValueError: Cannot serialize: <Model: instance name>
There are some values Django cannot serialize into migration files.
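If you need existing rows to pick up per-row values instead of that one baked-in default, the usual route is a data migration with RunPython. A minimal sketch, assuming placeholder app/model/migration names rather than anything from the question:

# yourapp/migrations/000X_backfill_random_fields.py  (hypothetical file)
from django.db import migrations

def backfill(apps, schema_editor):
    # Use the historical models so the migration keeps working as the code evolves.
    MyModel = apps.get_model("yourapp", "MyModel")
    MyOtherModel = apps.get_model("yourapp", "MyOtherModel")
    for obj in MyModel.objects.filter(my_random_field_1__isnull=True):
        # stand-in for "...some filtering..." in the real code
        obj.my_random_field_1 = MyOtherModel.objects.first()
        obj.save(update_fields=["my_random_field_1"])

class Migration(migrations.Migration):
    dependencies = [("yourapp", "000X_previous")]
    operations = [migrations.RunPython(backfill)]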

Related

database not updated by a task running in celery beat

Hello, I have a celery task that is supposed to run every hour to fetch a key. It runs and even acts like it has updated the database, but in reality it does not update it.
@app.task
def refresh_token():
    r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
    obj = json.loads(r.text)
    obj['expires_in'] = int(obj['expires_in'])
    try:
        mpesa_token = MpesaAccessToken.objects.get(id=1)
        mpesa_token.access_token = obj['access_token']
        mpesa_token.save()
        print(obj)
        print(mpesa_token.access_token)
        print("saved")
    except:
        print(obj)
        mpesa_token = MpesaAccessToken.objects.create(**obj)
    return 1
The last three prints all show up in the logs, but checking the admin panel, the values are not updated. However, when I use a view, make a request and then call the function, the database does get updated. Does anyone know what is going on?
You need to add transaction.atomic() to your code. Like this:
from django.db import transaction

@app.task
def refresh_token():
    with transaction.atomic():
        r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
        obj = json.loads(r.text)
        obj['expires_in'] = int(obj['expires_in'])
        try:
            mpesa_token = MpesaAccessToken.objects.get(id=1)
            mpesa_token.access_token = obj['access_token']
            mpesa_token.save()
            print(obj)
            print(mpesa_token.access_token)
            print("saved")
        except:
            print(obj)
            mpesa_token = MpesaAccessToken.objects.create(**obj)
    return 1

query is getting UTC timestamp from the database but local time is stored in the database

For some reason my queryset is returning a UTC time; however, the time it is supposed to be getting from the database is local time. Does anyone know why this is happening? Thanks for your help.
The last_checkin_time method is what gets the user's last check-in timestamp from the database, and right now I just have it put into the time_delta variable in my toggle method so I can see what value it's getting. (Once I get this timezone thing figured out, time_delta will be an actual time delta.)
Here's my model manager:
class UserActivityManager(models.Manager):
    def current(self, user):
        current_obj = self.get_queryset().filter(user=user).order_by('-timestamp').first()
        return current_obj

    def last_checkin_time(self, user):
        last_activity_time = self.get_queryset().order_by('-timestamp').filter(user=user, activity="checkin").first()
        return last_activity_time

    def toggle(self, user):
        last_item = self.current(user)
        activity = "checkin"
        time_delta = None
        last_checkin = self.last_checkin_time(user)
        if last_item is not None:
            if last_item.timestamp <= tz.localize(datetime.datetime.now()):
                pass
            if last_item.activity == "checkin":
                activity = "checkout"
                time_delta = last_checkin.timestamp
        obj = self.model(
            user=user,
            activity=activity,
            time_delta=time_delta,
        )
        obj.save()
        return obj
And here's what the table in my database looks like (focusing on the time_delta field in the last few rows).
EDIT:
Also, I should mention that the timestamp field in my model is set to auto_now_add=True, i.e.:
timestamp = models.DateTimeField(auto_now_add=True)
Not sure if this is causing the problem
Instead of using tz.localize(last_checkin.timestamp) I needed to use tz.normalize(last_checkin.timestamp). It seems that since the timestamp was already in UTC, I needed to convert it with the normalize method rather than localize.
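As a rough illustration of the difference, assuming tz is a pytz timezone object (the zone name and datetimes below are made up for the example):

import datetime
import pytz

tz = pytz.timezone('America/Chicago')

naive = datetime.datetime(2018, 6, 1, 12, 0)   # no tzinfo attached
aware_utc = pytz.utc.localize(naive)           # an aware UTC datetime, like auto_now_add with USE_TZ=True

tz.localize(naive)       # attaches the local zone to a *naive* datetime
tz.normalize(aware_utc)  # converts an already-aware datetime into the local zone
# tz.localize(aware_utc) would raise ValueError, because that datetime is not naive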

Populate model with metadata of file uploaded through django admin

I have two models, Foto and FotoMetadata. Foto has just one property called upload, which is a file upload field. FotoMetadata has a few properties and should receive metadata from the photo uploaded to Foto. This can be done manually in the admin interface, but I want to do it automatically, i.e.: when a photo is uploaded through the admin interface, FotoMetadata is automatically filled.
In my models.py I have a few classes, including Foto and FotoMetadata:
class Foto(models.Model):
    upload = models.FileField(upload_to="fotos")

    def __str__(self):
        return '%s' % (self.upload)


class FotoMetadata(models.Model):
    image_formats = (
        ('RAW', 'RAW'),
        ('JPG', 'JPG'),
    )
    date = models.DateTimeField()
    camera = models.ForeignKey(Camera, on_delete=models.PROTECT)
    format = models.CharField(max_length=8, choices=image_formats)
    exposure = models.CharField(max_length=8)
    fnumber = models.CharField(max_length=8)
    iso = models.IntegerField()
    foto = models.OneToOneField(
        Foto,
        on_delete=models.CASCADE,
        primary_key=True,
    )
When I log in to the admin site, I have an upload form related to Foto, and this is working fine. My problem is that I can't insert metadata into FotoMetadata on the go. I made a function that parses the photo and gives me a dictionary with the info I need. This function, called GetExif, is in a file called getexif.py. This is a simplified version of it:
def GetExif(foto):
    # Open image file for reading (binary mode)
    f = open(foto, 'rb')

    # Parse file
    ...
    <parsing code>
    ...

    f.close()

    # create dictionary to receive data
    meta = {}
    meta['date'] = str(tags['EXIF DateTimeOriginal'].values)
    meta['fnumber'] = str(tags['EXIF FNumber'])
    meta['exposure'] = str(tags['EXIF ExposureTime'])
    meta['iso'] = str(tags['EXIF ISOSpeedRatings'])
    meta['camera'] = str(tags['Image Model'].values)
    return meta
So, basically, what I'm trying to do is use this function in admin.py to automatically populate FotoMetadata when uploading a photo to Foto, but I really couldn't figure out how to make it work. Does anyone have a clue?
Edit 24/03/2016
Ok, after a lot more failures, I'm trying to use save_model in admin.py:
from django.contrib import admin
from .models import Autor, Camera, Lente, Foto, FotoMetadata
from fotomanager.local.getexif import GetExif

admin.site.register(Autor)
admin.site.register(Camera)
admin.site.register(Lente)
admin.site.register(FotoMetadata)

class FotoAdmin(admin.ModelAdmin):
    def save_model(self, request, obj, form, change):
        # populate the model
        obj.save()
        # get metadata
        metadados = GetExif(obj.upload.url)
        # Create instance of FotoMetadata
        fotometa = FotoMetadata()
        # FotoMetadata.id = Foto.id
        fotometa.foto = obj.pk
        # save exposure
        fotometa.exposure = metadados['exposure']

admin.site.register(Foto, FotoAdmin)
I thought it would work, or at least that I would only have problems saving data to the model, but actually I got stuck before that. I got this error:
Exception Type: FileNotFoundError
Exception Value:
[Errno 2] No such file or directory: 'http://127.0.0.1:8000/media/fotos/IMG_8628.CR2'
Exception Location: /home/ricardo/Desenvolvimento/fotosite/fotomanager/local/getexif.py in GetExif, line 24
My GetExif function can't read the file; however, the file path is right! If I copy and paste it into my browser, it downloads the file. I'm trying to figure out a way to correct the address, or to pass the internal path, or to pass the actual file to the function instead of its path. I'm also thinking about a different way to access the file in the GetExif() function. Any idea how to solve it?
Solution
I solved the problem above! By reading the FileField source, I found a property called path, which solves the problem. I also made a few other modifications and the code is working. The FotoAdmin class in admin.py now looks like this:
class FotoAdmin(admin.ModelAdmin):
    def save_model(self, request, obj, form, change):
        # populate the model
        obj.save()
        # get metadata
        metadados = GetExif(obj.upload.path)
        # Create instance of FotoMetadata
        fotometa = FotoMetadata()
        # FotoMetadata.id = Foto.id
        fotometa.foto = obj
        # set and save exposure
        fotometa.exposure = metadados['exposure']
        fotometa.save()
I also had to set null=True on some properties in models.py, and everything is working as it should.
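For reference, the nullable fields might look roughly like this; exactly which fields need it depends on which ones save_model leaves unset, so treat this as a sketch rather than the asker's actual change:

# FotoMetadata fields not filled in by save_model made optional (illustrative)
date = models.DateTimeField(null=True, blank=True)
camera = models.ForeignKey(Camera, on_delete=models.PROTECT, null=True, blank=True)
iso = models.IntegerField(null=True, blank=True)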
I guess you want to use the post_save signal.
Read: Django signals.
Activate the post_save signal, so after you save a Foto you have a hook to do other stuff; in your case, parse the photo's metadata and create a FotoMetadata instance.
Moreover, if you want to save the Foto only if the FotoMetadata succeeds, or under any other condition, you may use the pre_save signal and save the Foto only after the metadata was saved.
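A minimal sketch of that post_save approach, reusing the GetExif helper and models from the question (the receiver module and the fields being set are illustrative, not the asker's code):

# signals.py (or the bottom of models.py)
from django.db.models.signals import post_save
from django.dispatch import receiver

from .models import Foto, FotoMetadata
from fotomanager.local.getexif import GetExif

@receiver(post_save, sender=Foto)
def create_foto_metadata(sender, instance, created, **kwargs):
    # Only on the first save, so re-saving a Foto does not duplicate work
    if created:
        meta = GetExif(instance.upload.path)
        FotoMetadata.objects.create(foto=instance, exposure=meta['exposure'])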

GeoDJango: retrieve last inserted primary key from LayerMapping

I am building an application with GeoDjango and I have the following problem:
I need to read track data from a GPX file, and those data should be stored in a model's MultiLineStringField field.
This should happen in the admin interface, where the user uploads a GPX file.
I am trying to achieve exactly that: the data grabbed from the file should be assigned to the MultiLineStringField, while the other fields should get their values from the form.
My model is:
class GPXTrack(models.Model):
    nome = models.CharField("Nome", blank=False, max_length=255)
    slug = models.SlugField("Slug", blank=True)
    # sport natura arte/cultura
    tipo = models.CharField("Tipologia", blank=False, max_length=2, choices=TIPOLOGIA_CHOICES)
    descrizione = models.TextField("Descrizione", blank=True)
    gpx_file = models.FileField(upload_to='uploads/gpx/')
    track = models.MultiLineStringField(blank=True)
    objects = models.GeoManager()
    published = models.BooleanField("Pubblicato")
    rel_files = generic.GenericRelation(MyFiles)
    #publish_on = models.DateTimeField("Pubblicare il", auto_now_add=True)
    created = models.DateTimeField("Created", auto_now_add=True)
    updated = models.DateTimeField("Updated", auto_now=True)

    class Meta:
        #verbose_name = "struttura'"
        #verbose_name_plural = "strutture"
        ordering = ['-created']

    def __str__(self):
        return str(self.nome)

    def __unicode__(self):
        return '%s' % (self.nome)

    def put(self):
        self.slug = sluggy(self.nome)
        key = super(Foresta, self).put()
        # do something after save
        return key
While in the admin.py file I have overridden the save method as follows:
from django.contrib.gis import admin
from trails.models import GPXPoint, GPXTrack
from django.contrib.contenttypes import generic
from django.contrib.gis.gdal import DataSource
#from gpx_mapping import GPXMapping
from django.contrib.gis.utils import LayerMapping
from django.template import RequestContext
import tempfile
import os
import pprint

class GPXTrackAdmin(admin.OSMGeoAdmin):
    list_filter = ('tipo', 'published')
    search_fields = ['nome']
    list_display = ('nome', 'tipo', 'published', 'gpx_file')
    inlines = [TrackImagesInline, TrackFilesInline]
    prepopulated_fields = {"slug": ("nome",)}

    def save_model(self, request, obj, form, change):
        """When creating a new object, set the creator field."""
        if 'gpx_file' in request.FILES:
            # Get
            gpxFile = request.FILES['gpx_file']
            # Save
            targetPath = tempfile.mkstemp()[1]
            destination = open(targetPath, 'wt')
            for chunk in gpxFile.chunks():
                destination.write(chunk)
            destination.close()
            # define fields of interest for LayerMapping
            track_point_mapping = {'timestamp': 'time',
                                   'point': 'POINT',
                                   }
            track_mapping = {'track': 'MULTILINESTRING'}
            gpx_file = DataSource(targetPath)
            mytrack = LayerMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
            mytrack.save()
            # remove the temp file saved
            os.remove(targetPath)
            orig = GPXTrack.objects.get(pk=mytrack.pk)
            # assign the parsed values from LayerMapping to the appropriate field
            obj.track = orig.track
        obj.save()
As far as I know:
- LayerMapping cannot be used to update a field, only to save a new one;
- I cannot access a specific field of the LayerMapping object (i.e., in the code above, mytrack.track) and assign its value to a model field (i.e., obj.track) in the save_model method;
- I cannot retrieve the primary key of the last saved LayerMapping object (i.e., in the code above, mytrack.pk) in order to update it with the values passed in the form for the fields not mapped in LayerMapping.mapping.
What can I do then?!
I sorted it out by subclassing LayerMapping and adding a get_values() method that, instead of saving the retrieved data, returns it for any use or manipulation. The get_values method is a copy of the LayerMapping.save() method that returns the values instead of saving them.
I am using Django 1.5.
import os
import sys

from django.contrib.gis.utils import LayerMapping


class MyMapping(LayerMapping):
    def get_values(self, verbose=False, fid_range=False, step=False,
                   progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Returns the contents from the OGR DataSource Layer
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source. In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.

         stream:
           Status information will be written to this file handle. Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered. The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)

        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress

        # Defining the 'real' save method, utilizing the transaction
        # decorator created during initialization.
        #@self.transaction_decorator
        def _get_values(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer

            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError, msg:
                    # Something borked the validation
                    if strict:
                        raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True

                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new:
                                geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)

                    try:
                        # Instead of saving, keep the keyword arguments.
                        pippo = kwargs
                        num_saved += 1
                        if verbose:
                            stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                    except SystemExit:
                        raise
                    except Exception, msg:
                        if self.transaction_mode == 'autocommit':
                            # Rolling back the transaction so that other model saves
                            # will work.
                            transaction.rollback_unless_managed()
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return pippo

        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)

            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i:
                    step_slice = slice(beg, None)
                else:
                    step_slice = slice(beg, end)

                try:
                    pippo = _get_values(step_slice, num_feat, num_saved)
                    beg = end
                except:
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _get_values() function.
            return _get_values()
In a custom save or save_model method you can then use:
track_mapping = {'nome': 'name',
                 'track': 'MULTILINESTRING'}
targetPath = "/my/gpx/file/path.gpx"
gpx_file = DataSource(targetPath)
mytrack = MyMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
pippo = mytrack.get_values()
obj.track = pippo['track']

SQLAlchemy query not retrieving committed data in alternate session

I have a problem where I insert a database item using a SQLAlchemy / Tastypie REST interface, but the item is missing when I subsequently get the list of items. It shows up only after I get the list of items a second time.
I am using SQLAlchemy with Tastypie/Django running on Apache via mod_wsgi. I use a singleton Database Manager class to hold my engine and declarative_base, and, with Tastypie, a separate class to get the session and make sure I roll back if there is a problem with the commit. As in the update below, the problem occurs when I don't close my session after inserting. Why is this necessary?
My original code was like this:
Session = scoped_session(sessionmaker(autoflush=True))

# Singleton Database Manager class for managing session
class DatabaseManager():
    engine = None
    base = None

    def ready(self):
        host = 'mysql+mysqldb://etc...'
        if self.engine and self.base:
            return True
        else:
            try:
                self.engine = create_engine(host, pool_recycle=3600)
                self.base = declarative_base(bind=self.engine)
                return True
            except:
                return False

    def getSession(self):
        if self.ready():
            session = Session()
            session.configure(bind=self.engine)
            return session
        else:
            return None

DM = DatabaseManager()

# A session class I use with Tastypie to ensure the session is destroyed at the
# end of the transaction, because Tastypie creates singleton Resources used for
# all threads
class MySession:
    def __init__(self):
        self.s = DM.getSession()

    def safeCommit(self):
        try:
            self.s.commit()
        except:
            self.s.rollback()
            raise

    def __del__(self):
        try:
            self.s.commit()
        except:
            self.s.rollback()
            raise
# ... Then ... when I get requests through Apache/mod_wsgi/Django/Tastypie

# First Request
def obj_create():
    db = MySession()
    print db.s.query(DBClass).count()  # returns 4
    newItem = DBClass()
    db.s.add(newItem)
    db.safeCommit()
    print db.s.query(DBClass).count()  # returns 5

# Second Request after First Request returns
def obj_get_list():
    db = MySession()
    print db.s.query(DBClass).count()  # returns 4 ... should be 5

# Third Request is okay
def obj_get_list():
    db = MySession()
    print db.s.query(DBClass).count()  # returns 5
UPDATE
After further digging, it appears that the problem was that my session needed to be closed after creating. Perhaps because Tastypie's obj_create() adds the SQLAlchemy object to its bundle, and I don't know what happens after it leaves the function's scope:
def obj_create():
    db = MySession()
    newItem = DBClass()
    db.s.add(newItem)
    db.safeCommit()
    copiedObj = copyObj(newItem)  # copy SQLAlchemy record into non-SA object (see below)
    db.s.close()
    return copiedObj
If someone cares to explain this in an answer, I can close the question. Also, for those who are curious, I copy my object out of SQLAlchemy like this:
class Struct:
    def __init__(self, **entries):
        self.__dict__.update(entries)

class MyTastypieResource(Resource):
    ...
    def copyObject(self, object):
        base = {}
        # self._meta is part of my tastypie resource
        for p in class_mapper(self._meta.object_class).iterate_properties:
            if p.key not in base and p.key not in self._meta.excludes:
                base[p.key] = getattr(object, p.key)
        return Struct(**base)
The problem was resolved by closing my session. The update above didn't solve the problem fully; I ended up adding a middleware class to close the session at the end of each transaction. This ensured everything was written to the database. The middleware looks a bit like this:
class SQLAlchemySessionMiddleWare(object):
    def process_response(self, request, response):
        try:
            session = MyDatabaseManger.getSession()
            session.commit()
            session.close()
        except Exception, err:
            pass
        return response

    def process_exception(self, request, exception):
        try:
            session = MyDatabaseManger.getSession()
            session.rollback()
            session.close()
        except Exception, err:
            pass
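For completeness, an old-style middleware like this gets picked up by listing it in MIDDLEWARE_CLASSES in settings.py; the dotted path below is a placeholder, not taken from the question:

# settings.py (illustrative path)
MIDDLEWARE_CLASSES = (
    # ... the existing middleware ...
    'myproject.middleware.SQLAlchemySessionMiddleWare',
)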