I am trying to store data from a JSON file. Adding the data is not a problem, but when I run the command again it copies the data and creates duplicate rows, which I don't want. I want it to update the existing data and, if there is new data in the JSON file, add it to the model.
Here is my Django management command code:
from django.core.management.base import BaseCommand
import requests
from demo.models import CoronaAge, CoronaSex, CoronaComorbidity

class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        url = 'https://api.the2019ncov.com/api/fatality-rate'
        r = requests.get(url)
        titles = r.json()
        print(titles)
        # By age bracket
        for title in titles['byAge'] or []:
            CoronaAge.objects.update_or_create(
                age=title['age'],
                rate=title['rate']
            )
        context = {'titles': CoronaAge.objects.all()}
        # By sex (male and female)
        for title in titles['bySex'] or []:
            CoronaSex.objects.update_or_create(
                sex=title['sex'],
                rate=title['rate']
            )
        context = {'titles': CoronaSex.objects.all()}
        # By pre-existing condition
        for title in titles['byComorbidity'] or []:
            CoronaComorbidity.objects.update_or_create(
                condition=title['preExistingCondition'],
                rate=title['rate']
            )
        context = {'titles': CoronaComorbidity.objects.all()}
This is how I would solve it. Get the existing data. Then, for each new entry, check whether it exists in the DB; if not, create a new object and add it to a list, and at the end run bulk_create to insert all of them in one hit. If it exists, update the fields you want and, again, run bulk_update at the end.
corona_ages = CoronaAge.objects.all()
new_ages = []
existing_ages = []
for title in titles['byAge'] or []:
    entry = corona_ages.filter(age=title['age']).first()
    if not entry:
        new_data = CoronaAge(**title)
        new_ages.append(new_data)
    else:
        entry.rate = title['rate']
        existing_ages.append(entry)
CoronaAge.objects.bulk_create(new_ages)
CoronaAge.objects.bulk_update(existing_ages, ['rate'])
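As an aside: the duplicates in the original command come from passing rate as a lookup argument. update_or_create treats every keyword outside defaults as part of the lookup, so a row whose rate has changed never matches and a new one gets created. A minimal sketch of the one-call fix, assuming age uniquely identifies a row:

for title in titles['byAge'] or []:
    CoronaAge.objects.update_or_create(
        age=title['age'],                  # lookup field
        defaults={'rate': title['rate']},  # fields to update (or set on create)
    )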
I'm working with Django and I want to retrieve data from Firebase and make it readable and serializable.
When retrieving data from the default database (SQLite), I use this function:
@api_view(['GET'])
def getPatients(request):
    patients = Patient.objects.all()
    serializer = PatientSerializer(patients, many=True)
    return Response(serializer.data)
I want to change this function to get and serialize the data from Firebase.
I tried to get the data from Firebase and send it in the response, but it is not showing in the web view page.
The solution I tried:
@api_view(['GET'])
def getPatients(request):
    patients = database.child('patients').get().val()
    serializer = PatientSerializer(patients, many=True)
    return Response(serializer.data)
It sounds like you are trying to get data from Firestore.
If that is the case, you need to initialize Firestore and convert the data into a Python dictionary using the to_dict() method.
import os
import json

from firebase_admin import credentials, firestore, initialize_app

secret = os.getenv("FIREBASE_ADMIN_SDK")
cred = credentials.Certificate(json.loads(secret))
firebase_app = initialize_app(cred)
db = firestore.client()
doc_ref = db.collection('<your_collection>').document('<your_document>')
patients = doc_ref.get().to_dict()
This solution worked for me:
@api_view(['GET'])
def getPatients(request):
    # patients = Patient.objects.all()
    # serializer = PatientSerializer(patients, many=True)
    patients = []  # make an empty list for patients
    patientsData = database.child('patients').get().val()  # get all the values from the Firebase database
    for patient in patientsData:  # fill the patients list
        print(database.child('patients').child(patient).get().val())
        patients.append(database.child('patients').child(patient).get().val())
    print(patients)
    return Response(patients)  # send it to the API
I'm making a REST API in Flask-AppBuilder where files can be uploaded, based on the model and view below.
But I don't know how to call the REST API (POST /File).
I tried several different ways, but none of them worked.
Let me know the correct or an alternative way.
[client code]
file = {'file': open('test.txt', 'rb'), 'description': 'test'}
requests.post(url, headers=headers, files=file)
==> Failed
model.py
from flask import Markup, url_for
from flask_appbuilder import Model
from flask_appbuilder.filemanager import get_file_original_name
from flask_appbuilder.models.mixins import FileColumn
from sqlalchemy import Column, Integer, String

class Files(Model):
    __tablename__ = "project_files"
    id = Column(Integer, primary_key=True)
    file = Column(FileColumn, nullable=False)
    description = Column(String(150))

    def download(self):
        return Markup(
            '<a href="'
            + url_for("ProjectFilesModelView.download", filename=str(self.file))
            + '">Download</a>'
        )

    def file_name(self):
        return get_file_original_name(str(self.file))
view.py
from flask_appbuilder.api import ModelRestApi
from flask_appbuilder.models.sqla.interface import SQLAInterface

from app.models import Files

class FileApi(ModelRestApi):
    resource_name = "File"
    datamodel = SQLAInterface(Files)
    allow_browser_login = True

appbuilder.add_api(FileApi)
FileColumn is only a string field that saves the file name in the database; the actual file is saved to config['UPLOAD_FOLDER']. This is taken care of by flask_appbuilder.filemanager.FileManager.
Furthermore, ModelRestApi assumes that you are POSTing JSON data. In order to upload files, I followed Flask's documentation, which suggests sending a multipart/form-data request. Because of this, one needs to override ModelRestApi.post_headless().
This is my solution, where I also make sure that when a Files database row is deleted, the corresponding file is removed from the filesystem.
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.api import ModelRestApi
from flask_appbuilder.const import API_RESULT_RES_KEY
from flask_appbuilder.filemanager import FileManager
from flask import current_app, request
from marshmallow import ValidationError
from sqlalchemy.exc import IntegrityError

from app.models import Files

class FileApi(ModelRestApi):
    resource_name = "file"
    datamodel = SQLAInterface(Files)

    def post_headless(self):
        if not request.form or not request.files:
            msg = "No data"
            current_app.logger.error(msg)
            return self.response_400(message=msg)
        file_obj = request.files.getlist('file')
        if len(file_obj) != 1:
            msg = ("More than one file provided.\n"
                   "Please upload exactly one file at a time")
            current_app.logger.error(msg)
            return self.response_422(message=msg)
        else:
            file_obj = file_obj[0]
        fm = FileManager()
        uuid_filename = fm.generate_name(file_obj.filename, file_obj)
        form = request.form.to_dict(flat=True)
        # Add the unique filename provided by FileManager, which will
        # be saved to the database. The original filename can be
        # retrieved using
        # flask_appbuilder.filemanager.get_file_original_name()
        form['file'] = uuid_filename
        try:
            item = self.add_model_schema.load(
                form,
                session=self.datamodel.session)
        except ValidationError as err:
            current_app.logger.error(err)
            return self.response_422(message=err.messages)
        # Save file to filesystem
        fm.save_file(file_obj, item.file)
        try:
            self.datamodel.add(item, raise_exception=True)
            return self.response(
                201,
                **{API_RESULT_RES_KEY: self.add_model_schema.dump(
                    item, many=False),
                   "id": self.datamodel.get_pk_value(item),
                   },
            )
        except IntegrityError as e:
            # Delete file from filesystem if the db record cannot be
            # created
            fm.delete_file(item.file)
            current_app.logger.error(e)
            return self.response_422(message=str(e.orig))

    def pre_delete(self, item):
        """
        Delete file from filesystem before removing the record from the
        database
        """
        fm = FileManager()
        current_app.logger.info(f"Deleting {item.file} from filesystem")
        fm.delete_file(item.file)
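With post_headless() overridden this way, the client call from the question only needs a small change: send the file in files= and the remaining columns in data=, so the request goes out as multipart/form-data. A sketch (the URL, port, and token handling are assumptions for illustration, not from the original post):

import requests

# Flask-AppBuilder exposes a ModelRestApi under /api/v1/<resource_name>/;
# a JWT can be obtained from POST /api/v1/security/login.
url = 'http://localhost:5000/api/v1/file/'          # assumed host/port
headers = {'Authorization': 'Bearer ' + access_token}  # assumed token variable
with open('test.txt', 'rb') as f:
    resp = requests.post(
        url,
        headers=headers,
        files={'file': f},             # the binary payload
        data={'description': 'test'},  # other model columns as form fields
    )
print(resp.status_code, resp.json())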
You can use this.
from app.models import Project, ProjectFiles

class DataFilesModelView(ModelView):
    datamodel = SQLAInterface(ProjectFiles)
    label_columns = {"file_name": "File Name", "download": "Download"}
    add_columns = ["file", "description", "project"]
    edit_columns = ["file", "description", "project"]
    list_columns = ["file_name", "download"]
    show_columns = ["file_name", "download"]

Last, add the view to the menu:

appbuilder.add_view(DataFilesModelView, "File View")
I'm building a basic blog from the Web Development course by Steve Huffman on Udacity. This is my code:
import os
import webapp2
import jinja2
from google.appengine.ext import db

template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)

def datetimeformat(value, format='%H:%M / %d-%m-%Y'):
    return value.strftime(format)

jinja_env.filters['datetimeformat'] = datetimeformat

def render_str(template, **params):
    t = jinja_env.get_template(template)
    return t.render(params)

class Entries(db.Model):
    title = db.StringProperty(required=True)
    body = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)

class MainPage(webapp2.RequestHandler):
    def get(self):
        entries = db.GqlQuery('select * from Entries order by created desc limit 10')
        self.response.write(render_str('mainpage.html', entries=entries))

class NewPost(webapp2.RequestHandler):
    def get(self):
        self.response.write(render_str('newpost.html', error=""))

    def post(self):
        title = self.request.get('title')
        body = self.request.get('body')
        if title and body:
            e = Entries(title=title, body=body)
            length = db.GqlQuery('select * from Entries order by created desc').count()
            e.put()
            self.redirect('/newpost/' + str(length + 1))
        else:
            self.response.write(render_str('newpost.html', error="Please type in a title and some content"))

class Permalink(webapp2.RequestHandler):
    def get(self, id):
        e = db.GqlQuery('select * from Entries order by created desc').get()
        self.response.write(render_str('permalink.html', id=id, entry=e))

app = webapp2.WSGIApplication([('/', MainPage),
                               ('/newpost', NewPost),
                               ('/newpost/(\d+)', Permalink)
                               ], debug=True)
In the class Permalink, I'm using the get() method on the query that returns all records in descending order of creation, so it should return the most recently added record. But when I add a new record, permalink.html (it's just a page that shows the title, the body, and the date of creation of the new entry) shows the SECOND most recently added one. For example, I already had three records, so when I added a fourth record, instead of showing the details of the fourth record, permalink.html showed me the details of the third record. Am I doing something wrong?
I don't think my question is a duplicate of this - Read delay in App Engine Datastore after put(). That question is about the read delay of put(), while I'm using get(). The accepted answer also states that get() doesn't cause any delay.
This is because of the eventual consistency used by default for GQL queries.
You need to read:
https://cloud.google.com/appengine/docs/python/datastore/data-consistency
https://cloud.google.com/appengine/docs/python/datastore/structuring_for_strong_consistency
https://cloud.google.com/datastore/docs/articles/balancing-strong-and-eventual-consistency-with-google-cloud-datastore/
Search and read on SO and other sources about strong and eventual consistency in Google Cloud Datastore.
You can specify read_policy=STRONG_CONSISTENCY for your query, but it has associated costs that you should be aware of and take into account.
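In this particular code you can also sidestep the query entirely: a lookup by key is strongly consistent. A sketch (my own suggestion, not from the linked docs) that redirects using the datastore id returned by put() instead of a computed position:

class NewPost(webapp2.RequestHandler):
    def post(self):
        title = self.request.get('title')
        body = self.request.get('body')
        if title and body:
            e = Entries(title=title, body=body)
            key = e.put()  # put() returns the entity's key
            self.redirect('/newpost/' + str(key.id()))
        else:
            self.response.write(render_str('newpost.html', error="Please type in a title and some content"))

class Permalink(webapp2.RequestHandler):
    def get(self, id):
        e = Entries.get_by_id(int(id))  # key lookup: strongly consistent
        self.response.write(render_str('permalink.html', id=id, entry=e))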
I am trying to make an object graph of webpages (edges being 'related' links between pages). Once I get this working I will create a dictionary of objects with the URL as the key so that I don't create multiple objects for the same page.
I have a class Page(), and inside Page is a list filled with objects of type Page(). I call the function root.crawlRelated() on my root page object. This populates the list. Then I try to iterate through the list and call obj.crawlRelated(). My goal is to populate each Page() object in root.related[] with a list of their related pages.
On the website I am crawling, each page has 5 related links, so after two generations there should be a root with 5 objects in related, and each of those objects should have 5 objects in their related. The problem I am having is that the crawlRelated() method is appending all of the objects to root rather than to the respective objects in root.related.
from urllib import urlopen
#from bs4 import BeautifulSoup
import re

####################CLASS PAGE######################
class Page():
    __title = ""
    __link = ""
    related = []
    __relatedURLs = []

    def __init__(self, title, link, relatedURLs):
        self.__title = title
        self.__link = link
        self.__relatedURLs = relatedURLs

    def get_attributes(self, key):
        return self.__attributes.get(key, None)

    def get_relatedURLs(self):
        return self.__relatedURLs

    def get_title(self):
        return self.__title

    def crawl(self, url):
        webpage = urlopen(url).read()
        patFinderTitle = re.compile('<title>(.*)</title>')
        patFinderLink = re.compile('<link rel="canonical" href="([^"]*)" />')
        patFinderRelated = re.compile('<li><a href="([^"]*)"')
        findPatTitle = re.findall(patFinderTitle, webpage)
        findPatLink = re.findall(patFinderLink, webpage)
        findPatRelated = re.findall(patFinderRelated, webpage)
        self.related.append(Page(findPatTitle, findPatLink, findPatRelated))

    def crawlRelated(self):
        for link in self.__relatedURLs:
            self.crawl(link)
        print 'crawled related in', self.__title
####################END CLASS######################

print 'doing some work...'
webpage = urlopen('http://medtwice.com/am-i-pregnant/').read()
patFinderTitle = re.compile('<title>(.*)</title>')
patFinderLink = re.compile('<link rel="canonical" href="([^"]*)" />')
patFinderRelated = re.compile('<li><a href="([^"]*)"')
findPatTitle = re.findall(patFinderTitle, webpage)
findPatLink = re.findall(patFinderLink, webpage)
findPatRelated = re.findall(patFinderRelated, webpage)
print 'found the webpage', findPatTitle

root = Page(findPatTitle, findPatLink, findPatRelated)
print 'Now crawling', root.get_relatedURLs()
root.crawlRelated()
print 'done crawling related'

print 'crawling related gen 2...'
i = 0
for rel in root.related:
    print 'crawling', rel #.get_title()
    #print rel.get_relatedURLs()
    rel.crawlRelated()
    i += 1
    if i > 3:
        break
print 'done crawling related gen 2'

print root.related
print len(root.related)
print len(root.related[0].related)
The if i > 3: break line is to prevent the infinite loop that happens due to this bug.
After these lines of code run
print len(root.related)
print len(root.related[0].related)
the output is
>> 25
>> 25
I want them to return
>> 5
>> 5
What am I doing wrong? Thanks.
The key is in this section of your code:
class Page():
    __title = ""
    __link = ""
    related = []
    __relatedURLs = []

    def __init__(self, title, link, relatedURLs):
        self.__title = title
        self.__link = link
        self.__relatedURLs = relatedURLs
You've declared related as a class variable. That means all instances of the class Page share one array. Whether you access root.related or rel.related or so on, it's all the same array. So anything you add to, say, rel.related (in the crawl method) will also appear when you access root.related.
The solution is to make related an instance variable, by only assigning to it in the constructor. While you're at it, you should probably remove the other declarations of class variables because they get masked by the corresponding instance variables. Basically, start your class like this:
class Page():
    def __init__(self, title, link, relatedURLs):
        self.__title = title
        self.__link = link
        self.__relatedURLs = relatedURLs
        self.related = []
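A minimal standalone illustration of the difference (my own example, not from the original code):

class Demo(object):
    shared = []  # class variable: one list shared by every instance

    def __init__(self):
        self.own = []  # instance variable: a fresh list per instance

a = Demo()
b = Demo()
a.shared.append(1)
a.own.append(1)
print(b.shared)  # [1] -- b sees the very same list a appended to
print(b.own)     # []  -- b's own list is untouched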
I am building an application with GeoDjango and I have the following problem:
I need to read track data from a GPX file, and those data should be stored in a model's MultiLineStringField field.
This should happen in the admin interface, where the user uploads a GPX file.
I am trying to achieve exactly this: the data grabbed from the file should be assigned to the MultiLineStringField, while the other fields should get their values from the form.
My model is:
from django.contrib.gis.db import models
from django.contrib.contenttypes import generic

class GPXTrack(models.Model):
    nome = models.CharField("Nome", blank=False, max_length=255)
    slug = models.SlugField("Slug", blank=True)
    # sport natura arte/cultura
    tipo = models.CharField("Tipologia", blank=False, max_length=2, choices=TIPOLOGIA_CHOICES)
    descrizione = models.TextField("Descrizione", blank=True)
    gpx_file = models.FileField(upload_to='uploads/gpx/')
    track = models.MultiLineStringField(blank=True)
    objects = models.GeoManager()
    published = models.BooleanField("Pubblicato")
    rel_files = generic.GenericRelation(MyFiles)
    #publish_on = models.DateTimeField("Pubblicare il", auto_now_add = True)
    created = models.DateTimeField("Created", auto_now_add=True)
    updated = models.DateTimeField("Updated", auto_now=True)

    class Meta:
        #verbose_name = "struttura'"
        #verbose_name_plural = "strutture"
        ordering = ['-created']

    def __str__(self):
        return str(self.nome)

    def __unicode__(self):
        return '%s' % (self.nome)

    def put(self):
        self.slug = sluggy(self.nome)
        key = super(GPXTrack, self).put()
        # do something after save
        return key
In the admin.py file I have overridden the save method as follows:
from django.contrib.gis import admin
from trails.models import GPXPoint, GPXTrack
from django.contrib.contenttypes import generic
from django.contrib.gis.gdal import DataSource
#from gpx_mapping import GPXMapping
from django.contrib.gis.utils import LayerMapping
from django.template import RequestContext
import tempfile
import os
import pprint

class GPXTrackAdmin(admin.OSMGeoAdmin):
    list_filter = ('tipo', 'published')
    search_fields = ['nome']
    list_display = ('nome', 'tipo', 'published', 'gpx_file')
    inlines = [TrackImagesInline, TrackFilesInline]
    prepopulated_fields = {"slug": ("nome",)}

    def save_model(self, request, obj, form, change):
        """When creating a new object, set the creator field.
        """
        if 'gpx_file' in request.FILES:
            # Get
            gpxFile = request.FILES['gpx_file']
            # Save
            targetPath = tempfile.mkstemp()[1]
            destination = open(targetPath, 'wt')
            for chunk in gpxFile.chunks():
                destination.write(chunk)
            destination.close()
            # define fields of interest for LayerMapping
            track_point_mapping = {'timestamp': 'time',
                                   'point': 'POINT',
                                   }
            track_mapping = {'track': 'MULTILINESTRING'}
            gpx_file = DataSource(targetPath)
            mytrack = LayerMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
            mytrack.save()
            # remove the temp file saved
            os.remove(targetPath)
            orig = GPXTrack.objects.get(pk=mytrack.pk)
            # assign the parsed values from LayerMapping to the appropriate Field
            obj.track = orig.track
        obj.save()
As far as I know:
- LayerMapping cannot be used to update a field, only to save a new record
- I cannot access a specific field of the LayerMapping object (i.e., in the code above: mytrack.track) and assign its value to a model field (i.e., obj.track) in the save_model method
- I cannot retrieve the primary key of the last saved LayerMapping object (i.e., in the code above: mytrack.pk) in order to update it with the values passed in the form for the fields not mapped in LayerMapping.mapping
What can I do then?
I sorted it out by subclassing LayerMapping and adding a get_values() method that, instead of saving the retrieved data, returns it for any further use or manipulation. The get_values() method is a copy of LayerMapping.save() that returns the values instead of saving them.
I am using Django 1.5.
import os
import sys

from django.contrib.gis.gdal import OGRGeometry
from django.contrib.gis.utils import LayerMapping, LayerMapError
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction

class MyMapping(LayerMapping):
    def get_values(self, verbose=False, fid_range=False, step=False,
                   progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Returns the contents from the OGR DataSource Layer
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature IDs to map
           from the data source. In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature, etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with
           an integer for the desired interval.

         stream:
           Status information will be written to this file handle. Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered. The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)

        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress

        # Defining the 'real' save method, utilizing the transaction
        # decorator created during initialization.
        #self.transaction_decorator
        def _get_values(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer

            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError, msg:
                    # Something borked the validation
                    if strict: raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True

                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new: geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)

                    try:
                        # Instead of saving, keep the keyword arguments so
                        # they can be returned to the caller.
                        pippo = kwargs
                        num_saved += 1
                        if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                    except SystemExit:
                        raise
                    except Exception, msg:
                        if self.transaction_mode == 'autocommit':
                            # Rolling back the transaction so that other model saves
                            # will work.
                            transaction.rollback_unless_managed()
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return pippo

        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)

            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i: step_slice = slice(beg, None)
                else: step_slice = slice(beg, end)

                try:
                    pippo = _get_values(step_slice, num_feat, num_saved)
                    beg = end
                except:
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _get_values() function.
            return _get_values()
In a custom save or save_model method you can then use:
track_mapping = {'nome': 'name',
                 'track': 'MULTILINESTRING'}
targetPath = "/my/gpx/file/path.gpx"
gpx_file = DataSource(targetPath)
mytrack = MyMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
pippo = mytrack.get_values()
obj.track = pippo['track']
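Note that get_values(), as written, returns the keyword arguments of the last feature processed (the pippo variable), so indexing pippo['track'] works cleanly when the GPX layer contains a single track; for a multi-feature layer you would collect the kwargs into a list inside the loop instead.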