Postgres vs SQLite "page not found" discrepancy - Django

My dev environment is Django with SQLite and my prod is Django with Postgres.
I have a view that works perfectly on SQLite:
@login_required
def receipt_pdf(request, pk):
    try:
        Receipt_payment_id_dict = Receipt_payment.objects.filter(is_active=True).aggregate(Max('id'))
        if Receipt_payment_id_dict:
            Receipt_payment_id = Receipt_payment_id_dict['id__max']
            Receipt_payment_dict = get_object_or_404(Receipt_payment, pk=Receipt_payment_id)
        else:
            Receipt_payment_dict = "no data"
    except ValueError:
        raise Http404("At least one active value should be in -Receipt_payment-, contact your administrator")
    try:
        general_id_dict = General_configurations.objects.filter(is_active=True).aggregate(Max('id'))
        if general_id_dict:
            general_id = general_id_dict['id__max']
            general_dict = get_object_or_404(General_configurations, pk=general_id)
        else:
            general_dict = "no data"
    except ValueError:
        raise Http404("At least one active value should be in -General_configurations-, contact your administrator")
    payment = get_object_or_404(LeasePayment, pk=pk)
    # leasetenant = lease.tenant_set.all().order_by('-id')
    # Create the HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="Receipt.pdf"'
    buffer = BytesIO()
    # Create the PDF object, using the BytesIO object as its "file."
    p = canvas.Canvas(buffer)
But when I execute the same code against Postgres, I get:
No Receipt_payment matches the given query.
What could be the problem?
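A hedged observation, not a confirmed diagnosis: aggregate() always returns a dict, so the truthiness check in this view never falls through to the "no data" branch, and a table with no active rows (as a fresh Postgres instance might have) yields pk=None:

from django.db.models import Max

# On a database with no active rows:
id_dict = Receipt_payment.objects.filter(is_active=True).aggregate(Max('id'))
print(id_dict)                # {'id__max': None} -- still a truthy dict
if id_dict:                   # always True, even when no rows matched
    pk_value = id_dict['id__max']                    # None
    get_object_or_404(Receipt_payment, pk=pk_value)  # raises Http404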


How to manage stored procedures in AWS Redshift using an automation or command-line tool?

We've got a number of stored procedures that have been built in Redshift on AWS.
We need to download and upload these stored procedures so that they can be kept in GitHub as a means of tracking changes.
These procedures eventually need to be part of a CloudFormation template so that the infrastructure can be maintained as well.
Ideally this could be done using the AWS CLI, but there doesn't seem to be a command for it.
How are AWS Redshift stored procedures managed in an automation/CI/CD environment?
I have a portion of a working solution:
import json
import os

import psycopg2


def run_sql_commands(input_command, database="mydatabasename", host_port=1234, host_url="datawarehouse.redshift.amazonaws.com"):
    """
    :param input_command: sql string to execute
    :param database: which database to run the query
    :return:
    """
    results = None
    # db_user and db_pass will need to be set as environment variables
    db_user = os.environ["db_user"]
    db_pass = os.environ["db_pass"]
    db_host = host_url
    db_port = host_port
    db_name = database
    try:
        conn = psycopg2.connect(
            "dbname={} port={} user={} host={} password={}".format(db_name, db_port, db_user, db_host, db_pass))
        cursor = conn.cursor()
        cursor.execute(input_command)
        results = cursor.fetchall()
    except Exception as e:
        return None
    return results


def get_arg_type(oid, database):
    sql = f"SELECT typname FROM pg_catalog.pg_type WHERE oid={oid}"
    r = run_sql_commands(sql, database)
    return r[0][0]


def download_all_procedures(database, file_location="./local-code-store"):
    get_all_stored_procedure_sql = """
        SELECT
            n.nspname,
            b.usename,
            p.proname,
            p.proargnames,
            p.proargtypes,
            p.prosrc
        FROM
            pg_catalog.pg_namespace n
        JOIN pg_catalog.pg_proc p ON
            pronamespace = n.oid
        JOIN pg_user b ON
            b.usesysid = p.proowner
        WHERE
            nspname NOT IN ('information_schema', 'pg_catalog');
        """
    r = run_sql_commands(get_all_stored_procedure_sql, database)
    counted_items = []
    for item in r:
        table = item[0]
        author = item[1]
        procedure_name = item[2]
        procedure_arguments = item[3]
        procedure_argtypes = item[4]
        procedure = item[5]
        t_list = []
        for this_oid in procedure_argtypes.split():
            t = get_arg_type(this_oid, database="mydatabasename")
            t_list.append(t)
        meta_data = {'table': table,
                     'author': author,
                     'arguments': procedure_arguments,
                     'argument_types': t_list}
        filename = f'{file_location}/{database}/Schemas/{table}/{procedure_name}.sql'
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        f = open(filename, 'w')
        count = f.write(procedure.replace('\r\n', '\n'))
        f.close()
        filename = f'{file_location}/{database}/Schemas/{table}/{procedure_name}.json'
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        f = open(filename, 'w')
        counted_items.append(f.write(json.dumps(meta_data)))
        f.close()
    return counted_items


if __name__ == "__main__":
    print("Starting...")
    c = download_all_procedures("mydatabase")
    print("... finished")

Why does render_template keep showing the old value of the Flask form?

I've been searching for an answer for hours; I apologise if I missed something.
I'm using the same form multiple times in order to add rows to my database.
Each time, I check an Excel file to pre-fill some of the WTForms StringFields with known information that the user may want to change.
The thing is: I change form.whatever.data, and when printing it, it shows the new value. But when I render the template, it keeps showing the old value.
I tried doing form.hours_estimate.data = "" before assigning it a new value, just in case, but it didn't work.
I will attach here the route I'm talking about. The important bit is after # Get form ready for next service. If more info is needed, please let me know.
Thank you very much.
@coordinator_bp.route("/coordinator/generate-order/<string:pev>", methods=['GET', 'POST'])
@login_required
def generate_order_services(pev):
    if not (current_user.is_coordinator or current_user.is_manager):
        return redirect(url_for('public.home'))
    # Get the excel URL
    f = open("./app/database/datafile", 'r')
    filepath = f.read()
    f.close()
    error = None
    if GenerateServicesForm().submit1.data and GenerateServicesForm().validate():
        # First screen submit (validate the data -> first Service introduction)
        form = FillServiceForm()
        next_service_row = get_next_service_row(filepath)
        if next_service_row is None:
            excel_info = excel_get_pev(filepath)
            error = "Excel error. Service code not found. If you get this error please report the exact way you did it."
            return render_template('coordinator/get_pev_form.html', form=GetPevForm(), error=error, info=excel_info)
        service_info = get_service_info(filepath, next_service_row)
        service_code = service_info[0]
        start_date = service_info[1]
        time_estimate = service_info[2]
        objects = AssemblyType.get_all()
        assembly_types = []
        for assembly_type in objects:
            assembly_types.append(assembly_type.type)
        form.service_code.data = service_code
        form.start_date.data = start_date
        form.hours_estimate.data = time_estimate
        return render_template('coordinator/fill_service_form.html', form=form, error=error, assembly_types=assembly_types)
    if FillServiceForm().submit2.data:
        if not FillServiceForm().validate():
            objects = AssemblyType.get_all()
            assembly_types = []
            for assembly_type in objects:
                assembly_types.append(assembly_type.type)
            return render_template('coordinator/fill_service_form.html', form=FillServiceForm(), error=error,
                                   assembly_types=assembly_types)
        # Service screen submits
        # Here we save the data of the last submit and ready the next one or end the generation process
        # Ready the form
        form = FillServiceForm()
        next_service_row = get_next_service_row(filepath)
        if next_service_row is None:
            excel_info = excel_get_pev(filepath)
            error = "Excel error. Service code not found. If you get this error please report the exact way you did it."
            return render_template('coordinator/get_pev_form.html', form=GetPevForm(), error=error, info=excel_info)
        service_info = get_service_info(filepath, next_service_row)
        service_code = service_info[0]
        form.service_code.data = service_code
        # Create the service (this deletes the service code from the excel)
        service = create_service(form, filepath)
        if isinstance(service, str):
            return render_template('coordinator/fill_service_form.html', form=form, error=service)
        # Get next service
        next_service_row = get_next_service_row(filepath)
        if next_service_row is None:
            # This means there are no more services pending
            return "ALL DONE"
        else:
            # Get form ready for next service
            service_info = get_service_info(filepath, next_service_row)
            service_code = service_info[0]
            start_date = service_info[1]
            time_estimate = service_info[2]
            print("time_estimate")
            print(time_estimate)  # I get the new value.
            objects = AssemblyType.get_all()
            assembly_types = []
            for assembly_type in objects:
                assembly_types.append(assembly_type.type)
            form.service_code.data = service_code
            form.start_date.data = start_date
            form.hours_estimate.data = time_estimate
            print(form.hours_estimate.data)  # Here I get the new value. Everything should be fine.
            # In the html, the old value keeps popping up.
            return render_template('coordinator/fill_service_form.html', form=form, error=error,
                                   assembly_types=assembly_types)
    number_of_services = excel_get_services(filepath=filepath, selected_pev=pev)
    # Get the number of the first excel row of the selected pev
    first_row = excel_get_row(filepath, pev)
    if first_row is None:
        excel_info = excel_get_pev(filepath)
        error = "Excel error. PEV not found. If you get this error please report the exact way you did it."
        return render_template('coordinator/get_pev_form.html', form=GetPevForm(), error=error, info=excel_info)
    service_code = []
    start_date = []
    time_estimate_code = []
    quantity = []
    # Open the excel
    wb = load_workbook(filepath)
    # Grab the active worksheet
    ws = wb.active
    for idx in range(number_of_services):
        # Append the data to the lists
        service_code.append(ws.cell(row=first_row + idx, column=12).value)
        start_date.append(str(ws.cell(row=first_row + idx, column=5).value)[:10])
        time_estimate_code.append(ws.cell(row=first_row + idx, column=7).value)
        quantity.append(ws.cell(row=first_row + idx, column=9).value)
    wb.close()
    return render_template('coordinator/generate_services_form.html',
                           form=GenerateServicesForm(),
                           pev=pev,
                           service_code=service_code,
                           start_date=start_date,
                           time_estimate_code=time_estimate_code,
                           quantity=quantity)
Well, I found a workaround: I send the data outside the form, like this:
return render_template('coordinator/fill_service_form.html', form=form, error=error,
                       assembly_types=assembly_types,
                       service_code=service_code,
                       start_date=start_date,
                       time_estimate=time_estimate)
And replaced the Jinja form field with this:
<input class="form-control" placeholder="2021-04-23" name="start_date" type="text" value="{{start_date}}">
I'm still using the form (the name attribute matches the form field name), and at the same time I supply the value externally.
I hope this helps somebody.
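For reference, a hedged explanation of why the workaround is needed: when a Flask-WTF form is instantiated during a POST it binds the submitted formdata, and a WTForms StringField renders its raw_data (the submitted value) in preference to the .data you assign afterwards. Two alternatives, assuming the same FillServiceForm as above:

# Option 1: build the form without binding the POSTed data
form = FillServiceForm(formdata=None)
form.hours_estimate.data = time_estimate  # the template now shows this value

# Option 2: keep the bound form but drop the stale submitted value
form = FillServiceForm()
form.hours_estimate.raw_data = None
form.hours_estimate.data = time_estimate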

Flask-AppBuilder: how to make a REST API with file items

I'm making a REST API to which files can be uploaded, based on a Model/View in Flask-AppBuilder, like this.
But I don't know how to call the REST API (POST /File).
I tried several different ways, but none worked.
Please let me know the correct way, or an alternative.
[client code]
file = {'file': open('test.txt', 'rb'), 'description': 'test'}
requests.post(url, headers=headers, files=file)
==> Failed
model.py
class Files(Model):
    __tablename__ = "project_files"
    id = Column(Integer, primary_key=True)
    file = Column(FileColumn, nullable=False)
    description = Column(String(150))

    def download(self):
        return Markup(
            '<a href="'
            + url_for("ProjectFilesModelView.download", filename=str(self.file))
            + '">Download</a>'
        )

    def file_name(self):
        return get_file_original_name(str(self.file))
view.py
class FileApi(ModelRestApi):
    resource_name = "File"
    datamodel = SQLAInterface(Files)
    allow_browser_login = True

appbuilder.add_api(FileApi)
FileColumn is only a string field that saves the file name in the database. The actual file is saved to config['UPLOAD_FOLDER'].
This is taken care of by flask_appbuilder.filemanager.FileManager.
Furthermore, ModelRestApi assumes that you are POSTing JSON data. In order to upload files, I followed Flask's documentation, which suggests sending a multipart/form-data request. Because of this, one needs to override ModelRestApi.post_headless().
This is my solution, where I also make sure that when a Files database row is deleted, the corresponding file is removed from the filesystem.
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.api import ModelRestApi
from flask_appbuilder.const import API_RESULT_RES_KEY
from flask_appbuilder.filemanager import FileManager
from flask import current_app, request
from marshmallow import ValidationError
from sqlalchemy.exc import IntegrityError

from app.models import Files


class FileApi(ModelRestApi):
    resource_name = "file"
    datamodel = SQLAInterface(Files)

    def post_headless(self):
        if not request.form or not request.files:
            msg = "No data"
            current_app.logger.error(msg)
            return self.response_400(message=msg)
        file_obj = request.files.getlist('file')
        if len(file_obj) != 1:
            msg = ("More than one file provided.\n"
                   "Please upload exactly one file at a time")
            current_app.logger.error(msg)
            return self.response_422(message=msg)
        else:
            file_obj = file_obj[0]
        fm = FileManager()
        uuid_filename = fm.generate_name(file_obj.filename, file_obj)
        form = request.form.to_dict(flat=True)
        # Add the unique filename provided by FileManager, which will
        # be saved to the database. The original filename can be
        # retrieved using
        # flask_appbuilder.filemanager.get_file_original_name()
        form['file'] = uuid_filename
        try:
            item = self.add_model_schema.load(
                form,
                session=self.datamodel.session)
        except ValidationError as err:
            current_app.logger.error(err)
            return self.response_422(message=err.messages)
        # Save file to filesystem
        fm.save_file(file_obj, item.file)
        try:
            self.datamodel.add(item, raise_exception=True)
            return self.response(
                201,
                **{
                    API_RESULT_RES_KEY: self.add_model_schema.dump(item, many=False),
                    "id": self.datamodel.get_pk_value(item),
                },
            )
        except IntegrityError as e:
            # Delete file from filesystem if the db record cannot be
            # created
            fm.delete_file(item.file)
            current_app.logger.error(e)
            return self.response_422(message=str(e.orig))

    def pre_delete(self, item):
        """
        Delete file from filesystem before removing the record from the
        database
        """
        fm = FileManager()
        current_app.logger.info(f"Deleting {item.file} from filesystem")
        fm.delete_file(item.file)
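With this override in place, a hedged client-side sketch of the call that failed in the question might look as follows. The URL and token are placeholders (ModelRestApi typically serves the resource under /api/v1/<resource_name>/); the key point is that the file goes in files= and the remaining columns in data=, so the request is sent as multipart/form-data:

import requests

url = "http://localhost:8080/api/v1/file/"
headers = {"Authorization": "Bearer <access_token>"}
with open("test.txt", "rb") as fp:
    resp = requests.post(
        url,
        headers=headers,
        files={"file": fp},            # the file part of the multipart body
        data={"description": "test"},  # plain form fields, not files
    )
print(resp.status_code, resp.json())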
You can use this.
from app.models import Project, ProjectFiles


class DataFilesModelView(ModelView):
    datamodel = SQLAInterface(ProjectFiles)
    label_columns = {"file_name": "File Name", "download": "Download"}
    add_columns = ["file", "description", "project"]
    edit_columns = ["file", "description", "project"]
    list_columns = ["file_name", "download"]
    show_columns = ["file_name", "download"]

Lastly, add the view to the menu:
appbuilder.add_view(DataFilesModelView, "File View")

Django session loses complex object in production, but not in dev

So I am successfully storing a complex object (non-model) in my session in development. I've tried every session engine and cache type, and they all work in development (PyCharm). However, when I move the code to production, while no errors are thrown, the session loses the object.
Here is the method I use to set the session object:
def instantiate_command_object(request):
    try:
        ssc = request.session['specimen_search_criteria']
        logger.debug('found ssc session variable')
    except KeyError:
        logger.debug('failed to find ssc session variable')
        ssc = SpecimenSearchCommand()
    return ssc
Then, in a method that runs asynchronously via an AJAX call, I start making changes to the object in the session:
def ajax_add_collection_to_search(request):
    ssc = instantiate_command_object(request)
    collection_id = request.GET.get('collection')
    collection = Collection.objects.get(pk=collection_id)
    if collection and collection not in ssc.collections:
        ssc.collections.append(collection)
    # save change to session
    request.session['specimen_search_criteria'] = ssc
    # refresh search results
    ssc.search()
    return render(request, '_search.html')
All this works as far as it goes. However, if I then refresh the browser, the session is lost. Here is a snippet from the template:
{% with criteria=request.session.specimen_search_criteria %}
    <div class="search-criteria" id="search-criteria">
        <div class="row">
            Session:
            {{ request.session }}<br/>
            Search:
            {{ request.session.specimen_search_criteria }}<br/>
            Created:
            {{ request.session.specimen_search_criteria.key }}<br/>
            Collections:
            {{ request.session.specimen_search_criteria.collections }}<br/>
Again, in development I can refresh all day and the same object will be returned. In production, it will either create a new object or occasionally return a previously created copy.
A few relevant items:
The production server is running Apache httpd with mod_wsgi.
I've tried memcached, database cache, etc.; the behavior remains the same. It always works in development, never in production.
I've tried it with
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
and without. I can see the session info in the database, and when I unpickle it, it just seems to be pointing to a location in memory for the complex object.
I'm guessing this might have something to do with running in a multi-user environment, but again, I'm not using locmem and I've tried all of the caching approaches to no effect.
To be clear, the session itself seems to be fine: I can store a string or other simple item in it and it will stick. It's the complex object within the session that is getting lost.
Edit: I might also point out that if I refresh the browser immediately following the return of the search criteria, it will actually return successfully. Anything more than about a second and it will disappear.
Edit (adding code of SpecimenSearchCommand):
class SpecimenSearchCommand:
    def __init__(self):
        pass

    created = datetime.datetime.now()
    key = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(6))
    jurisdictions = []
    taxa = []
    strata = []
    collections = []
    chrons = []
    has_images = False
    query = None  # The active SQL query, not the actual result records
    page_size = 50
    current_page = 1
    sort_order = 'number'
    results = []  # Page of results from paginator

    def is_empty(self):
        if len(self.jurisdictions) == 0 and len(self.taxa) == 0 and len(self.strata) == 0 and \
                len(self.collections) == 0 and len(self.chrons) == 0 and self.has_images is False:
            return True
        else:
            return False

    def get_results(self):
        paginator = Paginator(self.query, self.page_size)
        try:
            self.results = paginator.page(self.current_page)
        except PageNotAnInteger:
            self.results = paginator.page(1)
        except TypeError:
            return []
        except EmptyPage:
            self.results = paginator.page(paginator.num_pages)
        return self.results

    def get_results_json(self):
        points = []
        for s in self.results:
            if s.locality.latitude and s.locality.longitude:
                points.append({"type": "Feature",
                               "geometry": {"type": "Point",
                                            "coordinates": [s.locality.longitude, s.locality.latitude]},
                               "properties": {"specimen_id": s.id,
                                              "sci_name": s.taxon.scientific_name(),
                                              "cat_num": s.specimen_number(),
                                              "jurisdiction": s.locality.jurisdiction.full_name()}
                               })
        return json.dumps({"type": "FeatureCollection", "features": points})

    def search(self):
        if self.is_empty():
            self.query = None
            return
        query = Specimen.objects.filter().distinct().order_by(self.sort_order)
        if len(self.taxa) > 0:
            query = query.filter(taxon__in=get_hierarchical_search_elements(self.taxa))
        if len(self.jurisdictions) > 0:
            query = query.filter(locality__jurisdiction__in=get_hierarchical_search_elements(self.jurisdictions))
        if len(self.strata) > 0:
            query = query.filter(stratum__in=get_hierarchical_search_elements(self.strata))
        if len(self.chrons) > 0:
            query = query.filter(chron__in=get_hierarchical_search_elements(self.chrons))
        if len(self.collections) > 0:
            query = query.filter(collection__in=get_hierarchical_search_elements(self.collections))
        if self.has_images:
            query = query.filter(images__isnull=False)
        self.query = query
        return


def get_hierarchical_search_elements(elements):
    search_elements = []
    for element in elements:
        search_elements = set().union(search_elements, element.get_descendants(True))
    return search_elements
OK, so as Daniel pointed out, the attributes of the SSC class were class-level instead of instance-level. The correct version looks like this now:
class SpecimenSearchCommand:
    def __init__(self):
        self.created = datetime.datetime.now()
        self.key = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(6))
        self.jurisdictions = []
        self.taxa = []
        self.strata = []
        self.collections = []
        self.chrons = []
        self.has_images = False
        self.query = None  # The active SQL query, not the actual result records
        self.page_size = 50
        self.current_page = 1
        self.sort_order = 'number'
        self.results = []  # Page of results from paginator
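A minimal sketch of the pitfall (illustrative names, not from the original code): class-level attributes are evaluated once per process and shared by every instance, and pickle serializes only the instance __dict__, which is why the object appeared to keep its state in a single long-lived dev server but lost it across mod_wsgi processes:

class Shared:
    items = []  # evaluated once, shared by all instances in the process

class PerInstance:
    def __init__(self):
        self.items = []  # fresh list per object, stored in __dict__

a, b = Shared(), Shared()
a.items.append(1)
print(b.items)  # [1] -- b sees a's data

import pickle
print(pickle.loads(pickle.dumps(a)).__dict__)  # {} -- class attrs are not pickled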

GeoDjango: retrieve last inserted primary key from LayerMapping

I am building an application with GeoDjango and I have the following problem:
I need to read track data from a GPX file, and those data should be stored in a model's MultiLineStringField field.
This should happen in the admin interface, where the user uploads a GPX file.
I am trying to achieve exactly this: the data grabbed from the file should be assigned to the MultiLineStringField, while the other fields should get their values from the form.
My model is:
class GPXTrack(models.Model):
    nome = models.CharField("Nome", blank=False, max_length=255)
    slug = models.SlugField("Slug", blank=True)
    # sport natura arte/cultura
    tipo = models.CharField("Tipologia", blank=False, max_length=2, choices=TIPOLOGIA_CHOICES)
    descrizione = models.TextField("Descrizione", blank=True)
    gpx_file = models.FileField(upload_to='uploads/gpx/')
    track = models.MultiLineStringField(blank=True)
    objects = models.GeoManager()
    published = models.BooleanField("Pubblicato")
    rel_files = generic.GenericRelation(MyFiles)
    #publish_on = models.DateTimeField("Pubblicare il", auto_now_add = True)
    created = models.DateTimeField("Created", auto_now_add=True)
    updated = models.DateTimeField("Updated", auto_now=True)

    class Meta:
        #verbose_name = "struttura'"
        #verbose_name_plural = "strutture"
        ordering = ['-created']

    def __str__(self):
        return str(self.nome)

    def __unicode__(self):
        return '%s' % (self.nome)

    def put(self):
        self.slug = sluggy(self.nome)
        key = super(Foresta, self).put()
        # do something after save
        return key
While in the admin.py file I have overridden the save method as follows:
from django.contrib.gis import admin
from trails.models import GPXPoint, GPXTrack
from django.contrib.contenttypes import generic
from django.contrib.gis.gdal import DataSource
#from gpx_mapping import GPXMapping
from django.contrib.gis.utils import LayerMapping
from django.template import RequestContext
import tempfile
import os
import pprint


class GPXTrackAdmin(admin.OSMGeoAdmin):
    list_filter = ('tipo', 'published')
    search_fields = ['nome']
    list_display = ('nome', 'tipo', 'published', 'gpx_file')
    inlines = [TrackImagesInline, TrackFilesInline]
    prepopulated_fields = {"slug": ("nome",)}

    def save_model(self, request, obj, form, change):
        """When creating a new object, set the creator field.
        """
        if 'gpx_file' in request.FILES:
            # Get
            gpxFile = request.FILES['gpx_file']
            # Save
            targetPath = tempfile.mkstemp()[1]
            destination = open(targetPath, 'wt')
            for chunk in gpxFile.chunks():
                destination.write(chunk)
            destination.close()
            # define fields of interest for LayerMapping
            track_point_mapping = {'timestamp': 'time',
                                   'point': 'POINT',
                                   }
            track_mapping = {'track': 'MULTILINESTRING'}
            gpx_file = DataSource(targetPath)
            mytrack = LayerMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
            mytrack.save()
            # remove the temp file saved
            os.remove(targetPath)
            orig = GPXTrack.objects.get(pk=mytrack.pk)
            # assign the parsed values from LayerMapping to the appropriate Field
            obj.track = orig.track
            obj.save()
As far as I know:
- LayerMapping cannot be used to update a field, only to save a new record;
- I cannot access a specific field of the LayerMapping object (i.e., in the code above, mytrack.track) and assign its value to a model field (i.e., obj.track) in the save_model method;
- I cannot retrieve the primary key of the last saved LayerMapping object (i.e., in the code above, mytrack.pk) in order to update it with the values passed in the form for the fields not mapped in LayerMapping.mapping.
What can I do then?
I sorted it out by subclassing LayerMapping and adding a get_values() method that, instead of saving the retrieved data, returns it for further use or manipulation. The get_values method is a copy of the LayerMapping.save() method that returns the values instead of saving them.
I am using Django 1.5.
import os
import sys

from django.contrib.gis.gdal import OGRGeometry
from django.contrib.gis.utils import LayerMapError, LayerMapping
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction


class MyMapping(LayerMapping):
    def get_values(self, verbose=False, fid_range=False, step=False,
                   progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Returns the contents from the OGR DataSource Layer
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source. In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.

         stream:
           Status information will be written to this file handle. Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered. The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)

        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress

        # Defining the 'real' save method, utilizing the transaction
        # decorator created during initialization.
        #self.transaction_decorator
        def _get_values(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer

            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError, msg:
                    # Something borked the validation
                    if strict: raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True

                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new: geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)

                    try:
                        # Instead of attempting to save, keep the keyword
                        # arguments so they can be returned to the caller.
                        pippo = kwargs
                        num_saved += 1
                        if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                    except SystemExit:
                        raise
                    except Exception, msg:
                        if self.transaction_mode == 'autocommit':
                            # Rolling back the transaction so that other model saves
                            # will work.
                            transaction.rollback_unless_managed()
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

                    # Printing progress information, if requested.
                    if progress and num_feat % progress_interval == 0:
                        stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return pippo

        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)

            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i: step_slice = slice(beg, None)
                else: step_slice = slice(beg, end)

                try:
                    pippo = _get_values(step_slice, num_feat, num_saved)
                    beg = end
                except:
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _get_values() function.
            return _get_values()
In a custom save or save_model method you can then use:
track_mapping = {'nome': 'name',
                 'track': 'MULTILINESTRING'}
targetPath = "/my/gpx/file/path.gpx"
gpx_file = DataSource(targetPath)
mytrack = MyMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
pippo = mytrack.get_values()
obj.track = pippo['track']
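For completeness, a hedged alternative that avoids subclassing LayerMapping altogether: read the geometry straight off the OGR layer and assign it to the object being saved (same targetPath and obj as in the save_model above; the GPX driver exposes a 'tracks' layer):

from django.contrib.gis.gdal import DataSource

ds = DataSource(targetPath)
layer = ds['tracks']        # layers are addressable by name
if len(layer) > 0:
    geom = layer[0].geom    # OGRGeometry of type MULTILINESTRING
    obj.track = geom.wkt    # GeoDjango accepts WKT for geometry fields
    obj.save()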