'Tareas' object has no attribute '__fields__' - django

When I try to access 'tareas', the app returns the following:
'Tareas' object has no attribute '__fields__'
Request Method: GET
Request URL: http://localhost:8000/tareas/
Django Version: 1.4
Exception Type: AttributeError
Exception Value:
'Tareas' object has no attribute '__fields__'
Exception Location: /Users/Tone/Documents/Proyectos/macrotelecom/common/generic.py in view_list, line 240
The problem is in this line:
if not fields:
fields=queryset.__dict__['model']().__fields__(profile)
Which I think is weird, because in admin.py I set the attribute `fields` for my object 'Tareas':
from tareas.models import Tareas
from django.contrib import admin
# Admin configuration for the Tareas model.
# NOTE(review): `fields` here only controls which fields appear on the admin
# add/change form; it does NOT create a `__fields__` attribute on Tareas
# instances, so it cannot satisfy the generic view's `__fields__()` call.
class TareasAdmin(admin.ModelAdmin):
# Fields shown on the admin add/change form.
fields = ['idtarea','idtipotarea','idagente','fechatarea']
# Columns shown on the admin changelist page.
list_display = ('idtarea','idagente','fechatarea')
# Register the model together with its admin options.
admin.site.register(Tareas,TareasAdmin)
The line is defined in this function, 'view_list':
def view_list(request, get_template, **kwargs):
'''
Generic list view with validation included and object-transferring support.

Optional keyword arguments (read via keyarg()): object_id, action,
extra_context, queryset, restrictions, permission, fields,
default_ordering, compact_rows, template_name.

NOTE(review): the model behind `queryset` is expected to implement the
project-specific introspection methods __fields__, __limitQ__, __searchF__
and __searchQ__. Plain Django models do not define these, so passing a bare
Model.objects.all() queryset makes the __fields__ lookup below raise
AttributeError (e.g. "'Tareas' object has no attribute '__fields__'").
'''
# Config
default_rows_per_page=100
# Get arguments from the function
object_id=keyarg('object_id',kwargs,None)
action=keyarg('action',kwargs,None)
extra_context=keyarg('extra_context',kwargs,{})
queryset=keyarg('queryset',kwargs,None)
restrictions=keyarg('restrictions',kwargs,None)
permission=keyarg('permission',kwargs,None)
fields=keyarg('fields',kwargs,None)
default_ordering=keyarg('default_ordering',kwargs,None)
compact_rows=keyarg('compact_rows',kwargs,None)
# Get template and profile
# NOTE(review): parsing str(model) to recover app/model names is fragile;
# queryset.model yields the model class directly.
namesp=str(queryset.__dict__['model']).replace("class ","").replace(">","").replace("<","").replace("'","").split(".")
appname=namesp[-3].lower()
modelname=namesp[-1].lower()
(profile,template_name)=get_template(keyarg('template_name',kwargs,"%s/%s_list.html" % (appname,modelname)))
# Check permissions
if (permission is not None) and (not request.user.has_perm(permission)):
return HttpResponseRedirect('/not_authorized/')
# Get extra arguments
extra={}
for arg in kwargs:
if arg not in ['object_id','action','template_name','extra_context','queryset','restrictions']:
extra[arg]=kwargs[arg]
# Inicialization
new_extra_context={}
new_extra_context['now']=epochdate(time.time())
# new_extra_context['msglog']=msglog()
# Restrictions fields
new_extra_context['filters']=[]
if restrictions:
for restriction in restrictions:
f={}
f['name']=restriction
f['value']=extra[restriction]
new_extra_context['filters'].append(f)
# Process the filter
new_extra_context['filters_obj']={}
new_extra_context['header_loop']=1
if restrictions:
queryset_obj={}
for rname in restrictions:
# Get name of the field and object
(rfield,robject)=restrictions[rname]
# Get the ID
rid=extra[rname]
# Save the id in extra_context
new_extra_context[rname]=rid
# Save the object in queryset_obj
queryset_obj[rname]=robject(id=rid)
# Filter the queryset
# NOTE(review): eval() builds the Q lookup dynamically; rfield comes from
# the view configuration (restrictions), not from user input.
queryset=queryset.filter(eval("Q(%s=queryset_obj['%s'])" % (rfield,rname)))
new_extra_context['filters_obj'][rname]=get_object_or_404(robject,pk=rid)
# Get field list
# NOTE(review): __fields__ is a project convention the model must implement;
# this is the line that raises AttributeError for plain Django models.
if not fields:
fields=queryset.__dict__['model']().__fields__(profile)
# Save action if we got one
if action:
new_extra_context['action']=action
# Try to convert object_id to a numeric id
try:
object_id=int(object_id)
except:
pass
# Save GET values
new_extra_context['get']=[]
new_extra_context['getval']={}
for name in request.GET:
if name not in ['filtername','filtervalue']:
struct={}
struct['name']=name
if name=='rowsperpage':
struct['value']=default_rows_per_page
elif name=='page':
struct['value']=1
else:
struct['value']=request.GET[name]
new_extra_context['get'].append(struct)
new_extra_context['getval'][name]=struct['value']
# Filter on limits
# NOTE(review): __limitQ__, __searchF__ and __searchQ__ below are further
# project-specific model hooks returning Q objects / filter descriptors.
limits=queryset.__dict__['model']().__limitQ__(profile,request)
qobjects=None
for name in limits:
if qobjects:
qobjects&=limits[name]
else:
qobjects=limits[name]
if qobjects:
queryset=queryset.filter(qobjects)
# Filters on fields
try:
filters_by_json=request.GET.get('filters','{}')
filters_by_struct=json_decode(str(filters_by_json))
except Exception:
filters_by_struct=[]
filtername=request.GET.get('filtername',None)
filtervalue=request.GET.get('filtervalue',None)
listfilters=queryset.__dict__['model']().__searchF__(profile)
# Process the search
filters_struct={}
for key in filters_by_struct:
# Get the value of the original filter
value=filters_by_struct[key]
# If there is something to filter, filter is not being changed and filter is known by the class
if (key!=filtername) and (key in listfilters) and (value>0):
# Add the filter to the queryset
f=listfilters[key]
fv=f[2][value-1][0]
queryset=queryset.filter(f[1](fv))
# Save it in the struct as a valid filter
filters_struct[key]=value
# Add the requested filter if any
if (filtername in listfilters) and (int(filtervalue)>0):
f=listfilters[filtername]
fv=f[2][int(filtervalue)-1][0]
queryset=queryset.filter(f[1](fv))
filters_struct[filtername]=int(filtervalue)
# Rewrite filters_json updated
filters_json=json_encode(filters_struct)
# Build the clean get for filters
get=new_extra_context['get']
filters_get=[]
for element in get:
if element['name'] not in ['filters']:
struct={}
struct['name']=element['name']
struct['value']=element['value']
filters_get.append(struct)
# Add filter_json
struct={}
struct['name']='filters'
struct['value']=filters_json
filters_get.append(struct)
new_extra_context['filters_get']=filters_get
# Get the list of filters allowed by this class
filters=[]
for key in listfilters:
choice=[_('All')]
for value in listfilters[key][2]:
choice.append(value[1])
# Decide the choosen field
if key in filters_struct.keys():
choose=int(filters_struct[key])
else:
choose=0
filters.append((key,listfilters[key][0],choice,choose))
new_extra_context['filters']=filters
# Search text in all fields
search=request.GET.get('search','')
new_extra_context['search']=search
datetimeQ=None
if len(search)>0:
searchs=queryset.__dict__['model']().__searchQ__(search,profile)
qobjects=None
for name in searchs:
if (searchs[name]=='datetime'):
datetimeQ=name
continue
else:
if qobjects:
qobjects|=searchs[name]
else:
qobjects=searchs[name]
if qobjects:
queryset=queryset.filter(qobjects)
else:
# Look for datetimeQ field
searchs=queryset.__dict__['model']().__searchQ__(search,profile)
for name in searchs:
if (searchs[name]=='datetime'):
datetimeQ=name
continue
# Datetime Q
new_extra_context['datetimeQ']=datetimeQ
if datetimeQ:
# Inicialization
# NOTE(review): each entry is (min, max, user_specified?) for that date part.
f={}
f['year']=(1900,2100,False)
f['month']=(1,12,False)
f['day']=(1,31,False)
f['hour']=(0,23,False)
f['minute']=(0,59,False)
f['second']=(0,59,False)
date_elements=[None,'year','month','day','hour','minute','second']
# Get configuration of dates and set limits to the queryset
for element in date_elements[1:]:
value=request.GET.get(element,None)
if value:
f[element]=(int(value),int(value),True)
if f['year'][2] and f['month'][2] and not f['day'][2]:
(g,lastday)=calendar.monthrange(f['year'][1],f['month'][1])
f['day']=(f['day'][0],lastday,f['day'][2])
# Limits
date_min=datetime.datetime(f['year'][0], f['month'][0], f['day'][0], f['hour'][0], f['minute'][0], f['second'][0])
date_max=datetime.datetime(f['year'][1], f['month'][1], f['day'][1], f['hour'][1], f['minute'][1], f['second'][1])
queryset=queryset.filter(eval("( Q(%s__gte=date_min) & Q(%s__lte=date_max) ) | Q(%s=None)" % (datetimeQ,datetimeQ,datetimeQ)))
# Find actual deepness
deepness_index=0
for element in date_elements[1:]:
if f[element][2]:
deepness_index+=1
else:
break
# Get results from dates to set the new order
date_results=queryset.values_list(datetimeQ, flat=True) #.dates(datetimeQ,'day')
if f['day'][0]!=f['day'][1]:
if f['month'][0]==f['month'][1]:
date_results=date_results.dates(datetimeQ,'day')
elif f['year'][0]==f['year'][1]:
date_results=date_results.dates(datetimeQ,'month')
else:
date_results=date_results.dates(datetimeQ,'year')
get=new_extra_context['get']
new_extra_context['datefilter']={}
# Save the deepness
if (deepness_index+1==len(date_elements)):
new_extra_context['datefilter']['deepness']=None
else:
new_extra_context['datefilter']['deepness']=date_elements[deepness_index+1]
new_extra_context['datefilter']['deepnessback']=[]
new_extra_context['datefilter']['deepnessinit']=[]
for element in get:
if (not element['name'] in date_elements):
struct={}
struct['name']=element['name']
struct['value']=element['value']
new_extra_context['datefilter']['deepnessinit'].append(struct)
new_extra_context['datefilter']['deepnessback'].append(struct)
elif (element['name']!=date_elements[deepness_index] and f[element['name']][2]):
struct={}
struct['name']=element['name']
struct['value']=element['value']
new_extra_context['datefilter']['deepnessback'].append(struct)
# Build the list of elements
new_extra_context['datefilter']['data']=[]
for element in date_results:
# Save the data
new_extra_context['datefilter']['data'].append(element.timetuple()[deepness_index])
new_extra_context['datefilter']['data']=list(set(new_extra_context['datefilter']['data']))
new_extra_context['datefilter']['data'].sort()
# Prepare the rightnow result
if f['month'][2]:
month=_(month_name(f['month'][0]))
else:
month='__'
if f['hour'][2]:
rightnow="%s/%s/%s %s:%s:%s" % (grv(f,'day'),month,grv(f,'year'),grv(f,'hour'),grv(f,'minute'),grv(f,'second'))
else:
rightnow="%s/%s/%s" % (grv(f,'day'),month,grv(f,'year'))
new_extra_context['datefilter']['rightnow']=rightnow
# Distinct
queryset=queryset.distinct()
# Ordering field autofill
try:
order_by_json=request.GET.get('ordering','[]')
order_by_struct=json_decode(str(order_by_json))
except Exception:
order_by_struct=[]
order_by=[]
position={}
counter=1
# NOTE(review): order.keys()[0] assumes Python 2 (keys() returns a list).
for order in order_by_struct:
name=order.keys()[0]
direction=order[name]
if direction=='asc':
order_by.append("%s" % (name))
elif direction=='desc':
order_by.append("-%s" % (name))
position[name]=counter
counter+=1
if order_by:
queryset=queryset.order_by(*order_by)
elif default_ordering:
queryset=queryset.order_by(default_ordering)
else:
queryset=queryset.order_by("pk")
# Check the total count of registers
total_registers=queryset.count()
# Ordering field autofill
sort={}
for value in fields:
# Get values
name=value[0]
publicname=value[1]
if len(value)>2:
size=value[2]
else:
size=None
if len(value)>3:
align=value[3]
else:
align=None
# Process ordering
ordering=[]
found=False
for order in order_by_struct:
subname=order.keys()[0]
direction=order[subname]
if name==subname:
if direction == 'desc':
direction = ''
sort_class='headerSortUp'
elif direction == 'asc':
direction = 'desc'
sort_class='headerSortDown'
else:
sort_class=''
direction = 'asc'
found=True
if direction == 'asc' or direction=='desc':
ordering.append({subname:direction})
if not found:
ordering.append({name:'asc'})
sort_class=''
# Save the ordering method
sort[name]={}
sort[name]['id']=name
sort[name]['name']=publicname
sort[name]['class']=sort_class
sort[name]['size']=size
sort[name]['align']=align
if name:
sort[name]['ordering']=json_encode(ordering).replace('"','\\"')
if name in position:
sort[name]['position']=position[name]
# Pagination
# IMPORTANT: This part is commented because I don't manage to control rowsperpage from urls.py file, it is remembering last query instead
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#if 'rowsperpage' in extra_context:
# rowsperpage=extra_context['rowsperpage']
#else:
# rowsperpage=default_rows_per_page
#total_rows_per_page=request.GET.get('rowsperpage',rowsperpage)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
total_rows_per_page=request.GET.get('rowsperpage',default_rows_per_page)
if total_rows_per_page:
try:
total_rows_per_page = int(total_rows_per_page)
except Exception:
total_rows_per_page = 'All'
else:
# IMPORTANT: Commented as before coded
# total_rows_per_page = rowsperpage
total_rows_per_page = default_rows_per_page
if total_rows_per_page == 'All':
page_number=1
total_rows_per_page = total_registers
total_rows_per_page_out = _('All')
total_pages=1
else:
total_rows_per_page = int(total_rows_per_page) # By default 10 rows per page
total_rows_per_page_out = total_rows_per_page
# NOTE(review): integer division assumed (Python 2); remainder handled below.
total_pages=total_registers/total_rows_per_page
if total_registers%total_rows_per_page:
total_pages+=1
page_number=request.GET.get('page',1) # If no page specified use first page
if page_number=='last':
page_number=total_pages
else:
try:
page_number=int(page_number)
except:
page_number=1
if page_number>total_pages:
page_number=total_pages
# Build the list of page counters allowed
choice=[]
c=default_rows_per_page
chk=1
while total_registers>=c:
choice.append(c)
if chk==1:
# From 5 to 10
c=c*2
# Next level
chk=2
elif chk==2:
# From 10 to 25 (10*2+10/2)
c=c*2+c/2
# Next level
chk=3
elif chk==3:
# From 25 to 50
c*=2
chk=1
# Add all choice in any case
choice.append(_('All'))
# Save the pagination in the structure
new_extra_context['rowsperpageallowed']=choice
new_extra_context['rowsperpage']=total_rows_per_page_out
new_extra_context['pagenumber']=page_number
# NOTE(review): type(u'abc') comparison is a Python 2 unicode check;
# isinstance(object_id, unicode) would be the clearer spelling.
if type(object_id)==type(u'abc'):
# If object_id is a string, we have a name not an object
new_extra_context['object_name']=object_id
object_obj = None
else:
# If is not an string
if object_id:
# If we got one, load the object
obj=extra_context['obj']
object_obj = get_object_or_404(obj, pk=object_id)
else:
# There is no object
object_obj = None
new_extra_context['object_obj']=object_obj
# Build the columns structure
new_extra_context['columns']=[]
for value in fields:
field=value[0]
new_extra_context['columns'].append(sort[field])
# Get the full number of registers and save it to extra_context
new_extra_context['total_registers']=total_registers
if total_rows_per_page=='All':
# Remove total_rows_per_page if is all
total_rows_per_page=None
new_extra_context['page_before']=None
new_extra_context['page_after']=None
new_extra_context['start_register']=1
new_extra_context['showing_registers']=total_registers
else:
# Page before
if page_number<=1:
new_extra_context['page_before']=None
else:
new_extra_context['page_before']=page_number-1
# Page after
if page_number>=total_pages:
new_extra_context['page_after']=None
else:
new_extra_context['page_after']=page_number+1
# Starting on register number
new_extra_context['start_register']=(page_number-1)*total_rows_per_page+1
new_extra_context['showing_registers']=total_rows_per_page
# Calculate end
new_extra_context['end_register']=min(new_extra_context['start_register']+new_extra_context['showing_registers']-1,total_registers)
# If compact rows
hide_head=[]
hide_tail=[]
hide_subhead=[]
hide_subtail=[]
if compact_rows:
(compact_field,compact_subelements)=compact_rows
lastvalue=None
lastrow=None
total_subelements=0
for row in queryset:
value=eval("row.%s" % (compact_field))
# Count the subelements from this row
if compact_subelements:
count_subelements=eval("row.%s.count()" % (compact_subelements))
else:
count_subelements=1
# If the new row belongs to the same group than the row before
if value==lastvalue:
# Hide the head from this row
hide_head.append(row.id)
# Hide the tail from the last row
hide_tail.append(lastrow.id)
# If there were elements in the group (somebody already opened the subhead, hide the head of the subgroup) or if this row has no elements (no need to open this subhead)
if total_subelements>0 or count_subelements==0:
# Hid the subhead
hide_subhead.append(row.id)
# Hide the tail of the last row, since we want to connect both groups
hide_subtail.append(lastrow.id)
# Add the total count of elements
total_subelements+=count_subelements
# This row doesn't belong to the opened group
else:
# If there was some row already and there are no elements in the group (nobody opened the group, so we don't have to close it either)
if lastrow and total_subelements==0:
# Hide the tail from the group
hide_subtail.append(lastrow.id)
# Startup a new count of elements (Reset the total count of subelements)
total_subelements=0
total_subelements+=count_subelements
# If the new group doesn't have element (we don't think about opening the group)
if total_subelements==0:
# Hide the head from this group
hide_subhead.append(row.id)
# Remember
lastvalue=value
lastrow=row
# Proper closing the group after the bucle if there was some row opened
if lastrow and total_subelements==0:
# Hide the tail from the group if was no element in the group (nobody opened the group)
hide_subtail.append(lastrow.id)
# Save it in the public structure
new_extra_context['hide_head']=hide_head
new_extra_context['hide_tail']=hide_tail
new_extra_context['hide_subhead']=hide_subhead
new_extra_context['hide_subtail']=hide_subtail
# Save extra context
extra_context.update(new_extra_context)
# Empty results are empty
if page_number==0:
total_rows_per_page=0
# Return results
return object_list(request, queryset=queryset, template_name=template_name, extra_context=extra_context, paginate_by=total_rows_per_page, page=page_number)
What it calls in urls.py from 'Tareas':
from django.conf.urls import patterns, include, url
from tareas.models import Tareacobro, Tipotarea, Agentes, Perfil, Tareas
from django.conf import settings
from common.generic import view_list
# Uncomment the next two lines to enable the admin:
# Keyword arguments forwarded to the generic view_list view.
info_tasks = {'queryset': Tareas.objects.all()}
urlpatterns = patterns('tareas.views',
# =====TASKS======
# url(r'^$','tareas'),
# NOTE(review): old-style 4-tuple — (regex, view, kwargs dict, name);
# dict(info_tasks, extra_context=...) merges the queryset with extra_context.
(r'^', view_list, dict( info_tasks, extra_context={'obj':Tareas} ),'admin/tareas/tareas'),
# (r'^$',view_list, dict(info_tasks),'admin/tareas/tareas'),
#url(r'^$',view_list, dict(info_tasks, extra_context={'obj':Tareas} ),'tareas'),
)

fields=queryset.__dict__['model']() instantiates a model instance. As an aside, you could use queryset.model instead of looking in __dict__.
Django models instances do not have an attribute __fields__, so you get an attribute error.
If you want to access the fields you have defined in the model admin, you can fetch the model admin class from the registry.
from django.contrib.admin import site
model_admin = site._registry[Model]
fields = model_admin.fields

Related

Optimizing for loop inside for loop django: added link

I have a function which checks for a profile name and determines if it is in a tagged profile name.
def check_profiles(request):
'''
Mark profiles as 'FOR REVIEW' when any token of their name breakdown appears
in a tagged profile's name breakdown; append a cleared date/time entry to
every other profile.

NOTE(review): this performs len(profiles) * len(tagged_profiles) comparisons
in Python — the inefficiency the question asks about.
'''
try:
# get all individual profiles
profiles = Profile.objects.all()
# get all individual tagged profiles
tagged_profiles = TaggedProfiles.objects.all()
# ids to exclude in adding dates
exclude_profiles = []
# for profile in profiles
for profile in profiles:
# for tagged in sdn list
for tagged_profile in tagged_profiles:
# if contains 1
if any(name in tagged_profile.name_breakdown() for name in profile.name_breakdown()):
# put in exclude
exclude_profiles.append(profile.pk)
profile.status = 'FOR REVIEW'
profile.save()
break
for profile in Profile.objects.all().exclude(pk__in = exclude_profiles):
cleared_dates = profile.cleared_dates
cleared_dates.append(
{
'date': datetime.now().strftime('%Y-%m-%d'),
'time': datetime.now().strftime('%I:%M %p')
})
logger.debug(cleared_dates)
profile.cleared_dates = cleared_dates
profile.save()
# NOTE(review): broad catch — any failure is only logged, never re-raised.
except Exception as e:
logger.error(e)
Basically, if a profile's name is 'firstname lastname', its breakdown is ['firstname', 'lastname']. And if any of the tagged profiles' breakdowns include either 'firstname' or 'lastname', it's a hit.
But I'm doing it very inefficiently. How may I optimize it with any of django's built in functions?
You can see it here.

How can I access URL parameters from within a BasePermission?

I'm trying to write a custom rest_framework Permission to prevent users from querying information that's not of the same company as them. Unfortunately, I can't seem to access any of the URL's parameters from within has_permission() or has_object_permissions().
Here's the beginning of my router:
# Create a basic router
router = routers.SimpleRouter()
# Establish some variables to assist with nested routes
root_elem = 'companies'
root_elem_id = '/(?P<company_id>[0-9]+)'
loca_elem = '/locations'
loca_elem_id = '/(?P<location_id>[0-9]+)'
# Companies will be the root from which all other relations branch
router.register(r'' + root_elem, views.CompanyViewSet)
# NOTE(review): the nested route captures company_id; permission classes can
# read it back through view.kwargs or request.resolver_match.kwargs.
router.register(r'' + root_elem + root_elem_id + loca_elem,
views.LocationViewSet)
Here's my custom permission:
# Only permit actions originating from location managers or company admins
class IsLocationManagerOrHigher(BasePermission):
'''
DRF permission: admins pass list-level checks; at object level, admins pass
and managers pass only for their own location.
'''
# Checked when displaying lists of records
# NOTE(review): DRF actually invokes has_permission(self, request, view), so
# with this *args signature the view object arrives as args[0]; URL kwargs
# are reachable via that view's .kwargs or request.resolver_match.kwargs.
def has_permission(self, request, *args, **kwargs):
is_correct_level = False
# Admins can see every location if their location_id
# matches a location that's a child of the company
# specified in the URL
if request.employee.is_admin:
is_correct_level = True
return request.user and is_correct_level
# Checked when viewing specific records
def has_object_permission(self, request, view, obj):
is_correct_level = False
# Admins can see location details if their location's company_id
# matches a Location's company_id
if request.employee.is_admin:
is_correct_level = True
# Managers can see location details if it's their location
elif obj.id == request.employee.location_id and request.employee.is_manager:
is_correct_level = True
return request.user and is_correct_level
Right now checking request.employee.is_admin is only half of what I need - I also need to access the company_id from the URL and make sure it matches the admin's location's company_id:
# Pseudocode
try:
user_location = Location.objects.get(id=request.employee.location_id)
return user_location.company_id == kwargs['company_id']
except ObjectDoesNotExist:
pass
I've yet to figure out how to pass these parameters into the Permission so that it can perform this extra step. Or perhaps there's a better way of accomplishing what I'm trying to do?
If you can't pass them in directly (which would be preferable), they are available on the request object:
company_id = request.resolver_match.kwargs.get('company_id')
request.resolver_match.args and request.resolver_match.kwargs contain the positional/keyword arguments captured in your url.
As an alternative to the correct response posted by knbk, you can also get the URL parameters using the view object passed to has_permission method. Like this:
company_id = view.kwargs.get('company_id')

Add to default field selectors with python social oauth2

I have set the SOCIAL_AUTH_LINKEDIN_FIELD_OAUTH2_SELECTORS field in my Django Settings per the instructions for LinkedIn configurations here: http://psa.matiasaguirre.net/docs/backends/linkedin.html
But when I run the authentication the additional email selector added to that setting is not added to the list of selectors in the backend call.
When I remove the field SOCIAL_AUTH_LINKEDIN_FIELD_OAUTH2_SELECTORS I get an error that it is missing:
'Settings' object has no attribute
'SOCIAL_AUTH_LINKEDIN_FIELD_OAUTH2_SELECTORS'
So I know I am using the correct settings name.
None of the added params make it to the backend though:
# NOTE(review): for the OAuth2 backend the setting python-social-auth reads is
# SOCIAL_AUTH_LINKEDIN_OAUTH2_FIELD_SELECTORS, not the name used here — which
# is why these selectors never reach the backend.
settings.SOCIAL_AUTH_LINKEDIN_FIELD_OAUTH2_SELECTORS = ['id',
'recommendations-received', 'positions', 'email-address', 'headline',
'industry', 'first-name', 'last-name', 'location', 'num-connections',
'skills']
I printed out the result of the backend and always just get the default selector list:
[edited backends/linkedin.py from
https://github.com/omab/python-social-auth/blob/master/social/backends/linkedin.py#L32]
def user_details_url(self):
'''Build the LinkedIn user-details URL from the default field selectors
plus any configured via the FIELD_SELECTORS setting.'''
# use set() since LinkedIn fails when values are duplicated
fields_selectors = list(set(['first-name', 'id', 'last-name'] +
self.setting('FIELD_SELECTORS', [])))
# NOTE(review): Python 2 print statement — debug output left in by the asker.
print fields_selectors
# user sort to ease the tests URL mocking
fields_selectors.sort()
fields_selectors = ','.join(fields_selectors)
return self.USER_DETAILS.format(fields_selectors)
#> ['first-name', 'id', 'last-name']
How can I add selectors through DJANGO Settings to expand the data returned when authenticating?
Aamir's suggestion worked:
SOCIAL_AUTH_LINKEDIN_OAUTH2_FIELD_SELECTORS
I ended up adding a print statement to social.strategies.django_strategies and got a listing of all the settings being pulled:
def get_setting(self, name):
# NOTE(review): debug aid — prints every setting name python-social-auth
# requests before delegating the lookup to Django settings.
print name
return getattr(settings, name)
Listing...
# SOCIAL_AUTH_REDIRECT_IS_HTTPS
# REDIRECT_IS_HTTPS
# SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY
# SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET
# SOCIAL_AUTH_LINKEDIN_OAUTH2_REQUESTS_TIMEOUT
# SOCIAL_AUTH_REQUESTS_TIMEOUT
# REQUESTS_TIMEOUT
# SOCIAL_AUTH_LINKEDIN_OAUTH2_URLOPEN_TIMEOUT
# SOCIAL_AUTH_URLOPEN_TIMEOUT
# URLOPEN_TIMEOUT
# SOCIAL_AUTH_LINKEDIN_OAUTH2_FIELD_SELECTORS
....

How to avoid Django URLField adding the trailing slash?

Django URLField likes to add a trailing slash (/) at the end of the user input, forcing all URLs to be stored with the extra character, this is wrong. How can I stop this behavior and save URLs as submitted by users?
Check to_python of URLField at https://github.com/django/django/blob/master/django/forms/fields.py.
You can see it has a line url_fields[2] = '/' almost at the end of method to_python. It appends a trailing slash / at the end of url. You can see the logic for doing this as a comment before this line.
This slash is necessary in case some query params are given.
If you want to avoid this behaviour, write you own field which extends from URLField and override to_python in your custom class.
I've been struggling with this as well, because it's causing a problem for certain urls. For example, http://www.nasa.gov/mission_pages/kepler/news/kepler-62-kepler-69.html/ fails, but it works without the slash.
To expand on akshar's answer, the method to do this is explained here. For example, defining this in my models.py file and setting url = NoSlashURLField() rather than models.URLField() in my model removes the slash:
# Python 3 / Python 2 compatible imports for URL splitting.
try:
from urllib.parse import urlsplit, urlunsplit
except ImportError: # Python 2
from urlparse import urlsplit, urlunsplit
class NoSlashURLField(models.URLField):
# Model URLField that skips Django's trailing-slash normalization: the
# commented-out "url_fields[2] = '/'" step below is exactly the code the
# stock URLField uses to append the slash.
# NOTE(review): __metaclass__/SubfieldBase is Python 2 / old-Django only —
# confirm the target Django version still provides SubfieldBase.
description = "Remove the goddamn slash"
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
super(NoSlashURLField, self).__init__(*args, **kwargs)
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
``ValidationError`` exception for certain).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'])
value = super(NoSlashURLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
# if not url_fields[2]:
# # the path portion may need to be added before query params
# url_fields[2] = '/'
value = urlunsplit(url_fields)
return value
For those using the usual Django admin forms for their site, and also using South for DB migrations, you may want to use the following method instead of stonefury's. His method changes the model field, which confuses South unless you add some special code. The below method changes only the admin code, allowing South to remain blissfully unaware.
Define this class somewhere in your app:
class NoSlashURLFormField(forms.URLField):
# Form-level variant of the slash-preserving URL field: identical cleaning
# logic to the stock URLField, minus the trailing-slash append, so the model
# field (and South migrations) stay untouched.
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
``ValidationError`` exception for certain).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'])
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
Then edit your admin.py file as follows:
from your_app.path.to.noslash import NoSlashURLFormField
from django.contrib.admin.widgets import AdminURLFieldWidget
class MyModelAdmin(admin.ModelAdmin):
...
# Route every URLField through the slash-preserving form field while keeping
# the admin's URL widget (otherwise Django falls back to a plain URLInput).
formfield_overrides = {
models.URLField: {
'form_class': NoSlashURLFormField,
# Need to specify the AdminURLFieldWidget here because it would
# otherwise get defaulted back to URLInput.
'widget': AdminURLFieldWidget,
}
}

GeoDJango: retrieve last inserted primary key from LayerMapping

I am building an application with GeoDjango and I have the following problem:
I need to read track data from a GPX file and those data should be stored in a model MultiLineStringField field.
This should happen in the admin interface, where the user uploads a GPX file
I am trying to achieve this, namely that the data grabbed from the file should be assigned to the MultiLineStringField, while the other fields should get values from the form.
My model is:
class GPXTrack(models.Model):
# GPX track: the uploaded .gpx file plus the MultiLineString geometry parsed
# from it, with descriptive metadata and publication flags.
nome = models.CharField("Nome", blank = False, max_length = 255)
slug = models.SlugField("Slug", blank = True)
# sport natura arte/cultura
tipo = models.CharField("Tipologia", blank = False, max_length = 2, choices=TIPOLOGIA_CHOICES)
descrizione = models.TextField("Descrizione", blank = True)
gpx_file = models.FileField(upload_to = 'uploads/gpx/')
# Geometry filled in from the GPX file by the admin's save_model override.
track = models.MultiLineStringField(blank = True)
objects = models.GeoManager()
published = models.BooleanField("Pubblicato")
rel_files = generic.GenericRelation(MyFiles)
#publish_on = models.DateTimeField("Pubblicare il", auto_now_add = True)
created = models.DateTimeField("Created", auto_now_add = True)
updated = models.DateTimeField("Updated", auto_now = True)
class Meta:
#verbose_name = "struttura'"
#verbose_name_plural = "strutture"
ordering = ['-created']
def __str__(self):
return str(self.nome)
def __unicode__(self):
return '%s' % (self.nome)
# NOTE(review): super(Foresta, self) names a different class — looks like a
# copy-paste leftover; presumably it should be GPXTrack. Django models also
# have no put() method, so verify this GAE-style hook is ever called.
def put(self):
self.slug = sluggy(self.nome)
key = super(Foresta, self).put()
# do something after save
return key
While in the admin.py file I have overwritten the save method as follows:
from django.contrib.gis import admin
from trails.models import GPXPoint, GPXTrack
from django.contrib.contenttypes import generic
from django.contrib.gis.gdal import DataSource
#from gpx_mapping import GPXMapping
from django.contrib.gis.utils import LayerMapping
from django.template import RequestContext
import tempfile
import os
import pprint
class GPXTrackAdmin(admin.OSMGeoAdmin):
list_filter = ( 'tipo', 'published')
search_fields = ['nome']
list_display = ('nome', 'tipo', 'published', 'gpx_file')
inlines = [TrackImagesInline, TrackFilesInline]
prepopulated_fields = {"slug": ("nome",)}
def save_model(self, request, obj, form, change):
"""Parse the uploaded GPX file with LayerMapping and copy the resulting
track geometry onto the object being saved.
"""
if 'gpx_file' in request.FILES:
# Get
gpxFile = request.FILES['gpx_file']
# Save
# NOTE(review): mkstemp()[1] leaks the open file descriptor returned in
# [0], and 'wt' text mode may mangle the upload — TODO confirm.
targetPath = tempfile.mkstemp()[1]
destination = open(targetPath, 'wt')
for chunk in gpxFile.chunks():
destination.write(chunk)
destination.close()
#define fields of interest for LayerMapping
track_point_mapping = {'timestamp' : 'time',
'point' : 'POINT',
}
track_mapping = {'track' : 'MULTILINESTRING'}
gpx_file = DataSource(targetPath)
mytrack = LayerMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
mytrack.save()
#remove the temp file saved
os.remove(targetPath)
# NOTE(review): LayerMapping exposes no `pk` attribute — this lookup is
# exactly the problem described in the question; LayerMapping.save()
# does not report the primary keys it created.
orig = GPXTrack.objects.get(pk=mytrack.pk)
#assign the parsed values from LayerMapping to the appropriate Field
obj.track = orig.track
obj.save()
As far as I know:
LayerMapping cannot be used to update a field but only to save a new one
I cannot access a specific field of the LayerMapping object (ie in the code above: mytrack.track) and assign its value to a model field (ie obj.track) in the model_save method
I cannot retrieve the primary key of the last saved LayerMapping object (ie in the code above: mytrack.pk) in order to update it with the values passed in the form for the field not mapped in LayerMapping.mapping
What can I do then?!?!
I sorted it out by subclassing LayerMapping and adding a method get_values() that, instead of saving the retrieved data, returns it for any use or manipulation. The get_values method is a copy of the LayerMapping::save() method that returns the values instead of saving them.
I am using django 1.5
import os
from django.contrib.gis.utils import LayerMapping
import sys
class MyMapping(LayerMapping):
def get_values(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Returns the contents from the OGR DataSource Layer
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and sucessfully saved. By default,
progress information will pe printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
# Defining the 'real' save method, utilizing the transaction
# decorator created during initialization.
#self.transaction_decorator
def _get_values(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError, msg:
# Something borked the validation
if strict: raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new: geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
pippo = kwargs
num_saved += 1
if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
except SystemExit:
raise
except Exception, msg:
if self.transaction_mode == 'autocommit':
# Rolling back the transaction so that other model saves
# will work.
transaction.rollback_unless_managed()
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return pippo
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g, [100:] instead of [90:100]).
if i + 1 == n_i: step_slice = slice(beg, None)
else: step_slice = slice(beg, end)
try:
pippo = _get_values(step_slice, num_feat, num_saved)
beg = end
except:
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
return _get_values()
In a custom save or save_model method you can then use:
track_mapping = {'nome': 'name',
'track' : 'MULTILINESTRING'}
targetPath = "/my/gpx/file/path.gpx"
gpx_file = DataSource(targetPath)
mytrack = MyMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
pippo = mytrack.get_values()
obj.track = pippo['track']