I'm experiencing issues trying to update a queryset in setUp():
class MyTestCase(BaseTestCase):
    """Regression test: resets a batch of MyModel rows to a known status in setUp."""

    OPERATOR_USERNAME = "test_operator"
    OPERATOR_PASSWORD = "secret"
    OPERATOR_EMAIL = "test@example.org"  # was garbled as 'test#example.org'

    @classmethod  # was '#classmethod' — '#' turns the decorator into a comment
    def setUpClass(cls):
        super().setUpClass()
        # One shared operator account for every test in the class.
        cls.operator = Operator.objects.create_superuser(
            username=cls.OPERATOR_USERNAME, password=cls.OPERATOR_PASSWORD, email=cls.OPERATOR_EMAIL
        )

    def setUp(self) -> None:
        self.client.login(username=self.OPERATOR_USERNAME, password=self.OPERATOR_PASSWORD)
        # Grab the first ten matching ids; the slice makes this an independent queryset.
        utd_ids = MyModel.objects.filter(
            ref_year=2021).values_list("id", flat=True
        )[:10]
        utd_qs = MyModel.objects.filter(id__in=utd_ids)  # just added another step for debugging purposes
        # update initial utd status
        _updates = utd_qs.update(status="INITIAL_STATE_VALUE")
        print(_updates)  # it prints 10
        self.ssn_list = list(utd_qs.values_list("user__ssn", flat=True))
        self.client.login(username=self.OPERATOR_USERNAME, password=self.OPERATOR_PASSWORD)
        print(MyModel.objects.filter(id__in=utd_ids).values("status").distinct())
        # this should retrieve 1 value but instead it retrieves multiple values different from INITIAL_STATE_VALUE
Am I doing something wrong? I tried the same update through `python manage.py shell` on a similar queryset and it works as expected.
Related
I use django-import-export 2.8.0 with Oracle 12c.
Line-by-line import via import_data() works without problems, but when I turn on the use_bulk=True option, it stops importing and does not throw any errors.
Why doesn't it work?
resources.py
class ClientsResources(resources.ModelResource):
    """django-import-export resource describing how Clients rows are imported."""

    class Meta:
        model = Clients
        # Columns expected in the uploaded dataset.
        fields = ('id', 'name', 'surname', 'age', 'is_active')
        # Flush bulk_create/bulk_update to the database every 1000 rows.
        batch_size = 1000
        # Buffer rows and persist them via bulk_create instead of per-row save().
        use_bulk = True
        # NOTE(review): raise_errors is normally an import_data() argument, not a
        # documented Meta option in django-import-export 2.x — confirm it has effect.
        raise_errors = True
views.py
def import_data(request):
    """Import Clients rows from an uploaded file via ClientsResources.

    Validates the whole dataset with a dry run first and only commits when
    the dry run reports no errors. Always redirects back to the referrer —
    the original returned None for non-POST requests, which Django treats
    as a server error.
    """
    if request.method == 'POST':
        file_format = request.POST['file-format']
        new_employees = request.FILES['importData']
        clients_resource = ClientsResources()
        dataset = Dataset()
        # Assumes the upload is UTF-8 text — TODO confirm for binary formats (e.g. xlsx).
        imported_data = dataset.load(new_employees.read().decode('utf-8'), format=file_format)
        # Dry run: validate everything before touching the database.
        result = clients_resource.import_data(imported_data, dry_run=True, raise_errors=True)
        if not result.has_errors():
            clients_resource.import_data(imported_data, dry_run=False)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
data.csv
id,name,surname,age,is_active
18,XSXQAMA,BEHKZFI,89,Y
19,DYKNLVE,ZVYDVCX,20,Y
20,GPYXUQE,BCSRUSA,73,Y
21,EFHOGJJ,MXTWVST,93,Y
22,OGRCEEQ,KJZVQEG,52,Y
--UPD--
I used django-debug-toolbar and saw a very strange behavior with import-queries.
It doesn't work through the Admin Panel either. I see all the importing rows, then it reports "Import finished, with 5 new and 0 updated clients.", yet I see these strange queries
Then I use import by my form and here simultaneous situation:
use_bulk by django-import-export (more)
And for comparing my handle create_bulk()
--UPD2--
I've tried to trail import logic and look what I found:
import_export/resources.py
def bulk_create(self, using_transactions, dry_run, raise_errors, batch_size=None):
    """
    Creates objects by calling ``bulk_create``.
    """
    # Debug aid added while tracing the issue: shows what is about to be persisted.
    print(self.create_instances)
    try:
        if len(self.create_instances) > 0:
            if not using_transactions and dry_run:
                # Dry run without a wrapping transaction: skip the write entirely.
                pass
            else:
                self._meta.model.objects.bulk_create(self.create_instances, batch_size=batch_size)
    except Exception as e:
        logger.exception(e)
        if raise_errors:
            raise e
    finally:
        # Always drop the buffered instances, even after a failure.
        self.create_instances.clear()
This print() showed an empty list as its value.
This issue appears to be due to a bug in the 2.x version of django-import-export. It is fixed in v3.
The bug is present when running in bulk mode (use_bulk=True)
The logic in save_instance() is finding that 'new' instances have pk values set, and are then incorrectly treating them as updates, not creates.
I cannot determine how this would happen. It's possible this is related to using Oracle (though I cannot see how).
Running into some problems when testing my custom admin actions.
First I can show you an example of a test that works and the actions it's testing.
custom action, Product model
@admin.action(description="Merge selected products")  # was '#admin.action' — garbled decorator
def merge_products(self, request, queryset):
    """Merge the selected products into the oldest one.

    The earliest-created product becomes the canonical product; every
    external product attached to the others is re-pointed at it.
    """
    list_of_products = list(queryset.order_by("created_at"))
    # Oldest product wins.
    canonical_product = list_of_products[0]
    list_of_products.remove(canonical_product)
    for p in list_of_products:
        # Move each external product over to the canonical product.
        for ep in p.external_products.all():
            ep.internal_product = canonical_product
            ep.save()
        p.save()
    canonical_product.save()
related test
class MockRequest:
    """Minimal stand-in for an HttpRequest when invoking admin actions directly."""
class ProductAdminTest(TestCase):
    """Exercises the merge_products admin action directly (no HTTP round-trip)."""

    def setUp(self):
        self.product_admin = ProductAdmin(model=Product, admin_site=AdminSite())
        User = get_user_model()
        admin_user = User.objects.create_superuser(
            # email was garbled as 'superadmin#email.com'
            username="superadmin", email="superadmin@email.com", password="testpass123"
        )
        self.client.force_login(admin_user)

    def test_product_merge(self):
        self.boot1 = baker.make("products.Product", title="Filippa K boots", id=uuid4())
        self.boot2 = baker.make("products.Product", title="Filippa K boots", id=uuid4())
        self.external_product1 = baker.make(
            "external_products.ExternalProduct", internal_product=self.boot1
        )
        self.external_product2 = baker.make(
            "external_products.ExternalProduct", internal_product=self.boot2
        )
        self.assertEqual(self.boot1.external_products.count(), 1)
        self.assertEqual(self.boot2.external_products.count(), 1)
        request = MockRequest()
        queryset = Product.objects.filter(title="Filippa K boots")
        self.product_admin.merge_products(request, queryset)
        # .count() re-queries the DB, so no refresh_from_db() is needed here.
        self.assertEqual(self.boot1.external_products.count(), 2)
        self.assertEqual(self.boot2.external_products.count(), 0)
Might not be the prettiest test, but it works, and so does the action.
The code above works as it should but has been running into problems when trying to test an almost identical action but for another model.
custom action, Brand model
@admin.action(description="Merge selected brands")  # was '#admin.action' — garbled decorator
def merge_brands(self, request, queryset):
    """Merge the selected brands into one canonical brand.

    The canonical brand is the one flagged .canonical when present,
    otherwise the earliest-created; every other brand and its products
    are re-pointed at the canonical brand.
    """
    list_of_brands = list(queryset.order_by("created_at"))
    # Default to the oldest brand, but prefer an explicitly flagged canonical one.
    canonical_brand = list_of_brands[0]
    for brand in list_of_brands:
        if brand.canonical:
            canonical_brand = brand
    list_of_brands.remove(canonical_brand)
    for brand in list_of_brands:
        brand.canonical_brand = canonical_brand
        brand.save()
        # Move every product of the merged brand to the canonical brand.
        for product in Product.objects.filter(brand=brand):
            product.brand = canonical_brand
            product.save()
    canonical_brand.save()
related test
class MockRequest:
    """Bare object passed as the `request` argument to admin actions under test."""
def setUp(self):
    """Create the BrandAdmin under test and a logged-in superuser."""
    self.brand_admin = BrandAdmin(model=Brand, admin_site=AdminSite())
    User = get_user_model()
    admin_user = User.objects.create_superuser(
        # email was garbled as 'superadmin#email.com'
        username="superadmin", email="superadmin@email.com", password="testpass123"
    )
    self.client.force_login(admin_user)
def test_brand_merge(self):
    """merge_brands should re-point products and brands at the canonical brand.

    The post-merge assertions compare in-memory instances, so each object
    must be reloaded with refresh_from_db() after the action runs: the
    action updates rows in the database, not these Python objects. Without
    the reloads the assertions see stale values and the test fails.
    """
    self.brand1 = baker.make("brands.Brand", title="Vans")
    self.brand1.canonical_brand = self.brand1
    self.brand1.active = True
    self.brand1.save()
    self.brand2 = baker.make("brands.Brand", title="Colmar")
    self.boot1 = baker.make("products.Product", brand=self.brand1, id=uuid4())
    self.boot2 = baker.make("products.Product", brand=self.brand2, id=uuid4())
    self.assertEqual(self.boot1.brand, self.brand1)
    self.assertEqual(self.boot2.brand, self.brand2)
    self.assertEqual(self.brand1.active, True)
    self.assertEqual(self.brand2.active, False)
    request = MockRequest()
    queryset = Brand.objects.all()
    self.brand_admin.merge_brands(request, queryset)
    # Reload from the DB: the admin action saved new values that these
    # in-memory instances cannot see on their own.
    self.boot1.refresh_from_db()
    self.boot2.refresh_from_db()
    self.brand1.refresh_from_db()
    self.brand2.refresh_from_db()
    self.assertEqual(self.boot1.brand, self.brand1)
    self.assertEqual(self.boot2.brand, self.brand1)
    self.assertEqual(self.brand1.active, True)
    self.assertEqual(self.brand2.active, False)
    self.assertEqual(self.brand1.canonical_brand, self.brand1)
    self.assertEqual(self.brand2.canonical_brand, self.brand1)
It's not really necessary how the code above works but I included it for context.
The product admin action works both when I try it manually and in the test suite. The brand action does work as it should when tested manually but in the test suite, it does nothing.
self.brand_admin.merge_brands(request, queryset)
does not change the tested objects in any way.
I have another custom admin action for the Brand model and it's the same problem there, works like a charm when I try it manually but it does nothing in the test suite. Because of those facts, my guess is that the problem is related to my admin setup in the test suite but it's identical to the admin setup used for the first test which works.
Any ideas?
Gladly provide more context if needed.
It looks like you need to refresh the objects from the Database, in your test self.brand1, boot1, etc are in memory and will not be automatically updated from the database, you need to get the new values from the database.
https://docs.djangoproject.com/en/4.0/ref/models/instances/#refreshing-objects-from-database
If you need to reload a model’s values from the database, you can use the refresh_from_db() method. When this method is called without arguments the following is done:
All non-deferred fields of the model are updated to the values currently present in the database.
Any cached relations are cleared from the reloaded instance.
After you call your merge_brands you should refresh each object.
self.boot1.refresh_from_db()
self.boot2.refresh_from_db()
# etc..
Hello I have a celery task that is suppose to run every 1 hour to fetch a key and it runs and even acts like it has updates the database but it does not update in reality
@app.task  # was '#app.task' — garbled decorator
def refresh_token():
    """Fetch a fresh M-Pesa OAuth token and persist it on the singleton row (id=1).

    Creates the row from the API payload on first run; afterwards only
    access_token is updated. Returns 1 so the celery result backend
    records success.
    """
    r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
    obj = json.loads(r.text)
    obj['expires_in'] = int(obj['expires_in'])
    try:
        mpesa_token = MpesaAccessToken.objects.get(id=1)
        mpesa_token.access_token = obj['access_token']
        mpesa_token.save()
        print(obj)
        print(mpesa_token.access_token)
        print("saved")
    # Narrowed from a bare 'except:', which silently swallowed every error
    # (e.g. a KeyError on a malformed payload), not just the missing row.
    except MpesaAccessToken.DoesNotExist:
        print(obj)
        mpesa_token = MpesaAccessToken.objects.create(**obj)
    return 1
The last three prints all show up in the logs, but when checking the admin panel the values are not updated. However, when I use a view, make a request and then call the function, the database gets updated. Does anyone know what is going on?
You need to add transaction.atomic() to your code. Like this:
from django.db import transaction
@app.task  # was '#app.task' — garbled decorator
def refresh_token():
    """Same task, wrapped in transaction.atomic() so the row update commits
    as a single unit when the task runs inside the celery worker."""
    with transaction.atomic():
        r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
        obj = json.loads(r.text)
        obj['expires_in'] = int(obj['expires_in'])
        try:
            mpesa_token = MpesaAccessToken.objects.get(id=1)
            mpesa_token.access_token = obj['access_token']
            mpesa_token.save()
            print(obj)
            print(mpesa_token.access_token)
            print("saved")
        # Narrowed from a bare 'except:' so real errors are not hidden.
        except MpesaAccessToken.DoesNotExist:
            print(obj)
            mpesa_token = MpesaAccessToken.objects.create(**obj)
    return 1
I am testing a Django Library application, which has a Book model and a search bar to filter those books that checks title__icontains = 'q'.
The url pattern:
path('search_book/', views.BookSearchListView.as_view(), name='search_book'),
The url routing:
http://127.0.0.1:8000/catalog/search_book/?q=house
Implementation of the following Class-based view:
class BookSearchListView(BookListView):
    """Book list filtered by the ?q= search parameter.

    Every whitespace-separated term in q must match the title:
    an AND of title__icontains filters, one per term.
    """
    paginate_by = 3

    def get_queryset(self):
        # Python 3 no-arg super(); was super(BookSearchListView, self).
        result = super().get_queryset()
        query = self.request.GET.get('q')
        # Guard clause: no search parameter means the unfiltered list.
        if not query:
            return result
        terms = query.split()
        return result.filter(
            reduce(operator.and_, (Q(title__icontains=term) for term in terms))
        )
In my tests.py, I have to develop test cases for the above view, but do not understand how to go about it. I have attempted the following:
class BookSearchListViewTest(TestCase):
    """
    Test case for the Book Search List View
    """

    def setUp(self):
        # create_user() already saves the user — the original's extra
        # test_user1.save() call was redundant.
        User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_author = Author.objects.create(first_name='John', last_name='Smith')
        Book.objects.create(title='House', author=test_author, summary='Published in 1990',
                            isbn='123456789123')
        Book.objects.create(title='Money', author=test_author, summary='Published in 1991',
                            isbn='9876543210123')
        Book.objects.create(title='Mouse', author=test_author, summary='Published in 1992',
                            isbn='1293874657832')

    def test_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('books'))
        self.assertRedirects(response, '/catalog/customer_login/?next=/catalog/books/')

    def test_query_search_filter(self):
        # Exercises only the ORM filter; covering BookSearchListView itself
        # requires an HTTP request through self.client.get().
        self.assertQuerysetEqual(Book.objects.filter(title__icontains='House'), ["<Book: House>"])
While the test_query_search_filter test runs successfully, in my coverage report, the class BookSearchListView is not getting tested.
I am a complete novice in Django and have just started out with test cases.
If you have parameter in your url then you should send it via url in your test case.
You created a Book object which title is House in your setUp method so;
def test_query_filter(self):
    """Hit the search URL with ?q=... so BookSearchListView.get_queryset runs."""
    # If 'books' is login-required, authenticate with the user from setUp first.
    url = '{url}?{filter}={value}'.format(
        url=reverse('books'),
        filter='q', value='Hou')
    # With string format we finally expect a url like: '/books/?q=Hou'
    # Password must match the one created in setUp ('1X<ISRUkw+tuK');
    # the original dropped the trailing 'K', so login() would fail.
    self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
    response = self.client.get(url)
    ...
    # test cases
    ...
You can test it like this:
def test_redirect_if_not_logged_in(self):
    """Logged-in user hitting 'books' sees the full book list in context."""
    # Password must match setUp ('1X<ISRUkw+tuK'); the original dropped the final 'K'.
    self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
    response = self.client.get(reverse('books'))
    self.assertQuerysetEqual(response.context['object_list'], Book.objects.all(), transform=lambda x: x)
You can check the testing tools documentation for more details.
I am building an application with GeoDjango and I have the following problem:
I need to read track data from a GPX file and those data should be stored in a model MultiLineStringField field.
This should happen in the admin interface, where the user uploads a GPX file
I am trying to achieve this, namely that the data grabbed from the file should be assigned to the MultiLineStringField, while the other fields should get values from the form.
My model is:
class GPXTrack(models.Model):
    """GPX track uploaded by a user; geometry is parsed from gpx_file into track."""

    nome = models.CharField("Nome", blank=False, max_length=255)
    slug = models.SlugField("Slug", blank=True)
    # sport / natura / arte-cultura
    tipo = models.CharField("Tipologia", blank=False, max_length=2, choices=TIPOLOGIA_CHOICES)
    descrizione = models.TextField("Descrizione", blank=True)
    gpx_file = models.FileField(upload_to='uploads/gpx/')
    track = models.MultiLineStringField(blank=True)
    objects = models.GeoManager()
    published = models.BooleanField("Pubblicato")
    rel_files = generic.GenericRelation(MyFiles)
    #publish_on = models.DateTimeField("Pubblicare il", auto_now_add = True)
    created = models.DateTimeField("Created", auto_now_add=True)
    updated = models.DateTimeField("Updated", auto_now=True)

    class Meta:
        #verbose_name = "struttura'"
        #verbose_name_plural = "strutture"
        ordering = ['-created']

    def __str__(self):
        return str(self.nome)

    def __unicode__(self):
        return '%s' % (self.nome)

    def put(self):
        """Slugify nome, delegate to the parent put(), and return its key."""
        self.slug = sluggy(self.nome)
        # BUG FIX: super() was called with 'Foresta' — a different class name,
        # presumably left over from a copy-paste — which raises NameError here.
        key = super(GPXTrack, self).put()
        # do something after save
        return key
While in the admin.py file I have overwritten the save method as follows:
from django.contrib.gis import admin
from trails.models import GPXPoint, GPXTrack
from django.contrib.contenttypes import generic
from django.contrib.gis.gdal import DataSource
#from gpx_mapping import GPXMapping
from django.contrib.gis.utils import LayerMapping
from django.template import RequestContext
import tempfile
import os
import pprint
class GPXTrackAdmin(admin.OSMGeoAdmin):
    """Admin for GPXTrack: parses the uploaded GPX file and fills the track field."""

    list_filter = ('tipo', 'published')
    search_fields = ['nome']
    list_display = ('nome', 'tipo', 'published', 'gpx_file')
    inlines = [TrackImagesInline, TrackFilesInline]
    prepopulated_fields = {"slug": ("nome",)}

    def save_model(self, request, obj, form, change):
        """When creating a new object, set the creator field.

        Spools the uploaded GPX file to a temp path, runs it through
        LayerMapping to extract the MULTILINESTRING geometry, then copies
        the geometry onto the object being saved.
        """
        if 'gpx_file' in request.FILES:
            gpxFile = request.FILES['gpx_file']
            # BUG FIX: mkstemp() returns (fd, path); the original leaked the
            # open fd and wrote bytes chunks through a text-mode ('wt') handle.
            fd, targetPath = tempfile.mkstemp()
            with os.fdopen(fd, 'wb') as destination:
                for chunk in gpxFile.chunks():
                    destination.write(chunk)
            # define fields of interest for LayerMapping
            track_mapping = {'track': 'MULTILINESTRING'}
            gpx_file = DataSource(targetPath)
            mytrack = LayerMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
            mytrack.save()
            # remove the temp file saved
            os.remove(targetPath)
            # NOTE(review): LayerMapping exposes no .pk attribute — this lookup
            # is the unresolved part of the question; the MyMapping subclass
            # below is the working approach.
            orig = GPXTrack.objects.get(pk=mytrack.pk)
            # assign the parsed values from LayerMapping to the appropriate field
            obj.track = orig.track
            obj.save()
As far as I know:
LayerMapping cannot be used to update a field but only to save a new one
I cannot access a specific field of the LayerMapping object (ie in the code above: mytrack.track) and assign its value to a model field (ie obj.track) in the model_save method
I cannot retrieve the primary key of the last saved LayerMapping object (ie in the code above: mytrack.pk) in order to update it with the values passed in the form for the field not mapped in LayerMapping.mapping
What can I do then?!?!
I sorted it out by subclassing LayerMapping and adding a method get_values() that, instead of saving the retrieved data, returns it for any use or manipulation. The get_values method is a copy of the LayerMapping::save() method that returns the values instead of saving them.
I am using django 1.5
import os
from django.contrib.gis.utils import LayerMapping
import sys
class MyMapping(LayerMapping):
    """LayerMapping subclass (Django 1.5 / Python 2) whose get_values() walks the
    OGR layer like save() does, but returns the feature keyword arguments instead
    of persisting model instances.

    NOTE(review): only the kwargs of the LAST processed feature survive the loop
    (the local 'pippo' is overwritten each iteration) — confirm that is intended.
    """

    def get_values(self, verbose=False, fid_range=False, step=False,
                   progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Returns the contents from the OGR DataSource Layer
        according to the mapping dictionary given at initialization.
        Keyword Parameters:
        verbose:
        If set, information will be printed subsequent to each model save
        executed on the database.
        fid_range:
        May be set with a slice or tuple of (begin, end) feature ID's to map
        from the data source. In other words, this keyword enables the user
        to selectively import a subset range of features in the geographic
        data source.
        step:
        If set with an integer, transactions will occur at every step
        interval. For example, if step=1000, a commit would occur after
        the 1,000th feature, the 2,000th feature etc.
        progress:
        When this keyword is set, status information will be printed giving
        the number of features processed and successfully saved. By default,
        progress information will be printed every 1000 features processed,
        however, this default may be overridden by setting this keyword with an
        integer for the desired interval.
        stream:
        Status information will be written to this file handle. Defaults to
        using `sys.stdout`, but any object with a `write` method is supported.
        silent:
        By default, non-fatal error notifications are printed to stdout, but
        this keyword may be set to disable these notifications.
        strict:
        Execution of the model mapping will cease upon the first error
        encountered. The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)
        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress
        # Defining the 'real' save method, utilizing the transaction
        # decorator created during initialization.
        #self.transaction_decorator
        def _get_values(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer
            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError, msg:
                    # Something borked the validation
                    if strict: raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True
                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new: geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)
                    try:
                        # Unlike LayerMapping.save(), nothing is persisted here:
                        # the kwargs are captured in 'pippo' for the caller.
                        pippo = kwargs
                        num_saved += 1
                        if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                    except SystemExit:
                        raise
                    except Exception, msg:
                        if self.transaction_mode == 'autocommit':
                            # Rolling back the transaction so that other model saves
                            # will work.
                            transaction.rollback_unless_managed()
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return pippo
        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)
            # NOTE(review): this branch never returns 'pippo' to the caller —
            # get_values() yields None when `step` is used; confirm if intended.
            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i: step_slice = slice(beg, None)
                else: step_slice = slice(beg, end)
                try:
                    pippo = _get_values(step_slice, num_feat, num_saved)
                    beg = end
                except:
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            return _get_values()
In a custom save or save_model method you can then use:
# Map both the model's 'nome' field and its geometry from the data source.
track_mapping = {'nome': 'name',
                 'track' : 'MULTILINESTRING'}
targetPath = "/my/gpx/file/path.gpx"
gpx_file = DataSource(targetPath)
mytrack = MyMapping(GPXTrack, gpx_file, track_mapping, layer='tracks')
# get_values() returns the kwargs dict of the parsed feature instead of saving it.
pippo = mytrack.get_values()
# 'obj' is the model instance being saved in save_model — assign the parsed geometry.
obj.track = pippo['track']