In trying to understand Django's select_for_update, I created a test to see how it behaves. My understanding was that with two processes, if one selects rows for update inside a transaction and the other tries to write to those locked rows before the first transaction ends, the write would fail. However, the following test passes. What could be wrong?
import os
from time import sleep

import pytest
from django.db import transaction

@pytest.mark.django_db(transaction=True)
def test_select_for_update():
    ExampleModel.objects.create(type="test1")
    ExampleModel.objects.create(type="test2")

    child_pid = os.fork()
    if child_pid == 0:
        # child: wait for the parent to take the lock, then try to write
        sleep(5)
        qs = ExampleModel.objects.all()
        ExampleModel.objects.bulk_update(qs, ["type"])
    else:
        # parent: lock the rows and hold the transaction open past the child's write
        with transaction.atomic():
            qs = ExampleModel.objects.all().select_for_update()
            for obj in qs:
                obj.type = "test3"
            ExampleModel.objects.bulk_update(qs, ["type"])
            sleep(10)
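(A note on the locking semantics the test assumes: by default a competing write does not fail, it blocks until the lock is released, so the child's bulk_update simply waits for the parent's transaction to commit. Fail-fast behaviour exists, but the competing side must itself lock with select_for_update(nowait=True), in which case Django raises DatabaseError when the rows are already locked. A minimal sketch of that variant, assuming the same ExampleModel:

from django.db import DatabaseError, transaction

# Run in a second process while the first transaction still holds the lock.
try:
    with transaction.atomic():
        locked = ExampleModel.objects.select_for_update(nowait=True)
        list(locked)  # evaluating the queryset is what acquires the lock
except DatabaseError:
    # raised immediately because the rows are locked by the other transaction
    print("rows are locked by another transaction"))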
I want to fetch categories from a Magento API and display them in a template, and at the same time save them in the DB for later use.
There are many categories, and rendering the template takes more than 30 seconds.
I started learning asyncio but couldn't find my way with it. I surely missed something.
First, my URL leads to the function that retrieves the categories:
import asyncio

from django.contrib.auth.decorators import login_required
from django.shortcuts import render

@login_required
def get_categories(request):
    Category.objects.all().delete()
    try:
        cats = fetch_categories()
        tree = cats['children_data']
    except Exception:
        print('error: could not fetch the categories (fetch_categories)')
    asyncio.run(parse_categories(tree))
    return render(request, 'categories/categories_list.html', {'tree': tree})
When I get the "categories tree", I send it to:
async def parse_categories(tree):
    for lvl1 in tree:
        all_tasks = []
        asyncio.create_task(save_cat(lvl1))
        # main products categories (turbo, injectors ...)
        for lvl2 in lvl1['children_data']:
            asyncio.create_task(save_cat(lvl2))
            # sub categories like RENAULT, DACIA
            for lvl3 in lvl2['children_data']:
                asyncio.create_task(save_cat(lvl3))
                for lvl4 in lvl3['children_data']:
                    asyncio.create_task(save_cat(lvl4))
                    for lvl5 in lvl4['children_data']:
                        asyncio.create_task(save_cat(lvl5))
My save() function is async. I'm not sure it should be. Before I started using async, it was working.
async def save_cat(cat):
    cat_id = cat['id']
    new_cat = Category()
    new_cat.id = cat_id
    new_cat.name = cat.get('name', None)
    new_cat.parent = cat.get('parent_id', None)
    new_cat.url = cat.get('path', None)
    new_cat.is_active = cat.get('is_active', None)
    new_cat.position = cat.get('position', None)
    new_cat.level = cat.get('level', None)
    new_cat.save()
When I run it, there is no error: the context is sent to the template and displays fine, but no category is saved.
I also tried building a task list with asyncio.create_task at each level and running the loop at the end of parse_categories(), as suggested in this thread, without success.
all_tasks.append(asyncio.create_task(save_cat(lvl1)))
[...]
responses = asyncio.gather(*all_tasks, return_exceptions=True)
loop = asyncio.get_event_loop()
loop.run_until_complete(responses)
loop.close()
Any clue to solve my case will be welcome.
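(For reference, a pattern that makes this kind of flow actually persist the rows is to collect the coroutines, await them with asyncio.gather, and route the ORM write through sync_to_async, since Django's ORM is synchronous. A minimal sketch under those assumptions, using asgiref, which ships with Django 3.0+; the recursive collect() here is a stand-in for the hand-written level loops above:

import asyncio

from asgiref.sync import sync_to_async

async def save_cat(cat):
    new_cat = Category(
        id=cat['id'],
        name=cat.get('name'),
        parent=cat.get('parent_id'),
        url=cat.get('path'),
        is_active=cat.get('is_active'),
        position=cat.get('position'),
        level=cat.get('level'),
    )
    # Django's ORM is synchronous, so hand the save off to a thread
    await sync_to_async(new_cat.save)()

async def parse_categories(tree):
    tasks = []

    def collect(cat):
        tasks.append(asyncio.create_task(save_cat(cat)))
        for child in cat.get('children_data', []):
            collect(child)

    for lvl1 in tree:
        collect(lvl1)
    # actually wait for every save to finish before returning
    await asyncio.gather(*tasks, return_exceptions=True))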
Hello, I have a Celery task that is supposed to run every hour to fetch a key. It runs, and even acts like it has updated the database, but in reality it does not update it.
import json

import requests
from requests.auth import HTTPBasicAuth

@app.task
def refresh_token():
    r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
    obj = json.loads(r.text)
    obj['expires_in'] = int(obj['expires_in'])
    try:
        mpesa_token = MpesaAccessToken.objects.get(id=1)
        mpesa_token.access_token = obj['access_token']
        mpesa_token.save()
        print(obj)
        print(mpesa_token.access_token)
        print("saved")
    except MpesaAccessToken.DoesNotExist:
        print(obj)
        mpesa_token = MpesaAccessToken.objects.create(**obj)
    return 1
The last three prints all show up in the logs, but checking the admin panel, the values are not updated. However, when I use a view and make a request that calls the function, the database does get updated. Does anyone know what is going on?
You need to add transaction.atomic() to your code. Like this:
from django.db import transaction

@app.task
def refresh_token():
    with transaction.atomic():
        r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
        obj = json.loads(r.text)
        obj['expires_in'] = int(obj['expires_in'])
        try:
            mpesa_token = MpesaAccessToken.objects.get(id=1)
            mpesa_token.access_token = obj['access_token']
            mpesa_token.save()
            print(obj)
            print(mpesa_token.access_token)
            print("saved")
        except MpesaAccessToken.DoesNotExist:
            print(obj)
            mpesa_token = MpesaAccessToken.objects.create(**obj)
    return 1
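(As an aside, the get/save/create dance can be collapsed with Django's update_or_create, which creates the row on the first run and updates it afterwards. A minimal sketch under the same assumptions as the task above:

@app.task
def refresh_token():
    r = requests.get(AUTH_URL, auth=HTTPBasicAuth(CONSUMER_KEY, CONSUMER_SECRET))
    obj = json.loads(r.text)
    with transaction.atomic():
        # one call handles both the "row exists" and "first run" cases
        MpesaAccessToken.objects.update_or_create(
            id=1,
            defaults={'access_token': obj['access_token']},
        )
    return 1)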
I am getting weird results when testing two different views. They are both list views: the first should only show active orders (active=True), the second only completed, historic orders (completed=True).
I am using PostgreSQL as the database. The issue is that both views work perfectly fine in the browser, but when testing, pytest raises an error saying that an order that shouldn't be listed is listed.
The views are as follows:
class OrderHistoryView(LoginRequiredMixin, ListView):
    template_name = 'orders/order_history.html'

    def get_queryset(self):
        user = self.request.user
        qs = Order.objects.filter(Q(buyer=user, completed=True) | Q(seller=user, completed=True))
        return qs

class OrderListView(LoginRequiredMixin, ListView):
    template_name = 'orders/order_list_view.html'

    def get_queryset(self):
        user = self.request.user
        qs = Order.objects.filter(Q(buyer=user, active=True) | Q(seller=user, active=True))
        return qs
The tests are:
class OrderHistoryViewTests(TestCase):
    @classmethod
    def setUp(self):
        self.req = RequestFactory()
        self.user = mixer.blend('user.CustomUser', email='test@test.com', password='1234')
        self.user2 = mixer.blend('user.CustomUser')
        self.advert = mixer.blend('advert.Advert', author=self.user)
        self.offer = mixer.blend(
            'offer.Offer', advert=self.advert, author=self.user2, receiver=self.user, accepted=True)
        self.order = mixer.blend(
            'orders.Order', advert=self.advert, seller=self.user2, buyer=self.user, offer=self.offer,
            pending=True, completed=True, active=True, disputed=False, cancelled=False)
        self.order2 = mixer.blend('orders.Order', completed=False, buyer=self.user)

    def test_only_shows_completed(self):
        request = self.req.get('/')
        request.user = self.user
        resp = OrderHistoryView.as_view()(request)
        self.assertContains(resp, self.order)
        self.assertNotContains(resp, self.order2)
The second test is exactly the same; it just tests that the view only shows active orders.
The error message is:
FAILED orders/tests/test_views.py::OrderHistoryViewTests::test_only_shows_completed - AssertionError: 3 != 0 : Response should not contain '10'
FAILED orders/tests/test_views.py::OrderListViewTests::test_order_listing_showing_only_active - AssertionError: 2 != 0 : Response should not contain '15'
EDIT:
I have done some further investigation and found that even the first self.assertContains(resp, self.order) statement is failing. When I run test_views on its own, the self.assertContains(resp, self.order) assertion passes, but when I run all the tests together it fails. I noticed that after commenting out 5 different tests in the test_forms directory, all the tests pass. The issue is that there is nothing wrong with those 5 form tests: there are no mock statements in them, and the teardown seems to be working.
I then added a print statement to see what queryset is actually showing up:
Printing Order.objects.all() shows ", ]>" and print(self.order.completed) also comes up True. However, the failing test says: "AssertionError: False is not true : Couldn't find '25' in response"
Even though 25 is clearly there and completed. The view code looks fine, and it is a very simple view. What could be causing this?
Also, I have noticed that if I run py.test I only need to comment out 3 of the form tests for everything to pass, whereas with manage.py test I have to comment out 5 form tests.
I just used print(resp.context_data) to see what is actually in the response...
{'paginator': None, 'page_obj': None, 'is_paginated': False, 'object_list': ]>, 'order_list': ]>, 'view': }
From here we can see the order "25" actually is in the response; I printed the username too, and it matches.
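(One way to make a failure like this easier to diagnose is to assert against the view's queryset instead of matching str(order) in the rendered HTML, which is what assertContains does. A sketch reusing the setup above, with test_only_shows_completed_queryset as a hypothetical extra test:

def test_only_shows_completed_queryset(self):
    request = self.req.get('/')
    request.user = self.user
    resp = OrderHistoryView.as_view()(request)
    # compare primary keys rather than rendered markup
    listed_pks = [o.pk for o in resp.context_data['object_list']]
    self.assertIn(self.order.pk, listed_pks)
    self.assertNotIn(self.order2.pk, listed_pks))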
I have several test cases which all have a similar tearDown:
def tearDown(self):
    execution_time = time.time() - self.startTime
    result_list = [self._testMethodName]
    result = [str(x) for x in sys.exc_info()]
    if result[0] == 'None':
        result_list.append('PASS')
    elif 'Assertion' in result[0]:
        result_list.append('FAIL')
    else:
        result_list.append('ERROR')
    result_list.append(result)
    result_list.append(str(execution_time))
    TEST_RESULTS.append(result_list)
I'm using the tearDown function to store results from each test (in the test case) in a global TEST_RESULTS object (so each TestCase file has a TEST_RESULTS global defined).
Then, in the tearDownClass function, I'm doing this to store the results to CSV:
@classmethod
def tearDownClass(cls):
    with open('tests/results/test_case_1_output.csv', 'w') as resultFile:
        wr = csv.writer(resultFile)
        wr.writerows(TEST_RESULTS)
To me this is a terrible implementation: globals defined everywhere, and tearDown/tearDownClass implemented over and over in each test case rather than defined once.
Furthermore, I would like to create a single result file that collects results from all test cases.
My hunch is that this requires defining the file handle at the runner level (or somewhere before the TestCases are called). That would let me reinitialize the CSV file at a higher level, rather than arbitrarily in one TestCase.
Does anyone have a suggestion on how this can be accomplished? I did not see a way to do it in the docs, and overriding Django's TestCase seemed hazardous.
I will post my solution (thanks very much to @xyres), since I think it might help others.
Below is an example of a TestCase that calls setUp, tearDown and setUpClass from the base classes (either TestManager or TestCase). The trick was to call setUpClass from the base class TestCase and to create another initialization function, initialize(), called on the TestManager base class.
class MyTestCase(TestManager, TestCase):
    def setUp(self):
        self.param1, self.param2 = super(MyTestCase, self).setUp()

    def tearDown(self):
        test_name = self._testMethodName
        super(MyTestCase, self).get_and_write_results_to_csv(test_name)

    @classmethod
    def setUpClass(cls):
        super(MyTestCase, cls).setUpClass()
        super(MyTestCase, cls).initialize('my test case name')
class TestManager():
    @classmethod
    def initialize(cls, test_case_name):
        with open('path/to/my/testresults.csv', 'w') as resultFile:
            wr = csv.writer(resultFile)
            # writerow expects a sequence, so wrap the header in a list
            wr.writerow(["Results for " + test_case_name + " are below:"])

    def setUp(self):
        """
        Do some setup stuff that's the same for each TestCase.
        The actions are not shown here; assume this function
        returns 2 params that are the same for every TestCase setup.
        """
        return param1, param2

    def get_and_write_results_to_csv(self, test_name):
        execution_time = time.time() - self.startTime
        result_list = [test_name]
        result = [str(x) for x in sys.exc_info()]
        if result[0] == 'None':
            result_list.append('PASS')
        elif 'Assertion' in result[0]:
            result_list.append('FAIL')
        else:
            result_list.append('ERROR')
        result_list.append(result)
        result_list.append(str(execution_time))
        with open('path/to/my/testresults.csv', 'a') as resultFile:
            wr = csv.writer(resultFile)
            wr.writerow(result_list)
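(The mixin is then reusable from any other test case in the same way, with one caveat: initialize() as written reopens the same hard-coded CSV in 'w' mode, so each call truncates whatever earlier test cases wrote. A sketch of a second test case, with AnotherTestCase as a hypothetical name:

class AnotherTestCase(TestManager, TestCase):
    @classmethod
    def setUpClass(cls):
        super(AnotherTestCase, cls).setUpClass()
        super(AnotherTestCase, cls).initialize('another test case name'))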
I'm trying to make something like a "task manager" in Django, using a thread that waits for jobs.
import multiprocessing
from Queue import Queue, Empty

def task_maker(queue_obj):
    while True:
        try:
            print queue_obj.qsize()  # << always prints 0
            _data = queue_obj.get(timeout=10)
            if _data:
                _data['function'](*_data['args'], **_data['kwargs'])
        except Empty:
            pass
        except Exception as e:
            print e

tasks = Queue()
stream = multiprocessing.Process(target=task_maker, args=(tasks,))
stream.start()

def add_task(func=lambda: None, args=(), kwargs={}):
    try:
        tasks.put({
            'function': func,
            'args': args,
            'kwargs': kwargs
        })
        print tasks.qsize()  # prints a normal size: 1, 2, 3, 4...
    except Exception as e:
        print e
I'm using "add_task" in views.py files, when user makes some request.
Why queue in "stream" always empty? what i'm doing wrong?
There are two issues with the current code: 1) with multiprocessing (but not threading), the qsize() function is unreliable, so I suggest you don't use it, as it is confusing. 2) You can't directly modify an object that's been taken from a queue.
Consider two processes sending data back and forth. One won't know whether the other has modified some data, since data is private to each process. To communicate, send the data explicitly, with Queue.put() or by using a Pipe.
The general way a producer/consumer system works is this: 1) jobs are put onto a queue; 2) a worker blocks, waiting for work, and when a job appears, it puts the result on a different queue; 3) a manager or 'beancounter' process consumes the output from the second queue and prints it or otherwise processes it.
Have fun!
#!/usr/bin/env python
import logging, multiprocessing

def myproc(arg):
    return arg * 2

def worker(inqueue, outqueue):
    logger = multiprocessing.get_logger()
    logger.info('start')
    while True:
        job = inqueue.get()
        if job is None:  # sentinel: no more work, pass it along and exit
            outqueue.put(None)
            break
        logger.info('got %s', job)
        outqueue.put(myproc(job))

def beancounter(inqueue):
    while True:
        result = inqueue.get()
        if result is None:  # sentinel from the worker
            break
        print 'done:', result

def main():
    logger = multiprocessing.log_to_stderr(
        level=logging.INFO,
    )
    logger.info('setup')

    data_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    for num in range(5):
        data_queue.put(num)
    data_queue.put(None)  # sentinel so both processes exit and join() returns

    worker_p = multiprocessing.Process(
        target=worker, args=(data_queue, out_queue),
        name='worker',
    )
    worker_p.start()

    bean_p = multiprocessing.Process(
        target=beancounter, args=(out_queue,),
        name='beancounter',
    )
    bean_p.start()

    worker_p.join()
    bean_p.join()
    logger.info('done')

if __name__ == '__main__':
    main()
I've got it. I do not know why, but when I tried "threading", it worked!
import logging
import threading
from Queue import Queue, Empty

MailLogger = logging.getLogger('mail')

class TaskMaker(threading.Thread):
    def __init__(self, que):
        threading.Thread.__init__(self)
        self.queue = que

    def run(self):
        while True:
            try:
                print "start", self.queue.qsize()
                _data = self.queue.get()
                if _data:
                    print "make"
                    _data['function'](*_data['args'], **_data['kwargs'])
            except Empty:
                pass
            except Exception as e:
                print e
                MailLogger.error(e)

tasks = Queue()
stream = TaskMaker(tasks)
stream.start()

def add_task(func=lambda: None, args=(), kwargs={}):
    global tasks
    try:
        tasks.put_nowait({
            'function': func,
            'args': args,
            'kwargs': kwargs
        })
    except Exception as e:
        print e
        MailLogger.error(e)
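(One small caveat with this version: the worker thread is non-daemon, so it keeps the process alive at shutdown. Marking it as a daemon before starting it avoids that. A minimal sketch, same Python 2 style as the code above:

tasks = Queue()
stream = TaskMaker(tasks)
stream.daemon = True  # a daemon thread won't block interpreter exit
stream.start())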