Does Django Cache Clear Itself After Expiration - django

I am currently working with Memcache and Django to cache data requested from an external API, so I don't overwhelm their servers. Currently my code looks like this:
# CACHE CURRENT PRICE
cache_key_price = str(stock.id)+'_price' # needs to be unique
cache_key_change = str(stock.id)+'_change'
cache_keychange_pct = str(stock.id)+'_changePct'
cache_time = 60 * 5 # time in seconds for cache to be valid
price_data = cache.get(cache_key_price) # returns None if no key-value pair
change_data = cache.get(cache_key_change) # returns None if no key-value pair
changePct_data = cache.get(cache_keychange_pct) # returns None if no key-value pair
if not price_data:
    delayed_price, change, changePct = get_quote(stock.ticker)
    price_data = delayed_price
    change_data = change
    changePct_data = changePct
cache.set(cache_key_price, price_data, cache_time)
cache.set(cache_key_change, change_data, cache_time)
cache.set(cache_keychange_pct, changePct_data, cache_time)
context_dict['delayed_price'] = cache.get(cache_key_price)
context_dict['change'] = cache.get(cache_key_change)
context_dict['changePct'] = cache.get(cache_keychange_pct)
I'm a bit new to caching, and I'm curious whether, after 5 minutes, the cache will clear itself so that cache.get() returns None again, triggering the if not price_data: branch to fetch updated data.
Thanks in advance for any help!

Yes: entries stored with cache.set(key, value, timeout) expire once the timeout elapses; after that, cache.get(key) returns None again and your if not price_data: branch runs to fetch fresh data.
Here is a simplified version of your code (with just one key instead of all three); you can extend it to suit your needs.
I made two changes. First, the cache.set(...) call needs to be inside the if not price_data: block, so that it only runs when the cache is empty (or the entry has expired).
Second, you should use the variable price_data to populate the context, so you don't need to call cache.get(...) a second time.
cache_key_price = str(stock.id)+'_price' # needs to be unique
cache_time = 60 * 5 # time in seconds for cache to be valid
price_data = cache.get(cache_key_price) # returns None if no key-value pair
if not price_data:
    delayed_price, change, changePct = get_quote(stock.ticker)
    price_data = delayed_price
    cache.set(cache_key_price, price_data, cache_time)
context_dict['delayed_price'] = price_data
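As a side note, Django also ships cache.get_or_set() (available since Django 1.9), which wraps this get-then-set pattern in a single call. A minimal sketch along those lines, assuming the same get_quote() helper and stock object from the question, and a hypothetical combined key that stores all three values as one tuple:
cache_key_quote = str(stock.id)+'_quote'  # hypothetical combined key
cache_time = 60 * 5
# get_or_set returns the cached tuple if present; otherwise it calls the
# lambda, stores the result for cache_time seconds, and returns it
delayed_price, change, changePct = cache.get_or_set(
    cache_key_quote,
    lambda: get_quote(stock.ticker),
    cache_time,
)
context_dict['delayed_price'] = delayed_price
context_dict['change'] = change
context_dict['changePct'] = changePct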

Related

Is updating a set in dynamodb thread safe?

I am creating/updating a set in DynamoDB from multiple threads. This is the code I am using:
# sends a request to add or update
def update(key, value_to_be_added_to_set):
    # creates the key and adds value to the mySet column if it doesn't exist
    # else it will just add value to mySet
    response = table.update_item(
        Key={
            'key_name': key
        },
        UpdateExpression='ADD mySet :val',
        ExpressionAttributeValues={
            ':val': {value_to_be_added_to_set}
        },
        ReturnConsumedCapacity='INDEXES'
    )
    return response
I couldn't find anything in the AWS documentation as to whether this operation guarantees thread safety. That is, if I add [value=1] and add [value=2] to a set concurrently, the result should always be value={1,2}.
So I wrote this script to test it.
import threading
from random import randrange

import boto3

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('my-test')
key = f'test{randrange(1, 1000000000)}'
max_concurrency = 50
multiplier = 10

# sends a request to add or update
def update(key, value_to_be_added_to_set):
    # this call will create the key and add value to the mySet column if it doesn't exist
    # else it will add value to mySet
    response = table.update_item(
        Key={
            'key_name': key
        },
        UpdateExpression='ADD mySet :val',
        ExpressionAttributeValues={
            ':val': {value_to_be_added_to_set}
        },
        ReturnConsumedCapacity='INDEXES'
    )
    return response

# this method will be called by every thread
# every thread receives a unique number from 0 to 49
def create_or_update_values(num):
    start = num * multiplier
    # thread 0 adds values 1 to 10 to the set,
    # thread 1 adds values 11 to 20,
    # ...
    # thread 49 adds values 491 to 500
    for i in range(start + 1, start + multiplier + 1):
        resp = update(key, i)
    print(f"Thread {num} has finished")

threads = []
# spin up threads
for i in range(0, max_concurrency):
    t = threading.Thread(target=create_or_update_values, args=(i,))
    threads.append(t)
    t.start()
for t in threads:
    t.join()
print("All threads have finished.")

# get mySet, convert it to a list and sort it
l = list(table.get_item(Key={
    'key_name': key
})['Item']['mySet'])
l.sort()

# verify the list contains values from 1 to 500 (l[0] holds 1, l[499] holds 500)
for i in range(1, max_concurrency * multiplier + 1):
    assert int(l[i - 1]) == i
This test passes every time it is run.
Assuming I run 50 concurrent updates against the same key, can I safely assume thread safety here?
DynamoDB Architecture
DynamoDB stores items in partitions, which are located on servers known as storage nodes.
DynamoDB follows a leader/follower architecture in which all writes (and strongly consistent reads) are served by the leader node for that partition group.
Serialized Writes
All writes are serialized by the leader node, meaning all updates will happen in order as they are received by the node. The changes are then replicated to the follower nodes in an eventually consistent manner.
Serializable isolation ensures that the results of multiple concurrent operations are the same as if no operation begins until the previous one has finished. (source)
For more information on DynamoDB Architecture, please refer to this YouTube Video
Yes, individual item updates are serialized.
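In other words, the ADD-based update_item in the question is safe: each call is a single atomic write applied in order by the partition leader, so concurrent adds can never overwrite each other. What would not be safe is a client-side read-modify-write. A rough sketch for contrast, reusing the table and attribute names from the question; the second function is only there to illustrate the anti-pattern:
def add_to_set_atomic(table, key, value):
    # single atomic write; DynamoDB merges the element into mySet server-side
    return table.update_item(
        Key={'key_name': key},
        UpdateExpression='ADD mySet :val',
        ExpressionAttributeValues={':val': {value}},
    )

def add_to_set_read_modify_write(table, key, value):
    # NOT safe under concurrency: two threads can both read the same old set,
    # and whichever put_item lands last silently drops the other's element
    item = table.get_item(Key={'key_name': key}).get('Item', {'key_name': key})
    item['mySet'] = item.get('mySet', set()) | {value}
    table.put_item(Item=item)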

PyFlink on Kinesis Analytics Studio - Cannot convert DataStream to Amazon Kinesis Data Stream

I have a DataStream <pyflink.datastream.data_stream.DataStream> coming from a CoFlatMapFunction (simplified here):
%flink.pyflink
# join two streams and update the rule-set
class MyCoFlatMapFunction(CoFlatMapFunction):

    def open(self, runtime_context: RuntimeContext):
        state_desc = MapStateDescriptor('map', Types.STRING(), Types.BOOLEAN())
        self.state = runtime_context.get_map_state(state_desc)

    def bool_from_user_number(self, user_number: int):
        '''Returns True if user_number is greater than 0, False otherwise.'''
        if user_number > 0:
            return True
        else:
            return False

    def flat_map1(self, value):
        '''This method is called for each element in the first of the connected streams'''
        self.state.put(value[1], self.bool_from_user_number(value[2]))

    def flat_map2(self, value):
        '''This method is called for each element in the second of the connected streams (exchange_server_tickers_data_py)'''
        current_dateTime = datetime.now()
        dt = current_dateTime
        x = value[1]
        y = value[2]
        yield Row(dt, x, y)

def generate__ds(st_env):
    # interpret the updating Tables as DataStreams
    type_info1 = Types.ROW([Types.SQL_TIMESTAMP(), Types.STRING(), Types.INT()])
    ds1 = st_env.to_append_stream(table_1, type_info=type_info1)
    type_info2 = Types.ROW([Types.SQL_TIMESTAMP(), Types.STRING(), Types.STRING()])
    ds2 = st_env.to_append_stream(table_2, type_info=type_info2)

    output_type_info = Types.ROW([Types.PICKLED_BYTE_ARRAY(), Types.STRING(), Types.STRING()])

    # Connect the two streams
    connected_ds = ds1.connect(ds2)
    # Apply the CoFlatMapFunction
    ds = connected_ds.key_by(lambda a: a[0], lambda a: a[0]).flat_map(MyCoFlatMapFunction(), output_type_info)
    return ds

ds = generate__ds(st_env)
However, I am unable to view the output, either by registering it as a view / table, writing it to a sink table, or (the ideal case) using a Kinesis Streams sink to write data from the Flink stream into a Kinesis stream. Firehose would also not fit my use case, as its 30-second latency would be too long. Any help would be appreciated, thanks!
What I have tried:
Registering it as a view / table like so:
# interpret the DataStream as a Table
input_table = st_env.from_data_stream(ds).alias("dt", "x", "y")
z.show(input_table, stream_type="update")
Which gives an error of:
Query schema: [dt: RAW('[B', '...'), x: STRING, y: STRING]
Sink schema: [dt: RAW('[B', ?), x: STRING, y: STRING]
I have also tried writing to a sink table, like so:
%flink.pyflink
# create a sink table to emit results
st_env.execute_sql("""DROP TABLE IF EXISTS table_sink""")
st_env.execute_sql("""
    CREATE TABLE table_sink (
        dt RAW('[B', '...'),
        x VARCHAR(32),
        y STRING
    ) WITH (
        'connector' = 'print'
    )
""")

# convert the Table API table to a SQL view
table = st_env.from_data_stream(ds).alias("dt", "spread", "spread_orderbook")
st_env.execute_sql("""DROP TEMPORARY VIEW IF EXISTS table_api_table""")
st_env.create_temporary_view('table_api_table', table)

# emit the Table API table
st_env.execute_sql("INSERT INTO table_sink SELECT * FROM table_api_table").wait()
I get the error:
org.apache.flink.table.api.ValidationException: Unable to restore the RAW type of class '[B' with serializer snapshot '...'.
I have also tried to build a sink and use sink_to to write the data to an AWS Kinesis Data Stream, as in these Docs, like so:
%flink.pyflink
from pyflink.common.serialization import JsonRowSerializationSchema
from pyflink.datastream.connectors import KinesisStreamsSink

output_type_info = Types.ROW([Types.SQL_TIMESTAMP(), Types.STRING(), Types.STRING()])
serialization_schema = JsonRowSerializationSchema.Builder().with_type_info(output_type_info).build()

# Required
sink_properties = {
    'aws.region': 'eu-west-2'
}

kds_sink = KinesisStreamsSink.builder() \
    .set_kinesis_client_properties(sink_properties) \
    .set_serialization_schema(SimpleStringSchema()) \
    .set_partition_key_generator(PartitionKeyGenerator.fixed()) \
    .set_stream_name("test_stream") \
    .set_fail_on_error(False) \
    .set_max_batch_size(500) \
    .set_max_in_flight_requests(50) \
    .set_max_buffered_requests(10000) \
    .set_max_batch_size_in_bytes(5 * 1024 * 1024) \
    .set_max_time_in_buffer_ms(5000) \
    .set_max_record_size_in_bytes(1 * 1024 * 1024) \
    .build()

ds.sink_to(kds_sink)
Which I assume would work, but KinesisStreamsSink is not found in pyflink.datastream.connectors, and I am unable to find any documentation on how to do this within AWS Kinesis Analytics Studio. Any help would be much appreciated, thank you! How would I go about writing the data to a Kinesis Streams sink / converting it to a table?
Okay, I have figured it out. There were a couple of issues with the particular PyFlink version available on AWS Kinesis Analytics Studio (1.13). The error messages themselves were not that useful, so for anyone running into similar issues I would really recommend viewing the errors in the Flink Web UI. Firstly, the MapStateDescriptor datatypes must be specified using Types.PICKLED_BYTE_ARRAY(). Secondly, not shown in the question, each MapStateDescriptor must have a distinct name. I also found that using Row from pyflink.common threw errors for me; it worked better to switch to Tuples by specifying Types.TUPLE(), as is done in this example. I also had to switch to specifying the output as a tuple.
Another thing I have not done is specify a watermark strategy for the DataStream, which could potentially be done by extracting the timestamp from the first field, and assign watermarks based on knowledge of the stream:
class MyTimestampAssigner(TimestampAssigner):
    def extract_timestamp(self, value, record_timestamp: int) -> int:
        return int(value[0])

watermark_strategy = WatermarkStrategy.for_bounded_out_of_orderness(Duration.of_seconds(5)).with_timestamp_assigner(MyTimestampAssigner())
ds = ds.assign_timestamps_and_watermarks(watermark_strategy)

# the first field has been used for timestamp extraction, and is no longer necessary
# replace first field with a logical event time attribute
table = st_env.from_data_stream(ds, col("dt").rowtime, col('f0'), col('f1'))
But I have instead created a sink table for writing to a Kinesis Data Stream again as an output. In total, the corrected code would look something like this:
from pyflink.table.expressions import col
from pyflink.datastream.state import MapStateDescriptor
from pyflink.datastream.functions import RuntimeContext, CoFlatMapFunction
from pyflink.common.typeinfo import Types
from pyflink.common import Duration as Time, WatermarkStrategy, Duration
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.functions import KeyedProcessFunction, RuntimeContext
from pyflink.datastream.state import ValueStateDescriptor
from datetime import datetime

# Register the tables in the env
table1 = st_env.from_path("sql_table_1")
table2 = st_env.from_path("sql_table_2")

# interpret the updating Tables as DataStreams
type_info1 = Types.TUPLE([Types.SQL_TIMESTAMP(), Types.STRING(), Types.INT()])
ds1 = st_env.to_append_stream(table2, type_info=type_info1)
type_info2 = Types.TUPLE([Types.SQL_TIMESTAMP(), Types.STRING(), Types.STRING()])
ds2 = st_env.to_append_stream(table1, type_info=type_info2)

# join two streams and update the rule-set state
class MyCoFlatMapFunction(CoFlatMapFunction):

    def open(self, runtime_context: RuntimeContext):
        '''This method is called when the function is opened in the runtime. It is used for initialization purposes.'''
        # Map state that we use to maintain the filtering and rules
        state_desc = MapStateDescriptor('map', Types.PICKLED_BYTE_ARRAY(), Types.PICKLED_BYTE_ARRAY())
        self.state = runtime_context.get_map_state(state_desc)
        # maintain state 2
        ob_state_desc = MapStateDescriptor('map_OB', Types.PICKLED_BYTE_ARRAY(), Types.PICKLED_BYTE_ARRAY())
        self.ob_state = runtime_context.get_map_state(ob_state_desc)

    # called on ds1
    def flat_map1(self, value):
        '''This method is called for each element in the first of the connected streams'''
        list_res = value[1].split('|')
        for i in list_res:
            time = datetime.utcnow().replace(microsecond=0)
            yield (time, f"{i}_one")

    # called on ds2
    def flat_map2(self, value):
        '''This method is called for each element in the second of the connected streams'''
        list_res = value[1].split('|')
        for i in list_res:
            time = datetime.utcnow().replace(microsecond=0)
            yield (time, f"{i}_two")

connectedStreams = ds1.connect(ds2)
output_type_info = Types.TUPLE([Types.SQL_TIMESTAMP(), Types.STRING()])
ds = connectedStreams.key_by(lambda value: value[1], lambda value: value[1]).flat_map(MyCoFlatMapFunction(), output_type=output_type_info)

name = 'output_table'
ds_table_name = 'temporary_table_dump'
st_env.execute_sql(f"""DROP TABLE IF EXISTS {name}""")

def create_table(table_name, stream_name, region, stream_initpos):
    return """ CREATE TABLE {0} (
                   f0 TIMESTAMP(3),
                   f1 STRING,
                   WATERMARK FOR f0 AS f0 - INTERVAL '5' SECOND
               )
               WITH (
                   'connector' = 'kinesis',
                   'stream' = '{1}',
                   'aws.region' = '{2}',
                   'scan.stream.initpos' = '{3}',
                   'sink.partitioner-field-delimiter' = ';',
                   'sink.producer.collection-max-count' = '100',
                   'format' = 'json',
                   'json.timestamp-format.standard' = 'ISO-8601'
               ) """.format(
        table_name, stream_name, region, stream_initpos
    )

# Creates a sink table writing to a Kinesis Data Stream
st_env.execute_sql(create_table(name, 'output-test', 'eu-west-2', 'LATEST'))

table = st_env.from_data_stream(ds)
st_env.execute_sql(f"""DROP TEMPORARY VIEW IF EXISTS {ds_table_name}""")
st_env.create_temporary_view(ds_table_name, table)

# emit the Table API table
st_env.execute_sql(f"INSERT INTO {name} SELECT * FROM {ds_table_name}").wait()

DynamoDB Client.Scan() is not returning the LastEvaluatedKey parameter

I have a table with 10k rows.
I'm trying to parse them to change a small thing inside an attribute of each row with Python, so I'm using client.scan(), fetching batches of 10 rows and passing the "LastEvaluatedKey" parameter to the next .scan().
The problem is that after 40 rows scan() stops returning the LastEvaluatedKey, as if the table were only 40 rows long.
I've noticed that when I run the same script against another table that is 3x bigger, the stop happens at 120 rows (also 3x bigger).
The table has On-Demand capacity.
Any idea about this?
client = boto3.client('dynamodb')
resource = boto3.resource('dynamodb')
table = resource.Table(table_name)

remaining = 3961
iteration = 0
limit = 10

while remaining > 0:
    # retrieve Limit
    if iteration == 0:
        response = client.scan(
            TableName=table_name,
            Limit=limit,
            Select='ALL_ATTRIBUTES',
            ReturnConsumedCapacity='TOTAL',
            TotalSegments=123,
            Segment=122,
        )
        key = response["LastEvaluatedKey"]
    else:
        response = client.scan(
            TableName=table_name,
            Limit=limit,
            Select='ALL_ATTRIBUTES',
            ExclusiveStartKey=key,
            ReturnConsumedCapacity='TOTAL',
            TotalSegments=123,
            Segment=122,
        )
        key = response["LastEvaluatedKey"]
    iteration += 1
    for el in response["Items"]:
        print(el)
I think there are two problems:
you seem to be scanning with a limit: try removing that
you are running a parallel scan and always scanning the last segment:
TotalSegments=123
Segment=122
I'm not sure how big your tables are, but 123 segments is quite a lot, and I don't see you scanning any of the other segments, from 0 to 121.
Try this:
iteration = 0
response = client.scan(
    TableName=table_name,
    Select='ALL_ATTRIBUTES',
    ReturnConsumedCapacity='TOTAL'
)
while True:
    iteration += 1
    for el in response["Items"]:
        print(el)
    # LastEvaluatedKey is absent from the last page, so use .get()
    last_key = response.get("LastEvaluatedKey")
    if not last_key:
        break
    response = client.scan(
        TableName=table_name,
        Select='ALL_ATTRIBUTES',
        ExclusiveStartKey=last_key,
        ReturnConsumedCapacity='TOTAL'
    )
I expect the above should work to retrieve all items in your table. If you still want to run a parallel scan afterwards, you can, but you'll have to handle splitting the work into segments yourself, and for it to be efficient you'll also have to run those segments concurrently (more complicated than a sequential scan).
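For completeness, a rough sketch of such a parallel scan, reusing client and table_name from above; the segment count, worker count, and process_item() callback are placeholders you would replace with your own:
from concurrent.futures import ThreadPoolExecutor

total_segments = 8  # placeholder; tune to your table size and throughput

def process_item(item):
    print(item)  # your real per-item work goes here

def scan_segment(segment):
    # each worker scans only its own segment, paginating until
    # DynamoDB stops returning a LastEvaluatedKey
    kwargs = {
        'TableName': table_name,
        'TotalSegments': total_segments,
        'Segment': segment,
    }
    while True:
        response = client.scan(**kwargs)
        for item in response['Items']:
            process_item(item)
        last_key = response.get('LastEvaluatedKey')
        if not last_key:
            break
        kwargs['ExclusiveStartKey'] = last_key

with ThreadPoolExecutor(max_workers=total_segments) as pool:
    futures = [pool.submit(scan_segment, s) for s in range(total_segments)]
    for f in futures:
        f.result()  # re-raise any exception from the worker threads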

Increase recursion limit and stack size in python 2.7

I'm working with large trees and need to increase the recursion limit on Python 2.7.
Using sys.setrecursionlimit(10000) crashes my kernel, so I figured I needed to increase the stack size.
However I don't know how large the stack size should be. I tried 100 MiB like this threading.stack_size(104857600), but the kernel still dies. Giving it 1 GiB throws an error.
I haven't worked with the threading module yet, so am I using it wrong by just putting the above statement at the beginning of my script? I'm not doing any kind of parallel processing; everything is done in the same thread.
My computer has 128 GB of physical RAM, running Windows 10, iPython console in Spyder.
The error displayed is simply:
Kernel died, restarting
Nothing more.
EDIT:
Full code to reproduce the problem. Building the tree works well, though it takes quite a long time; the kernel only dies during the recursive execution of treeToDict() when reading the whole tree into a dictionary. Maybe there is something wrong with the code of that function. The tree is a non-binary tree:
import pandas as pd
import threading
import sys
import random as rd
import itertools as it
import string

threading.stack_size(104857600)
sys.setrecursionlimit(10000)

class treenode:
    # class to build the tree
    def __init__(self,children,name='',weight=0,parent=None,depth=0):
        self.name = name
        self.weight = weight
        self.children = children
        self.parent = parent
        self.depth = depth
        self.parentname = parent.name if parent is not None else ''

def add_child(node,name):
    # add element to the tree
    # if it already exists at the given node increase weight
    # else add a new child
    for i in range(len(node.children)):
        if node.children[i].name == name:
            node.children[i].weight += 1
            newTree = node.children[i]
            break
    else:
        newTree = treenode([],name=name,weight=1,parent=node,depth=node.depth+1)
        node.children.append(newTree)
    return newTree

def treeToDict(t,data):
    # read the tree into a dictionary
    if t.children != []:
        for i in range(len(t.children)):
            data[str(t.depth)+'_'+t.name] = [t.name, t.children[i].name, t.depth, t.weight, t.parentname]
    else:
        data[str(t.depth)+'_'+t.name] = [t.name, '', t.depth, t.weight, t.parentname]
    for i in range(len(t.children)):
        treeToDict(t.children[i],data)

# Create random dataset that leads to very long tree branches:
# A is an index for each set of data B which becomes one branch
rd.seed(23)
testSet = [''.join(l) for l in it.combinations(string.ascii_uppercase[:20],2)]
A = []
B = []
for i in range(10):
    for j in range(rd.randint(10,6000)):
        A.append(i)
        B.append(rd.choice(testSet))
dd = {"A":A,"B":B}
data = pd.DataFrame(dd)

# The maximum length should be above 5500, use another seed if it's not:
print data.groupby('A').count().max()

# Create the tree
root = treenode([],name='0')
for i in range(len(data.values)):
    if i == 0:
        newTree = add_child(root,data.values[i,1])
        oldses = data.values[i,0]
    else:
        if data.values[i,0] == oldses:
            newTree = add_child(newTree,data.values[i,1])
        else:
            newTree = add_child(root,data.values[i,1])
            oldses = data.values[i,0]

result={}
treeToDict(root,result)
PS: I'm aware the treeToDict() function is faulty in that it will overwrite entries because there can be duplicate keys. For this error, however, that bug is unimportant.
In my experience, the problem is not the stack size but the algorithm itself.
It's possible to implement the tree traversal without recursion at all, using a stack-based depth-first (or breadth-first) search.
Python-like pseudo-code might look like this:
stack = []

def traverse_tree(root):
    stack.append(root)
    while stack:
        cur = stack.pop()
        cur.do_some_awesome_stuff()
        # extend, not append: push the children themselves, not a list of them
        stack.extend(cur.get_children())
This approach is incredibly scalable and allows you to deal with any trees.
As further reading you can try this and that.
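Applied to the treenode class and treeToDict() from the question, an iterative rewrite could look roughly like this; it produces the same dictionary layout (and shares the duplicate-key caveat the asker already noted), just without any recursion:
def treeToDictIterative(root, data):
    # depth-first traversal with an explicit stack instead of recursion
    stack = [root]
    while stack:
        t = stack.pop()
        if t.children:
            for child in t.children:
                data[str(t.depth)+'_'+t.name] = [t.name, child.name, t.depth, t.weight, t.parentname]
        else:
            data[str(t.depth)+'_'+t.name] = [t.name, '', t.depth, t.weight, t.parentname]
        stack.extend(t.children)

result = {}
treeToDictIterative(root, result)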

The queryset's `count` is wrong after `extra`

When I use extra in a certain way on a Django queryset (call it qs), the result of qs.count() is different than len(qs.all()). To reproduce:
Make an empty Django project and app, then add a trivial model:
class Baz(models.Model):
    pass
Now make a few objects:
>>> Baz(id=1).save()
>>> Baz(id=2).save()
>>> Baz(id=3).save()
>>> Baz(id=4).save()
Using the extra method to select only some of them produces the expected count:
>>> Baz.objects.extra(where=['id > 2']).count()
2
>>> Baz.objects.extra(where=['-id < -2']).count()
2
But add a select clause to the extra and refer to it in the where clause, and the count is suddenly wrong, even though the result of all() is correct:
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).all()
[<Baz: Baz object>, <Baz: Baz object>] # As expected
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).count()
0 # Should be 2
I think the problem has to do with django.db.models.sql.query.BaseQuery.get_count(). It checks whether the BaseQuery's select or aggregate_select attributes have been set; if so, it uses a subquery. But django.db.models.sql.query.BaseQuery.add_extra adds only to the BaseQuery's extra attribute, not select or aggregate_select.
How can I fix the problem? I know I could just use len(qs.all()), but it would be nice to be able to pass the extra'ed queryset to other parts of the code, and those parts may call count() without knowing that it's broken.
Redefining get_count() and monkeypatching appears to fix the problem:
def get_count(self):
    """
    Performs a COUNT() query using the current filter constraints.
    """
    obj = self.clone()
    if len(self.select) > 1 or self.aggregate_select or self.extra:
        # If a select clause exists, then the query has already started to
        # specify the columns that are to be returned.
        # In this case, we need to use a subquery to evaluate the count.
        from django.db.models.sql.subqueries import AggregateQuery
        subquery = obj
        subquery.clear_ordering(True)
        subquery.clear_limits()
        obj = AggregateQuery(obj.model, obj.connection)
        obj.add_subquery(subquery)
    obj.add_count_column()
    number = obj.get_aggregation()[None]
    # Apply offset and limit constraints manually, since using LIMIT/OFFSET
    # in SQL (in variants that provide them) doesn't change the COUNT
    # output.
    number = max(0, number - self.low_mark)
    if self.high_mark is not None:
        number = min(number, self.high_mark - self.low_mark)
    return number

django.db.models.sql.query.BaseQuery.get_count = quuux.get_count
Testing:
>>> Baz.objects.extra(select={'negid': '0 - id'}, where=['"negid" < -2']).count()
2
Updated to work with Django 1.2.1:
def basequery_get_count(self, using):
    """
    Performs a COUNT() query using the current filter constraints.
    """
    obj = self.clone()
    if len(self.select) > 1 or self.aggregate_select or self.extra:
        # If a select clause exists, then the query has already started to
        # specify the columns that are to be returned.
        # In this case, we need to use a subquery to evaluate the count.
        from django.db.models.sql.subqueries import AggregateQuery
        subquery = obj
        subquery.clear_ordering(True)
        subquery.clear_limits()
        obj = AggregateQuery(obj.model)
        obj.add_subquery(subquery, using=using)
    obj.add_count_column()
    number = obj.get_aggregation(using=using)[None]
    # Apply offset and limit constraints manually, since using LIMIT/OFFSET
    # in SQL (in variants that provide them) doesn't change the COUNT
    # output.
    number = max(0, number - self.low_mark)
    if self.high_mark is not None:
        number = min(number, self.high_mark - self.low_mark)
    return number

models.sql.query.Query.get_count = basequery_get_count
I'm not sure if this fix will have other unintended consequences, however.