RQ status "queued" not changing - what's wrong (running on Heroku)?

I am running a test app on Heroku with RQ. I want to fetch the result of a function queued and run through RQ and send it to my HTML template. My code is as follows:
Worker.py
import os
import redis
from rq import Worker, Queue, Connection

listen = ['high', 'default', 'low']
redis_url = os.getenv('REDIS_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
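If the job never leaves the queued state, one thing to verify (an assumption about the deployment, since the Procfile is not shown in the post) is that a worker dyno is actually running this script. Heroku's RQ guide declares it in the Procfile:

worker: python worker.py

and scales it with heroku ps:scale worker=1.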
App.py
from flask import Flask, render_template
from rq import Queue
from worker import conn  # assumes Worker.py above is saved as worker.py

q = Queue(connection=conn)
app = Flask(__name__, template_folder='template')

def somejob(y):
    import time
    time.sleep(3)
    x = y
    return x

@app.route('/pureredis', methods=['POST', 'GET'])
def pureredis():
    job = q.enqueue(somejob, 5000)
    id = job.id
    print("id", id)
    job = job.fetch(id, connection=conn)
    print("job", job)
    status = job.get_status()
    print('status', status)
    if status in ['queued', 'started', 'deferred', 'failed']:
        return render_template('redis_1.html', result=status)
    elif status == 'finished':
        result = job.result
        print('result', result)
        return render_template('redis_1.html', result=result)
redis_1.html
<html>
<head>
{% if refresh %}
<meta http-equiv="refresh" content="5">
{% endif %}
</head>
<body>{{ result }}</body>
</html>
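Note that the view as written never passes refresh to render_template, so the {% if refresh %} block never renders and the page will not auto-reload; enabling it would look something like:

return render_template('redis_1.html', result=status, refresh=True)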
When I refresh my HTML, my server logs keep showing the job status as queued.
My questions: 1) Why is the RQ status not changing, and what should I do so that the job gets executed? 2) How do I grab the result of somejob() so that I can send it to my HTML through render_template?

The simple snag I found in the code was that the job's status was being checked too soon. Modified code:
import time

@app.route('/pureredis', methods=['POST', 'GET'])
def pureredis():
    job = q.enqueue(somejob, 5000)
    id = job.id
    print("id", id)
    job = job.fetch(id, connection=conn)
    print("job", job)
    time.sleep(4)  # give the worker time to pick up and finish the job
    status = job.get_status()
    print('status', status)
    if status in ['queued', 'started', 'deferred', 'failed']:
        return render_template('redis_1.html', result=status)
    elif status == 'finished':
        result = job.result
        print('result', result)
        return render_template('redis_1.html', result=result)
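Sleeping inside the request keeps the web dyno blocked, though. A common alternative (a sketch of my own, not from the original post; the /result route and its name are hypothetical) is to return immediately and let the page's meta refresh poll a second route for the job's status:

from flask import redirect, url_for
from rq.job import Job

@app.route('/pureredis', methods=['POST', 'GET'])
def pureredis():
    job = q.enqueue(somejob, 5000)
    # Redirect to a polling route instead of sleeping in the request
    return redirect(url_for('result', job_id=job.id))

@app.route('/result/<job_id>')  # hypothetical polling route
def result(job_id):
    job = Job.fetch(job_id, connection=conn)
    if job.get_status() == 'finished':
        return render_template('redis_1.html', result=job.result)
    # Not finished yet: re-render with refresh=True so the page polls again
    return render_template('redis_1.html', result=job.get_status(), refresh=True)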

Related

Is it possible to call socketio.emit from a job scheduled via ApScheduler (BackgroundScheduler)?

Here is what I am trying to do:
I have a Flask app that responds to client requests.
The same app is also running some jobs to collect information from various sources.
This is done using APScheduler's BackgroundScheduler.
At the end of each job, I would like to send the web client an update with the results of the job.
Below is the code, but it is not really working.
Here is the server side
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'secret-key'
socketio = SocketIO(app)

def get_updates(socketio):
    socketio.emit('updates', {'updates': "this is the updates"})
    return

if __name__ == '__main__':
    # Prevents scheduling the job twice: in debug mode this block runs twice
    if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        scheduler = BackgroundScheduler()
        scheduler.add_job(func=get_updates, trigger="interval", seconds=30,
                          kwargs={"socketio": socketio})
        scheduler.start()
    socketio.run(app, host='0.0.0.0', port=5001)
Browser side
<html>
<div>
    <ul id="messages" style="list-style-type:none;"></ul>
    <form id="chat" action="">
        <input id="input" autocomplete="off" /><button>Send</button>
    </form>
</div>
<script>
    var socket = io();
    socket.on('connect', function() {
        socket.send('Hello world');
    });
    var messages = document.getElementById('messages');
    var form = document.getElementById('chat');
    var input = document.getElementById('input');
    form.addEventListener('submit', function(e) {
        e.preventDefault();
        if (input.value) {
            socket.emit('message', input.value);
            input.value = '';
        }
    });
    socket.on('updates', function(msg) {
        console.log("Updates received");
        console.log(msg);
        var item = document.createElement('li');
        item.textContent = msg;
        messages.appendChild(item);
        window.scrollTo(0, document.body.scrollHeight);
    });
</script>
</html>
Update: While reading as much as I could about this, I was watching the console and noticed that this worked once in roughly 50 executions of the scheduled job.
For whoever is wasting a day of reading to solve this, here is the solution.
Add these lines before any imports related to Flask or APScheduler:
import eventlet
eventlet.monkey_patch(thread=True, time=True)
See this and this, and also the eventlet docs regarding monkey patching.
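A minimal sketch of the resulting import order, assuming Flask-SocketIO running with the eventlet async mode:

# Patching must happen before Flask/APScheduler are imported, so that
# their threading and time primitives are replaced by eventlet's
import eventlet
eventlet.monkey_patch(thread=True, time=True)

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, async_mode='eventlet')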

How to send a message over websocket at a certain time

I am new to websockets and have just now gotten a working websocket connection in my application. I want the server to check the database once per minute for upcoming tournaments and, for each connected player registered in a tournament starting that minute, send a message that the tournament with ID xxxxx is starting now. I have the following:
tournaments/consumers.py:
from channels.generic.websocket import WebsocketConsumer
import json

class TournamentLobbyConsumer(WebsocketConsumer):
    def connect(self):
        self.accept()

    def disconnect(self, close_code):
        pass

    def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        print("The websocket received a message: '%s'" % message)
tournaments/routing.py:
from django.conf.urls import url
from . import consumers

websocket_urlpatterns = [
    url('ws/tournaments/$', consumers.TournamentLobbyConsumer),
]
tournaments/templates/index.html:
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8"/>
    <title>Tournament Lobby</title>
</head>
<script>
    var chatSocket = new WebSocket(
        'ws://' + window.location.host +
        '/ws/tournaments/');
    chatSocket.onmessage = function(e) {
        // var data = JSON.parse(e.data);
        // var message = data['message'];
        alert("message received from websocket");
    };
    chatSocket.onclose = function(e) {
        console.error('Chat socket closed unexpectedly');
    };
</script>
{% if tournaments %}
<ul>
    {% for tournament in tournaments %}
    <li>{{ tournament.name }} {{ tournament.start_time }}</li>
    {% endfor %}
</ul>
{% else %}
<p>No tournaments are available</p>
{% endif %}
</html>
When I go to this tournament lobby, I get the message on the server that a "websocket handshake" has taken place. So the websocket connection is working. I am confused now as to how to run a loop on the running server which checks every minute for new tournaments and then sends the message to these connected clients. The tutorials I've done only show a server responding to client requests, but a websocket should be able to go in either direction.
Check out apscheduler for scheduling your jobs. Your code would look like this:
# Function to run every 60 seconds
def check():
    pass

scheduler = BackgroundScheduler()
scheduler.add_job(check, 'cron', second='*/60')  # fires at second 0 of every minute
scheduler.start()
First, define a helper in charge of sending the notification to the channel group from outside a consumer (see the channel layers documentation):
https://channels.readthedocs.io/en/latest/topics/channel_layers.html (Using Outside Of Consumers)
import channels.layers
from asgiref.sync import async_to_sync

def SimpleShipping(data, **kwargs):
    group_name = 'notifications'
    channel_layer = channels.layers.get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        group_name,
        {
            'type': 'notify_event',
            'data': data,
            # other: data,
        }
    )
Then declare the handler method in the consumer (and add the consumer to the notifications group):
import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer

class TournamentLobbyConsumer(WebsocketConsumer):
    room_group_name = 'notifications'

    def connect(self):
        # Join room group
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()

    def disconnect(self, close_code):
        # Leave room group
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )

    # Receive message from WebSocket
    def receive(self, text_data):
        # ...
        pass

    # Receive message from room group (notifications)
    def notify_event(self, event):
        data = event['data']
        # Send message to WebSocket
        self.send(text_data=json.dumps({
            'data': data,
        }))
Now you must choose a mechanism for the background tasks (Celery is recommended: http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html; see also the question Django Celery Periodic Task at specific time):
from projectname.appname.modulename import SimpleShipping

@shared_task()
def daily_reports():
    # Run the query in the database.
    # ...
    data = {'results': 'results'}
    # Notify consumers of the results
    SimpleShipping(data)
Note: I hope this helps. The task you want to implement is quite extensive and should not be taken lightly, but this summary will let you see the flow of the data.
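One prerequisite the answer leaves implicit: group_add and group_send require a channel layer configured in settings.py. A minimal sketch, assuming the channels_redis backend and a local Redis:

# settings.py
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            'hosts': [('127.0.0.1', 6379)],  # assumes a local Redis instance
        },
    },
}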

send message from server to client via socketio

I am posting data to the server and processing it there. Since the page reload takes a while, I would like to send status updates to the client and display them in a div. Things like "first file being processed...", "first file done", "second file being processed"..., and so on.
My first attempt:
@app.route('/', methods=["GET", 'POST'])
def main():
    if request.method == 'POST':
        socketio.emit("message", {'data': 42})
        _urls = request.form['url_area'].split("\n")
        lang = request.form['lang']
        image_list = get_images(_urls, lang)
        return render_template('index.html', images=image_list)
    return render_template('index.html')
<script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/socket.io/1.3.6/socket.io.min.js"></script>
<script type="text/javascript" charset="utf-8">
    var socket = io.connect('http://' + document.domain + ':' + location.port);
    socket.on('connect', function(msg) {
        console.log("connected");
    });
    socket.on('message', function(msg) {
        console.log(msg);
    });
</script>
The client connects correctly. However, no message ever gets from the server to the client.
What do I need to do to make this work?
I've looked at some examples but cannot figure out what I am doing differently. Many examples just show how to send data that the client sent, but I don't want that (right now). For now, I'd just like to send messages from the server.
EDIT
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)

if __name__ == '__main__':
    socketio.run(app)
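One pattern worth trying (a sketch of my own, not from the thread; the process helper and /process route are hypothetical) is to hand the work to socketio.start_background_task and emit progress from there, so the POST returns immediately instead of holding the page load while the messages queue up:

def process(urls, lang):
    # Hypothetical worker: emit one status message per file
    for i, url in enumerate(urls, 1):
        socketio.emit('message', {'data': 'processing file %d...' % i})
        socketio.sleep(0)  # yield so the message is actually flushed
        # ... do the real work for this url here ...

@app.route('/process', methods=['POST'])
def start_processing():
    urls = request.form['url_area'].split('\n')
    socketio.start_background_task(process, urls, request.form['lang'])
    return '', 202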

Heroku worker does not work

I am following Heroku's documentation about workers but have never succeeded. There is a module utils.py whose function will be run as a background job:
utils.py:
import requests

def count_words_at_url(url):
    resp = requests.get(url)
    return len(resp.text.split())
And to put the count_words_at_url job on the worker's queue:
from utils import count_words_at_url
from rq import Queue
from worker import conn
q = Queue(connection=conn)
result = q.enqueue(count_words_at_url, 'http://heroku.com')
Question 1: I would like to know how q.enqueue(...) works. I suppose that the first argument (count_words_at_url) is the function to run and the second argument ('http://heroku.com') will be used as that function's argument.
If I am correct, why is resp = requests.get(url) necessary to get that argument? Why not just return len(url.split())?
EDIT:
Question 2: In order to be able to upload large files (Heroku always terminates such a request due to a timeout), I would like to pass an HTTP request to a function which will be run as a background job:
urls.py:
urlpatterns = [
    url(r'upload/', views.upload),
]
views.py
def upload(request):
    if request.method == 'GET':
        return render(request, 'upload/upload.html')
    # POST
    q = Queue(connection=conn)
    q.enqueue(uploadFile, request)
    return render(request, 'upload/upload.html')

def uploadFile(request):
    # Upload files to Google Storage
    fileToUpload = request.FILES.get('fileToUpload')
    cloudFilename = 'uploads/' + fileToUpload.name
    conn = boto.connect_gs(gs_access_key_id=GS_ACCESS_KEY,
                           gs_secret_access_key=GS_SECRET_KEY)
    bucket = conn.get_bucket(GS_BUCKET_NAME)
    fpic = boto.gs.key.Key(bucket)
    fpic.key = cloudFilename
    # Public read:
    fpic.set_contents_from_file(fileToUpload, policy='public-read')
upload.html:
...
<form method="post" action="/upload/" enctype="multipart/form-data">
    {% csrf_token %}
    <input type="file" name="fileToUpload"><br><br>
    <input type="submit" value="Upload">
</form>
...
I got the error message: Cannot serialize socket object
It turns out that I cannot pass a socket object to a forked process. Is there any other solution for large file uploading? Thanks.
The clue to 1 is in the name of the function. It doesn't say "count words in URL", it says "count words at URL". In other words, it needs to go and get the page that is referenced by the URL, then count the words on it. That's what requests.get() does.
I don't understand what you're asking in point 2.
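On point 2, the Cannot serialize socket object error comes from enqueueing the request object itself: RQ pickles the job's arguments, and a Django request wraps an open socket that cannot be pickled. A sketch of a workaround (an addition of mine, not part of the original answer; upload_to_gs is a hypothetical job function) is to extract the picklable pieces before enqueueing:

def upload(request):
    if request.method == 'GET':
        return render(request, 'upload/upload.html')
    fileToUpload = request.FILES.get('fileToUpload')
    q = Queue(connection=conn)
    # Pass only picklable data (name and bytes) to the job, not the request
    q.enqueue(upload_to_gs, fileToUpload.name, fileToUpload.read())
    return render(request, 'upload/upload.html')

def upload_to_gs(filename, data):
    conn = boto.connect_gs(gs_access_key_id=GS_ACCESS_KEY,
                           gs_secret_access_key=GS_SECRET_KEY)
    bucket = conn.get_bucket(GS_BUCKET_NAME)
    fpic = boto.gs.key.Key(bucket)
    fpic.key = 'uploads/' + filename
    fpic.set_contents_from_string(data, policy='public-read')

This still reads the whole upload inside the web request, so it sidesteps the pickling error but not, by itself, the Heroku request timeout for very large files.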

Flask / Jinja memoization

I have been trying to use Flask-Cache's memoize feature to return only cached results of statusTS(), unless a certain condition is met in another request, in which case the cache entry is deleted.
It is not being deleted though, and the Jinja template still displays Online when it should in fact display Offline because the server has been stopped. So it is returning a cached result when it should not.
@cache.memoize(60)
def statusTS(sid):
    try:
        server = Server.get_info(sid)
        m = Masters.get_info(server.mid)
        if not m.maintenance:
            tsconn = ts3conn(server.mid)
            tsconn.use(str(server.msid))
            command = tsconn.send_command('serverinfo')
            tsconn.disconnect()
            if not command.data[0]['virtualserver_status'] == 'template':
                return 'Online'
        return 'Unknown'
    except:
        return 'Unknown'

app.jinja_env.globals.update(statusTS=statusTS)
Jinja template:
{% if statusTS(server.sid) == 'Online' %}
<span class="label label-success">
Online
</span>{% endif %}
The view that renders this template:
@app.route('/manage/')
def manage():
    if g.user:
        rl = requests_list(g.user.id)
        admin = User.is_admin(g.user.id)
        g.servers = get_servers_by_uid(g.user.id)
        if 's' in request.args:
            s = request.args.get('s')
            s = literal_eval(s)
        else:
            s = None
        return render_template('manage.html',
                               user=g.user,
                               servers=g.servers,
                               admin=admin,
                               globallimit=Config.get_opts('globallimit'),
                               news=News.get_latest(),
                               form=Form(),
                               masters=Masters.get_all(),
                               treply=rl,
                               s=s)
    else:
        return redirect(url_for('login'))
This is what is supposed to delete the entry:
@app.route('/stop/<id>/')
@limiter.limit("3/minute")
def stop(id):
    if g.user:
        if Server.is_owner(g.user.id, id):
            m = Masters.get_info(Server.get_info(id).mid)
            if not m.maintenance:
                cache.delete_memoized(statusTS, id)
                flash(stopTS(id))
                return redirect(url_for('manage'))
            else:
                flash('You cannot stop this server while the master is locked for maintenance - please check for further info.')
                return redirect(url_for('manage'))
        else:
            flash('You do not have permission to modify this server - please contact support.')
            return redirect(url_for('manage'))
    else:
        return redirect(url_for('login'))
Your code looks correct. It could be a bug in whichever cache backend you are using.
Toy example that works:
from flask import Flask
# noinspection PyUnresolvedReferences
from flask.ext.cache import Cache

app = Flask(__name__)
# Check the Configuring Flask-Cache section for more details
cache = Cache(app, config={'CACHE_TYPE': 'simple'})

@cache.memoize(timeout=10)
def statusTS(sid):
    print('Cache Miss: {}.'.format(sid))
    return sid

@app.route('/<int:status_id>')
def status(status_id):
    return 'Status: ' + str(statusTS(status_id))

@app.route('/delete/<int:status_id>')
def delete(status_id):
    print('Deleting Cache')
    cache.delete_memoized(statusTS, status_id)
    return 'Cache Deleted'

if __name__ == '__main__':
    app.debug = True
    app.run()
What I think is happening is that the delete is failing. I would step into the cache.delete_memoized(statusTS, status_id) call and see if it actually finds and deletes the cached function result. Check werkzeug.contrib.cache.SimpleCache if you are using the simple cache type.
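One more thing worth checking (an assumption on my part, not a confirmed diagnosis): memoize keys include the argument values, and id as captured from the URL in stop() is a string, while the template calls statusTS(server.sid) with whatever type sid actually is. If sid is an integer, delete_memoized targets a different key and silently misses; casting first rules that out:

cache.delete_memoized(statusTS, int(id))  # match the argument type used at call time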