Delay in updating live status from a text file in Django

I want to display a live status on a Django HTML template by reading a text file.
The issue is that updates sometimes arrive very late, and I am not sure whether this is due to the Linux server setup or something else.
Also, what is the recommended location for storing live logs that are meant to be read this way? Currently I am using the static directory of the Django server.
function update() {
    var xmlhttp = new XMLHttpRequest();
    xmlhttp.onreadystatechange = function () {
        document.getElementById("live_status").innerHTML = this.readyState; // debug: show current readyState
        if (this.readyState == 4 && this.status == 200) {
            var myObj = JSON.parse(this.responseText);
            var x = "{{ details.name }}" + "<br>";
            for (var i in myObj.users) {
                for (var j in myObj.users[i].live_status) {
                    x += myObj.users[i].live_status[j] + "<br>";
                }
            }
            document.getElementById("live_status").innerHTML = x;
        }
    };
    xmlhttp.open("GET", "{% static 'live_status.txt' %}", true);
    xmlhttp.send();
}
update();
setInterval(update, 10000);
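If the file itself is written on time, a likely culprit for the delay is caching: browsers and front-end web servers happily cache static files between polls. A minimal sketch, assuming caching is the cause, that appends a throwaway query parameter so each poll bypasses the cache (the _t name is arbitrary):
// Sketch: defeat browser/proxy caching when polling a static file.
// The "_t" parameter name is arbitrary; the server ignores unknown params.
xmlhttp.open("GET", "{% static 'live_status.txt' %}" + "?_t=" + Date.now(), true);
xmlhttp.send();
As for where to keep live logs: the static directory is meant for assets collected once at deploy time, so a file that changes at runtime is usually better served from a dedicated Django view that reads the file and returns its contents with no-cache headers.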

Related

Postman / Newman retry in case of failure

In Newman I want to ensure that the response code is correct, the response time is reasonable, and the response values are correct.
In some cases, due to network hiccups or other system conditions, some requests might end up with timeouts or incorrect values that would resolve if the same request were processed a few seconds later.
In such cases, I would like to retry the exact same request X times with a timeout of Y between requests.
If an iteration passes after a retry, I would like the Newman exit code to be 0 (a successful run).
After a few hours I ended up with a function like this:
function retryOnFailure(successCode, numberOfRetrys) {
    var key = request.name + '_counter';
    var execCounter = postman.getEnvironmentVariable(key) || 1;
    var sleepDuration = 1000;
    var waitUntilTime = new Date().getTime() + sleepDuration;
    if (responseCode.code !== successCode && execCounter <= numberOfRetrys) {
        // Busy-wait: the legacy Postman sandbox has no usable setTimeout
        while (new Date().getTime() < waitUntilTime) {
            // Do nothing -> wait
        }
        console.log('Retrying: ' + request.name +
            '\nGot: ' + responseCode.code + ' Expected: ' + successCode +
            '\nWaited: ' + sleepDuration / 1000 + 'sec' +
            '\nRetry Number: ' + execCounter + ' of ' + numberOfRetrys);
        execCounter++;
        postman.setEnvironmentVariable(key, execCounter);
        postman.setNextRequest(request.name);
    }
}
Usage:
retryOnFailure(404, 4);
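Note that the counter is never cleared on success, so a later run of the collection inherits the old count. A hedged addition (not part of the original answer) that could sit at the top of retryOnFailure:
// Assumption: clear the per-request counter once the response matches,
// so the next collection run starts with a fresh retry budget.
if (responseCode.code === successCode) {
    postman.clearEnvironmentVariable(request.name + '_counter');
}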
You can set up a request workflow like this:
Create a collection with a request, then:
In the pre-request tab you can implement a counter:
// Counter for number of requests
var counter = environment.counter ? _.parseInt(environment.counter) + 1 : 1;
postman.setEnvironmentVariable("counter", counter);
Your Tests tab would look like this:
const success = (responseCode.code === 200);
if (success || environment.counter >= X) {
    // Stop execution
    tests["Status code is 200"] = success;
    postman.setNextRequest(null);
} else {
    // Retry the same request
    postman.setNextRequest("Name of this request");
}
A timeout for the request itself can be configured with the newman CLI:
newman run myCollection.json --timeout-request Y
Here is a reusable function for the same purpose (a usage sketch follows the function below):
postmanFunctions.common.retryOnFailure(predicate, retryCount, waitBetweenRetrySec, rerouteRequestName, postmanAssertions);
The predicate function decides success or failure.
The assertion function holds all the Postman assertions.
If the reroute name is blank, the assertions are executed after the retry attempts.
This gives flexible polling with a retry count and wait time (once the predicate passes, there is no more polling or reflow).
A max-reflow counter (an environment variable) limits the number of flow jumps to avoid an infinite loop.
Store the function below in a global or environment variable:
() => {
    var sleep = (sleepDuration) => {
        var startTime = new Date().getTime();
        while (new Date().getTime() - startTime < sleepDuration) {}
    }
    var sleepByAsyncDelayTime = () => {
        var sleepDuration = postman.getEnvironmentVariable('asyncDelayTime') || 0;
        sleep(sleepDuration);
    }
    var retryOnFailure = (predicate, numberOfRetrys, sleepDuration, reRouteRequestName, postmanAssertions) => {
        var retryCountPerReq_key = request.name + '_retry_count';
        var retryCountPerReq = pm.environment.get(retryCountPerReq_key) || 0;
        var reflowCountPerReq_key = request.name + '_reflow_count';
        var reflowCountPerReq = pm.environment.get(reflowCountPerReq_key) || 0;
        var totalReflowCount_key = 'totalReflowCount';
        var totalReflowCount = pm.environment.get(totalReflowCount_key) || 0;
        var maxReflowCounter = postman.getEnvironmentVariable('maxReflowCounter') || 0;
        var maxReflowCounterPerReq = postman.getEnvironmentVariable('maxReflowCounterPerReq') || 0;
        function clearAndExit() {
            pm.environment.unset(retryCountPerReq_key);
            pm.environment.unset(reflowCountPerReq_key);
            postmanAssertions();
        }
        function retry() {
            sleep(sleepDuration);
            pm.environment.set(retryCountPerReq_key, ++retryCountPerReq);
            postman.setNextRequest(request.name);
        }
        function reFlow() {
            if (totalReflowCount < maxReflowCounter && reflowCountPerReq < maxReflowCounterPerReq) {
                pm.environment.unset(retryCountPerReq_key);
                pm.environment.set(totalReflowCount_key, ++totalReflowCount);
                pm.environment.set(reflowCountPerReq_key, ++reflowCountPerReq);
                postman.setNextRequest(reRouteRequestName);
            } else clearAndExit();
        }
        if (predicate()) clearAndExit();
        else if (retryCountPerReq < numberOfRetrys) retry();
        else if (reRouteRequestName != '') reFlow();
        else clearAndExit();
    }
    return {
        common: {
            sleepByAsyncDelayTime,
            sleep,
            retryOnFailure
        }
    };
}
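A usage sketch for a request's Tests tab, assuming the source above was stored in a global named postmanFunctionsSource (the variable name, retry counts, and callbacks here are illustrative, not from the original answer):
// Rebuild the helper object from the stored source, then poll.
var postmanFunctions = eval(pm.globals.get('postmanFunctionsSource'))();
postmanFunctions.common.retryOnFailure(
    function () { return responseCode.code === 200; }, // predicate
    3,     // retry up to 3 times
    2000,  // wait 2 seconds between retries
    '',    // no reroute request: run assertions once retries are spent
    function () {
        tests['Status code is 200'] = responseCode.code === 200;
    }
);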
Here is my retry function, which I define in the collection pre-request script. It only works when tests are executed via the collection:
Utils = {
    wait: function (that, sleepDuration) {
        // Note: this schedules a no-op and returns immediately; it does not block.
        that.setTimeout(() => {}, sleepDuration);
    },
    withRetry: function (that, expectedHttpStatus, maxNumberOfTries, sleepBetweenTries, businessRetryConditionCallBack, endRetryCallback) {
        if (!that.pm.environment.get("collection_tries")) {
            that.pm.environment.set("collection_tries", 1);
        }
        if (((that.pm.response.code != expectedHttpStatus) || businessRetryConditionCallBack())
                && (that.pm.environment.get("collection_tries") <= maxNumberOfTries)) {
            var tries = parseInt(that.pm.environment.get("collection_tries"), 10);
            that.pm.environment.set("collection_tries", tries + 1);
            Utils.wait(that, sleepBetweenTries);
            that.postman.setNextRequest(that.request.name);
        } else {
            if (businessRetryConditionCallBack()) {
                // Do not move on to the next request
                that.postman.setNextRequest(null);
            }
            that.pm.environment.unset("collection_tries");
            endRetryCallback();
        }
    }
};
And here is how to use it in a request's pre-request or test scripts:
var expectedHttpStatus = 200;
var maxNumberOfTries = 5;
var sleepBetweenTries = 5000;
Utils.withRetry(this, expectedHttpStatus, maxNumberOfTries, sleepBetweenTries, function () {
    // Retry business condition callback: retry while the response is empty
    return pm.response.json().length <= 0;
}, function () {
    // End retry callback
    pm.test("Has one result", function () {
        pm.expect(pm.response.json().length).to.be.above(0);
    });
});
This code retries the request as long as the HTTP status differs from expectedHttpStatus or businessRetryConditionCallBack returns true, AND maxNumberOfTries has not been reached.
Once retries stop, businessRetryConditionCallBack is checked one last time: if it still returns true, collection execution is stopped. In either case, the end-retry callback then runs the final assertions.

How to embed Wufoo form in Ember application

My client has asked me to integrate a Wufoo form into their Ember application, and provided the following JS (anonymized):
<script type="text/javascript">
var abc123;
(function(d, t) {
    var s = d.createElement(t), options = {
        'userName': 'example',
        'formHash': 'abc123',
        'autoResize': true,
        'height': '491',
        'async': true,
        'host': 'wufoo.com',
        'header': 'show',
        'ssl': true
    };
    s.src = ('https:' == d.location.protocol ? 'https://' : 'http://') + 'www.wufoo.com/scripts/embed/form.js';
    s.onload = s.onreadystatechange = function() {
        var rs = this.readyState;
        if (rs) if (rs != 'complete') if (rs != 'loaded') return;
        try {
            abc123 = new WufooForm();
            abc123.initialize(options);
            abc123.display();
        } catch (e) {}
    };
    var scr = d.getElementsByTagName(t)[0], par = scr.parentNode;
    par.insertBefore(s, scr);
})(document, 'script');
</script>
I've tried including it in index.html and also creating a custom component, but keep getting an error from Wufoo:
TypeError: Cannot set property 'innerHTML' of null
Is there a way to use the provided Wufoo JS in an Ember.js app?
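The error suggests the embed script runs before the container element it writes into exists in the DOM. A minimal sketch, assuming a classic Ember 1.x component whose didInsertElement hook injects the script only after the component has rendered (the component name and structure are assumptions, not from the original thread):
// Sketch only: run the Wufoo embed after this component is in the DOM.
App.WufooFormComponent = Ember.Component.extend({
    didInsertElement: function () {
        var options = {
            userName: 'example', formHash: 'abc123', autoResize: true,
            height: '491', async: true, host: 'wufoo.com',
            header: 'show', ssl: true
        };
        var s = document.createElement('script');
        s.src = 'https://www.wufoo.com/scripts/embed/form.js';
        s.onload = function () {
            // WufooForm is defined by form.js once it loads
            var form = new WufooForm();
            form.initialize(options);
            form.display();
        };
        this.get('element').appendChild(s);
    }
});
The component would be used in a template as {{wufoo-form}}, and its template should contain the container element the Wufoo loader writes into (it appears to target an element whose id includes the form hash, e.g. wufoo-abc123); that detail is worth verifying against the unanonymized embed code.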

IE9: store.find is failing

I can't seem to fetch new data in Internet Explorer 9. For the purpose of an example I test the store this way:
App.__container__.lookup('store:main').find('style')
The only error I receive is the following:
SCRIPT5022: Error: Assertion Failed: [object Object]
Does Ember Data work out of the box (without polyfills, ...) in Internet Explorer 9?
versions:
Ember: 1.9.1
Ember-data: 1.0.0-beta.12
Problem solved. When doing a cross-domain AJAX request with jQuery, this normally happens through the XMLHttpRequest object.
On IE8-9 that object has no CORS support, so those browsers use XDomainRequest instead. The simplest fix for this is adding: https://github.com/MoonScript/jQuery-ajaxTransport-XDomainRequest.
ember-data works out of the box with IE8+. According to this issue:
We've been supporting IE8 with our platform (built on Ember) for a while now. Things I know:
The es5-shim/sham is not needed; it's polyfilled by Ember and Ember Data.
You will need the shim if you want additional things like .bind() on a function; in that case you must prepend it to the vendor file (using the Brocfile), and we only include the shim for that purpose, not the sham.
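For an ember-cli app, pulling the XDomainRequest transport into the vendor file looks roughly like this (a sketch; the exact path is an assumption and depends on how the plugin was installed, e.g. via Bower):
// Brocfile.js -- sketch, the path below is an assumption
app.import('bower_components/jquery-ajaxtransport-xdomainrequest/jQuery.XDomainRequest.js');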
Solution Synthesis
Reason: on IE8-9, cross-domain AJAX cannot go through the XMLHttpRequest object; those browsers use XDomainRequest instead.
Solution: add the open-source jQuery-ajaxTransport-XDomainRequest plugin, reproduced below.
jQuery-ajaxTransport-XDomainRequest.js
/*!
 * jQuery-ajaxTransport-XDomainRequest - v1.0.4 - 2015-03-05
 * https://github.com/MoonScript/jQuery-ajaxTransport-XDomainRequest
 * Copyright (c) 2015 Jason Moon (#JSONMOON)
 * Licensed MIT (/blob/master/LICENSE.txt)
 */
(function(factory) {
    if (typeof define === 'function' && define.amd) {
        // AMD. Register as anonymous module.
        define(['jquery'], factory);
    } else if (typeof exports === 'object') {
        // CommonJS
        module.exports = factory(require('jquery'));
    } else {
        // Browser globals.
        factory(jQuery);
    }
}(function($) {
    // Only continue if we're on IE8/IE9 with jQuery 1.5+ (contains the ajaxTransport function)
    if ($.support.cors || !$.ajaxTransport || !window.XDomainRequest) {
        return $;
    }
    var httpRegEx = /^(https?:)?\/\//i;
    var getOrPostRegEx = /^get|post$/i;
    var sameSchemeRegEx = new RegExp('^(\/\/|' + location.protocol + ')', 'i');
    // ajaxTransport exists in jQuery 1.5+
    $.ajaxTransport('* text html xml json', function(options, userOptions, jqXHR) {
        // Only continue if the request is: asynchronous, uses GET or POST, has HTTP or HTTPS protocol, and has the same scheme as the calling page
        if (!options.crossDomain || !options.async || !getOrPostRegEx.test(options.type) || !httpRegEx.test(options.url) || !sameSchemeRegEx.test(options.url)) {
            return;
        }
        var xdr = null;
        return {
            send: function(headers, complete) {
                var postData = '';
                var userType = (userOptions.dataType || '').toLowerCase();
                xdr = new XDomainRequest();
                if (/^\d+$/.test(userOptions.timeout)) {
                    xdr.timeout = userOptions.timeout;
                }
                xdr.ontimeout = function() {
                    complete(500, 'timeout');
                };
                xdr.onload = function() {
                    var allResponseHeaders = 'Content-Length: ' + xdr.responseText.length + '\r\nContent-Type: ' + xdr.contentType;
                    var status = {
                        code: 200,
                        message: 'success'
                    };
                    var responses = {
                        text: xdr.responseText
                    };
                    try {
                        if (userType === 'html' || /text\/html/i.test(xdr.contentType)) {
                            responses.html = xdr.responseText;
                        } else if (userType === 'json' || (userType !== 'text' && /\/json/i.test(xdr.contentType))) {
                            try {
                                responses.json = $.parseJSON(xdr.responseText);
                            } catch(e) {
                                status.code = 500;
                                status.message = 'parseerror';
                                //throw 'Invalid JSON: ' + xdr.responseText;
                            }
                        } else if (userType === 'xml' || (userType !== 'text' && /\/xml/i.test(xdr.contentType))) {
                            var doc = new ActiveXObject('Microsoft.XMLDOM');
                            doc.async = false;
                            try {
                                doc.loadXML(xdr.responseText);
                            } catch(e) {
                                doc = undefined;
                            }
                            if (!doc || !doc.documentElement || doc.getElementsByTagName('parsererror').length) {
                                status.code = 500;
                                status.message = 'parseerror';
                                throw 'Invalid XML: ' + xdr.responseText;
                            }
                            responses.xml = doc;
                        }
                    } catch(parseMessage) {
                        throw parseMessage;
                    } finally {
                        complete(status.code, status.message, responses, allResponseHeaders);
                    }
                };
                // set an empty handler for 'onprogress' so requests don't get aborted
                xdr.onprogress = function(){};
                xdr.onerror = function() {
                    complete(500, 'error', {
                        text: xdr.responseText
                    });
                };
                if (userOptions.data) {
                    postData = ($.type(userOptions.data) === 'string') ? userOptions.data : $.param(userOptions.data);
                }
                xdr.open(options.type, options.url);
                xdr.send(postData);
            },
            abort: function() {
                if (xdr) {
                    xdr.abort();
                }
            }
        };
    });
    return $;
}));

How to set content-length-range for s3 browser upload via boto

The Issue
I'm trying to upload images directly to S3 from the browser and am getting stuck applying the content-length-range permission via boto's S3Connection.generate_url method.
There's plenty of information about signing POST forms, setting policies in general and even a heroku method for doing a similar submission. What I can't figure out for the life of me is how to add the "content-length-range" to the signed url.
With boto's generate_url method (example below), I can specify policy headers and have got it working for normal uploads. What I can't seem to add is a policy restriction on max file size.
Server Signing Code
## django request handler
from boto.s3.connection import S3Connection
from django.conf import settings
from django.http import HttpResponse
import mimetypes
import json

def get_signing_url(request):
    conn = S3Connection(settings.S3_ACCESS_KEY, settings.S3_SECRET_KEY)
    object_name = request.GET['objectName']
    content_type = mimetypes.guess_type(object_name)[0]
    signed_url = conn.generate_url(
        expires_in=300,
        method="PUT",
        bucket=settings.BUCKET_NAME,
        key=object_name,
        headers={'Content-Type': content_type, 'x-amz-acl': 'public-read'})
    return HttpResponse(json.dumps({'signedUrl': signed_url}))
On the client, I'm using the ReactS3Uploader, which is based on tadruj's s3upload.js script. It shouldn't affect anything, as it seems to just pass along whatever the signed URL covers, but it's copied below for completeness.
ReactS3Uploader JS Code (simplified)
uploadFile: function() {
    new S3Upload({
        fileElement: this.getDOMNode(),
        signingUrl: '/api/get_signing_url/',
        onProgress: this.props.onProgress,
        onFinishS3Put: this.props.onFinish,
        onError: this.props.onError
    });
},
render: function() {
    return this.transferPropsTo(
        React.DOM.input({type: 'file', onChange: this.uploadFile})
    );
}
S3upload.js
S3Upload.prototype.signingUrl = '/sign-s3';
S3Upload.prototype.fileElement = null;
S3Upload.prototype.onFinishS3Put = function(signResult) {
    return console.log('base.onFinishS3Put()', signResult.publicUrl);
};
S3Upload.prototype.onProgress = function(percent, status) {
    return console.log('base.onProgress()', percent, status);
};
S3Upload.prototype.onError = function(status) {
    return console.log('base.onError()', status);
};
function S3Upload(options) {
    if (options == null) {
        options = {};
    }
    for (var option in options) {
        if (options.hasOwnProperty(option)) {
            this[option] = options[option];
        }
    }
    this.handleFileSelect(this.fileElement);
}
S3Upload.prototype.handleFileSelect = function(fileElement) {
    this.onProgress(0, 'Upload started.');
    var files = fileElement.files;
    var result = [];
    for (var i = 0; i < files.length; i++) {
        var f = files[i];
        result.push(this.uploadFile(f));
    }
    return result;
};
S3Upload.prototype.createCORSRequest = function(method, url) {
    var xhr = new XMLHttpRequest();
    if (xhr.withCredentials != null) {
        xhr.open(method, url, true);
    } else if (typeof XDomainRequest !== "undefined") {
        xhr = new XDomainRequest();
        xhr.open(method, url);
    } else {
        xhr = null;
    }
    return xhr;
};
S3Upload.prototype.executeOnSignedUrl = function(file, callback) {
    var xhr = new XMLHttpRequest();
    xhr.open('GET', this.signingUrl + '&objectName=' + file.name, true);
    xhr.overrideMimeType && xhr.overrideMimeType('text/plain; charset=x-user-defined');
    xhr.onreadystatechange = function() {
        if (xhr.readyState === 4 && xhr.status === 200) {
            var result;
            try {
                result = JSON.parse(xhr.responseText);
            } catch (error) {
                this.onError('Invalid signing server response JSON: ' + xhr.responseText);
                return false;
            }
            return callback(result);
        } else if (xhr.readyState === 4 && xhr.status !== 200) {
            return this.onError('Could not contact request signing server. Status = ' + xhr.status);
        }
    }.bind(this);
    return xhr.send();
};
S3Upload.prototype.uploadToS3 = function(file, signResult) {
    var xhr = this.createCORSRequest('PUT', signResult.signedUrl);
    if (!xhr) {
        this.onError('CORS not supported');
    } else {
        xhr.onload = function() {
            if (xhr.status === 200) {
                this.onProgress(100, 'Upload completed.');
                return this.onFinishS3Put(signResult);
            } else {
                return this.onError('Upload error: ' + xhr.status);
            }
        }.bind(this);
        xhr.onerror = function() {
            return this.onError('XHR error.');
        }.bind(this);
        xhr.upload.onprogress = function(e) {
            var percentLoaded;
            if (e.lengthComputable) {
                percentLoaded = Math.round((e.loaded / e.total) * 100);
                return this.onProgress(percentLoaded, percentLoaded === 100 ? 'Finalizing.' : 'Uploading.');
            }
        }.bind(this);
    }
    xhr.setRequestHeader('Content-Type', file.type);
    xhr.setRequestHeader('x-amz-acl', 'public-read');
    return xhr.send(file);
};
S3Upload.prototype.uploadFile = function(file) {
    return this.executeOnSignedUrl(file, function(signResult) {
        return this.uploadToS3(file, signResult);
    }.bind(this));
};
module.exports = S3Upload;
Any help would be greatly appreciated here as I've been banging my head against the wall for quite a few hours now.
You can't add it to a signed PUT URL. This only works with the signed policy that goes along with a POST because the two mechanisms are very different.
Signing a URL is a lossy process (for lack of a better term). You generate the string to sign, then sign it. You send the signature with the request, but you discard the string to sign rather than sending it. S3 then reconstructs what the string to sign should have been for the request it receives and generates the signature you should have sent. There is only one correct answer, and S3 doesn't know what string you actually signed. The signature either matches or it doesn't; if it doesn't, it may be because you built the string to sign incorrectly or because your credentials don't match, and S3 can't tell which is the case. It only knows, based on the request you sent, what the string to sign and the signature should have been.
With that in mind, for content-length-range to work with a signed URL, the client would need to actually send such a header with the request... which doesn't make a lot of sense.
Conversely, with POST uploads, there is more information communicated to S3. It's not only going on whether your signature is valid, it also has your policy document... so it's possible to include directives -- policies -- with the request. They are protected from alteration by the signature, but they aren't encrypted or hashed -- the entire policy is readable by S3 (so, by contrast, we'll call this the opposite, "lossless.")
This difference is why you can't do what you are trying to do with PUT while you can with POST.
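To make the contrast concrete, here is a minimal browser-side sketch of the POST mechanism, assuming the signing endpoint is changed to return the policy fields (key, AWSAccessKeyId, the base64 policy, and its signature); the function name and field set here are illustrative, and on the server boto can produce them, e.g. via S3Connection.build_post_form_args, whose max_content_length argument maps to content-length-range:
// Sketch: POST the file plus the signed policy as multipart/form-data.
// Unlike a signed PUT, the policy document travels with the request, so
// S3 can enforce conditions such as content-length-range.
function postToS3(fields, file, bucketUrl) {
    var form = new FormData();
    Object.keys(fields).forEach(function (name) {
        form.append(name, fields[name]); // key, AWSAccessKeyId, policy, signature, acl...
    });
    form.append('file', file); // the file field must come last
    var xhr = new XMLHttpRequest();
    xhr.open('POST', bucketUrl); // e.g. https://<bucket>.s3.amazonaws.com/
    xhr.send(form);
    return xhr;
}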

Upload an image to Drupal 7 / Services 3 from Titanium?

This must be close but I can't figure out what's causing the error.
In my Titanium app, I have a Webview with a canvas element and this code:
function getImageData() {
    return canvas.toDataURL('image/png');
}
I am moving that data to the Titanium app like this:
var imageBase64data = webview.evalJS('getImageData()')
The data looks good starting with "data:image/png;base64,"...
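One detail worth noting (an assumption on my part, not something the thread confirms): toDataURL returns a full data URL, and Services generally wants the raw base64 payload, so the prefix may need stripping before upload:
// Hypothetical pre-processing step: keep only the base64 payload.
var base64only = imageBase64data.replace(/^data:image\/png;base64,/, '');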
Then in Titanium, I have a logged-in Drupal session and call this function:
function uploadImage(imageBase64data, callback) {
    var url = REST_PATH + "file.json";
    var file = {
        filename: utils.createRandomString() + ".png",
        file: imageBase64data
        // uid: Titanium.App.Properties.getString("userUid"),
        // filesize: "" + Titanium.Utils.base64decode(imageBase64data).length,
    };
    var xhr = Titanium.Network.createHTTPClient({timeout: 30000});
    xhr.onload = function() {
        if (xhr.status == 200) {
            var response = xhr.responseText;
            callback(response);
        }
    };
    xhr.onerror = function(e) {
        alert("There was an error: " + e.error);
        Ti.API.info(JSON.stringify(this));
    };
    xhr.open("POST", url);
    // Titanium expects request headers to be set after open() and before send()
    xhr.setRequestHeader("Content-Type", "application/json; charset=utf-8");
    var authString = Titanium.App.Properties.getString("userSessionName") + '=' + Titanium.App.Properties.getString("userSessionId");
    xhr.setRequestHeader("Cookie", authString);
    xhr.send(file);
}
xhr.onerror is being called with e.error = "undefined"
The trace looks like this:
{
    "responseData": {},
    "readyState": 4,
    "connected": true,
    "UNSENT": 0,
    "DONE": 4,
    "HEADERS_RECEIVED": 2,
    "OPENED": 1,
    "LOADING": 3,
    "responseText": null,
    "status": 406
}
I think authentication is working because I was previously getting a "need authentication" error until I added the Cookie header.
That was with the installation provided by Drupanium. I then did a fresh Drupal and a fresh Services 3 install, and now my file uploads nicely.
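For anyone hitting the 406 on an otherwise working install, one thing to double-check (an assumption, not part of the resolution above): Titanium form-encodes plain objects passed to send(), which conflicts with the application/json Content-Type, so serializing explicitly may be required:
// Hypothetical variant: send the payload as an actual JSON string.
xhr.send(JSON.stringify(file));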