Python restkit module connection returns error - python-2.7

I'm attempting to use a script I found online (https://confluence.atlassian.com/display/DOCSPRINT/The+Simplest+Possible+JIRA+REST+Examples), probably 5+ years old, to access the Jira REST API. I have all the modules installed in Python 2.7. However, when I run the script I get an error that SimplePool cannot be imported. Having done my Goodiligence (Google searching), I see that SimplePool is deprecated in restkit 4.2.2 (which is the version I have installed). One doc I found (http://pydoc.net/Python/restkit/2.3.0/restkit.pool.simple/) says to use TConnectionManager, which I did with no success; I still get a similar error. Then I stumbled upon another doc (http://benoitc.github.io/restkit/pool.html) that says to use ConnectionPool, and I still get a similar error. I appreciate any direction given. Here is my code:
import simplejson as json
from restkit import *  # added this line after finding the last document
from restkit import Resource, BasicAuth, request
from socketpool import ConnectionPool

SimplePool = ConnectionPool

def rest_test(server_base_url, user, password, issue_key):
    '''
    Use restkit to make a REST request to JIRA
    '''
    verbose = False
    # A pool of connections
    pool = SimplePool(factory=Connection)
    # This sends the user and password with the request.
    auth = BasicAuth(user, password)
    resource_name = "issue"
    complete_url = "%s/rest/api/latest/%s/%s" % (server_base_url, resource_name, issue_key)
    resource = Resource(complete_url, pool_instance=pool, filters=[auth])
    try:
        response = resource.get(headers={'Content-Type': 'application/json'})
    except Exception, ex:
        # ex.msg is a string that looks like a dictionary
        print "EXCEPTION: %s " % ex.msg
        return
    # Most successful responses have an HTTP 200 status
    if response.status_int != 200:
        print "ERROR: status %s" % response.status_int
        return
    # Convert the text in the reply into a Python dictionary
    issue = json.loads(response.body_string())
    # Pretty-print the JSON
    if verbose:
        print json.dumps(issue, sort_keys=True, indent=4)
    # The properties of the issue include:
    # self, html, key, transitions, expand, fields
    print "Issue key: %s" % issue['key']
    fields = issue['fields']
    for field_name in fields:
        field_object = fields[field_name]
        print "Field %s = %s" % (field_name, field_object['type'])
        # The type of the value of a field depends on the type of the field
        if field_name in ["summary"]:
            print " Value = %s" % field_object['value']

if __name__ == '__main__':
    user = 'myuname'
    password = '*****'
    server_url = 'http://jira.mysite.com/'
    issue_key = 'JRA-219'
    rest_test(server_url, user, password, issue_key)
Here is the error returned:
File "simplest_client.py", line 30, in rest_test
pool = SimplePool()
TypeError: __init__() takes at least 2 arguments (1 given)
EXCEPTION: <?xml version="1.0" encoding="UTF-8" standalone="yes"?><status><status-code>404</status-code><message>null for uri: http://jira.mysite.com//rest/api/latest/issue/JRA-219</message></status>
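For reference, here is a minimal sketch of how the pool and resource are wired up against restkit 4.x with socketpool, following the last document linked above; the Jira URL and credentials are just the placeholders from the question, and I have not verified this against a live Jira instance:
from restkit import Resource, BasicAuth
from restkit.conn import Connection
from socketpool import ConnectionPool

# socketpool's ConnectionPool needs a factory; restkit ships one in restkit.conn
pool = ConnectionPool(factory=Connection)
auth = BasicAuth('myuname', '*****')

# newer restkit examples pass pool= instead of the pool_instance= used above
resource = Resource('http://jira.mysite.com/rest/api/latest/issue/JRA-219',
                    pool=pool, filters=[auth])
response = resource.get(headers={'Content-Type': 'application/json'})
print(response.status_int)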

Related

How to return Flask-SqlAlchemy error details

I'm using Flask 1.0, Flask-SqlAlchemy 2 and Angular 7.
When SqlAlchemy throws an error I want to show a tailored error message in the frontend.
There is a section in the official Flask documentation about how to handle errors and also a similar question here on SO, that is related to Flask-Restless though. And yet I'm not able to connect the dots.
When SqlAlchemy throws an error, it looks something like this:
DETAIL: Key (id)=(123) is not present in table "foo".
I'm returning the error to the route:
try:
    db.session.commit()
except Exception as error:
    db.session.flush()
    db.session.rollback()
    return error
In the route I'm checking if it is an error:
if status == True:
    return jsonify({"success": True}), 201
else:
    return error_response(500, str(status))
And my error_response class looks like this:
def error_response(status_code, message=None):
    payload = {"error": HTTP_STATUS_CODES.get(status_code, "Unknown error")}
    if message:
        payload["message"] = message
    response = jsonify(payload)
    response.status_code = status_code
    return response
But the response json just contains a generic error message:
"message": "Http failure response for http://127.0.0.1:5000/database/add_foo: 0 Unknown Error"
You can read the error object and create your own custom message out of it. For all the details, try printing error.__dict__ in the console.
For example:
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import Integer, String

db_uri = 'sqlite:///'
engine = create_engine(db_uri)
conn = engine.connect()

# Create a metadata instance
meta = MetaData(engine)
table = Table('user', meta,
              Column('id', Integer, primary_key=True),
              Column('l_name', String),
              Column('f_name', String))
meta.create_all()

# Insert data
conn.execute(table.insert(), [
    {'id': 1, 'l_name': 'Hi', 'f_name': 'bob'},
    {'id': 2, 'l_name': 'Hello', 'f_name': 'john'},
    {'id': 3, 'l_name': 'yo', 'f_name': 'bob-john'}])
result = conn.execute("SELECT * FROM user")
for res in result:
    print(res)

# Intentionally violating the unique (primary key) constraint
try:
    ins = table.insert().values(
        id=3,
        l_name='Hello',
        f_name='World')
    # conn = engine.connect()
    conn.execute(ins)
except Exception as error:
    print(str(error.orig) + " for parameters " + str(error.params))
The output will be the underlying database error (error.orig) followed by the offending parameters (error.params).
Turns out the error was returned when I tried to do a bulk save:
db.session.bulk_save_objects(companies_to_add, return_defaults = True)
I was under the impression that an error would just be raised when performing either
db.session.commit()
or
db.session.flush()
Apparently I was wrong. I now put the bulk save in a try block:
try:
    db.session.bulk_save_objects(companies_to_add, return_defaults=True)
except Exception as error:
    db.session.rollback()
    return error
Now I'm able to catch the error in the frontend.
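Tying the two answers together, here is a minimal sketch of a route that catches the SQLAlchemy error and feeds error.orig into the error_response helper shown above, so the frontend sees the database detail instead of the generic message. The route path matches the URL from the question's error message, while add_foo, build_objects_to_add, and companies_to_add are stand-ins for your own view and objects:
from flask import jsonify
from sqlalchemy.exc import IntegrityError, SQLAlchemyError

@app.route("/database/add_foo", methods=["POST"])
def add_foo():
    companies_to_add = build_objects_to_add()  # hypothetical helper producing model instances
    try:
        db.session.bulk_save_objects(companies_to_add, return_defaults=True)
        db.session.commit()
    except IntegrityError as error:
        db.session.rollback()
        # str(error.orig) carries the DB-level detail, e.g. 'Key (id)=(123) is not present in table "foo".'
        return error_response(500, str(error.orig))
    except SQLAlchemyError as error:
        db.session.rollback()
        return error_response(500, str(error))
    return jsonify({"success": True}), 201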

Quickfix with several Python strategies

I am having difficulty trading several strategies written in Python.
I have established a FIX connection via QuickFIX, but I can only send orders if the strategy's script is inside the QuickFIX connection script. Since I have several strategies, I really have no idea how to send the order from a separate script. Can someone give me a solution?
import sys
import datetime
import time
import quickfix as fix

class Application(fix.Application):
    orderID = 0
    execID = 0

    def gen_ord_id(self):
        global orderID
        orderID += 1
        return orderID

    def onCreate(self, sessionID):
        return

    def onLogon(self, sessionID):
        self.sessionID = sessionID
        print ("Successful Logon to session '%s'." % sessionID.toString())
        return

    def onLogout(self, sessionID):
        return

    def toAdmin(self, message, sessionID):
        username = fix.Username("username")
        mypass = fix.Password("password")
        mycompid = fix.TargetSubID("targetsubid")
        message.setField(username)
        message.setField(mypass)
        message.setField(mycompid)

    def fromAdmin(self, message, sessionID):
        TradeID = fix.TradingSessionID
        message.getField(TradeID)
        return

    def toApp(self, sessionID, message):
        print "Sent the following message: %s" % message.toString()
        return

    def fromApp(self, message, sessionID):
        print "Received the following message: %s" % message.toString()
        return

    def genOrderID(self):
        self.orderID = self.orderID + 1
        return `self.orderID`

    def genExecID(self):
        self.execID = self.execID + 1
        return `self.execID`

    def put_order(self, sessionID, myinstrument, myquantity):
        self.myinstrument = myinstrument
        self.myquantity = myquantity
        print("Creating the following order: ")
        today = datetime.datetime.now()
        nextID = today.strftime("%m%d%Y%H%M%S%f")
        trade = fix.Message()
        trade.getHeader().setField(fix.StringField(8, "FIX.4.4"))
        trade.getHeader().setField(fix.MsgType(fix.MsgType_NewOrderSingle))
        trade.setField(fix.ClOrdID(nextID))  # 11 = Unique order
        trade.getHeader().setField(fix.Account("account"))
        trade.getHeader().setField(fix.TargetSubID("targetsubid"))
        trade.setField(fix.Symbol(myinstrument))  # 55 = SMBL ?
        trade.setField(fix.TransactTime())
        trade.setField(fix.CharField(54, fix.Side_BUY))
        trade.setField(fix.OrdType(fix.OrdType_MARKET))  # 40=2 Limit order
        trade.setField(fix.OrderQty(myquantity))  # 38=100
        print trade.toString()
        fix.Session.sendToTarget(trade, self.sessionID)

try:
    file = sys.argv[1]
    settings = fix.SessionSettings(file)
    application = Application()
    storeFactory = fix.FileStoreFactory(settings)
    logFactory = fix.ScreenLogFactory(settings)
    initiator = fix.SocketInitiator(application, storeFactory, settings, logFactory)
    initiator.start()
    while 1:
        time.sleep(1)
        if input == '1':
            print "Putin Order"
            application.put_order(fix.Application)
        if input == '2':
            sys.exit(0)
        if input == 'd':
            import pdb
            pdb.set_trace()
        else:
            print "Valid input is 1 for order, 2 for exit"
except (fix.ConfigError, fix.RuntimeError) as e:
    print e
This is my initiator app. My question is: can I update the following values from another Python script?
trade.setField(fix.Symbol(myinstrument))
trade.setField(fix.OrderQty(myquantity))
So I want to change myinstrument and myquantity from another Python script and force the initiator to execute application.put_order(fix.Application) with the new values. Is this possible at all?
Sounds like you need an internal messaging layer that QuickFIX subscribes to and that your separate Python scripts publish orders to. It's about workflow design. Try something like Vert.x, as that can be set up using Python.
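As one hypothetical way to sketch such a layer in plain Python, the strategies could push order requests over a local ZeroMQ socket and the initiator loop could poll it instead of reading stdin. ZeroMQ here only stands in for whichever messaging layer you pick, and the port number and message fields are invented for illustration:
# --- strategy_script.py: a separate strategy process publishing an order request
import zmq

ctx = zmq.Context()
push = ctx.socket(zmq.PUSH)
push.connect("tcp://127.0.0.1:5555")  # hypothetical local port
push.send_json({"symbol": "EUR/USD", "qty": 10000})

# --- inside the initiator script, replacing the input-polling while-loop
import time
import zmq

ctx = zmq.Context()
pull = ctx.socket(zmq.PULL)
pull.bind("tcp://127.0.0.1:5555")

while True:
    try:
        order = pull.recv_json(flags=zmq.NOBLOCK)
        application.put_order(application.sessionID,
                              order["symbol"], order["qty"])
    except zmq.Again:
        time.sleep(0.1)  # nothing queued yet
Any broker or even a plain multiprocessing queue would do the same job; the point is that put_order stays in the process that owns the FIX session.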

fetching various data corresponding to a tweet

I am trying to fetch data from Twitter for processing. Please see the code below. I want various data corresponding to a particular tweet for a given topic. I am able to fetch created_at, text, username, and user_id, but it shows an error when I try to fetch location, followers_count, friends_count, and retweet_count.
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
import json

ckey = '***********************'
csecret = '************************'
atoken = '*************************'
asecret = '**********************'

class listener(StreamListener):
    def on_data(self, data):
        try:
            all_data = json.loads(data)
            tweet = all_data["text"]
            username = all_data["user"]["screen_name"]
            timestamp = all_data["created_at"]
            user_id = all_data["id_str"]
            location = all_data["location"]
            followers_count = all_data["followers_count"]
            friends_count = all_data["friends_count"]
            retweet_count = all_data["retweet_count"]
            saveThis = str(time.time()) + '::' + timestamp + '::' + username + '::' + user_id + '::' + tweet + '::' + followers_count + '::' + friends_count + '::' + retweet_count + '::' + location
            saveFile = open('clean2.txt', 'a')
            saveFile.write(saveThis)
            saveFile.write('\n')
            saveFile.close
            return True
        except BaseException, e:
            print 'failed on data,', str(e)
            time.sleep(5)

    def on_error(self, status):
        print status

auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["tweepy"])  # topic
The reason it fails on all_data["location"] is that tweets don't have such a property: https://dev.twitter.com/overview/api/tweets
The same goes for friends_count and followers_count - they are properties of users, not tweets.
The code should not be failing on all_data["retweet_count"], as tweets do have that property.
P.S. Please include the error message (even if you skip the full traceback) when reporting errors. It makes it easier to help you; otherwise one has to guess what the error might be.
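In other words, a minimal sketch of the corrected lookups, assuming the standard streaming payload where those counts sit on the nested user object:
location = all_data["user"]["location"]
followers_count = all_data["user"]["followers_count"]
friends_count = all_data["user"]["friends_count"]
retweet_count = all_data["retweet_count"]  # this one is on the tweet itself

# These counts come back as integers, so wrap them in str() before
# concatenating them into saveThis.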

python web py automated testing

I am having an issue with automated testing in the web.py framework.
I am going through the last exercise of Learn Python the Hard Way. In this exercise we make a web application "engine" that runs a map of rooms.
I want to be able to automatically test every single room, but there is one problem: the engine depends on the previous room (and user input) to decide which room to go to next.
if web.config.get("_session") is None:
    store = web.session.DiskStore("sessions")
    session = web.session.Session(app, store, initializer={"room": None})
    web.config._session = session
else:
    session = web.config._session
This class handles GET requests sent to /
class Index(object):
    def GET(self):
        session.room = map.START
        web.seeother("/game")
This class handles GET and POST requests to /game
class GameEngine(object):
    def GET(self):
        if session.room:
            return render.show_room(room=session.room)
        else:
            return render.you_died()

    def POST(self):
        form = web.input(action=None)
        if session.room and form.action:
            session.room = session.room.go(form.action)
        web.seeother("/game")
In my automated testing I use two things. First, I use the app.request API:
app.request(localpart='/', method='GET', data=None,
            host='0.0.0.0:8080', headers=None, https=False)
to create a response object, something like:
resp = app.request("/game", method="GET")
Second, I pass the resp object to this function to check for certain things:
from nose.tools import *
import re

def assert_response(resp, contains=None, matches=None, headers=None, status="200"):
    assert status in resp.status, "Expected response %r not in %r" % (status, resp.status)
    if status == "200":
        assert resp.data, "Response data is empty"
    if contains:
        assert contains in resp.data, "Response does not contain %r" % contains
    if matches:
        reg = re.compile(matches)
        assert reg.match(resp.data), "Response does not match %r" % matches
    if headers:
        assert_equal(resp.headers, headers)
We can pass variables as a dictionary to the data keyword argument of app.request to modify web.input().
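For example, a one-line sketch of that (the "shoot" action value is made up for illustration):
resp = app.request("/game", method="POST", data={"action": "shoot"})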
My question is: in my automated test module, how do we "pass" a value that overwrites the room value in the initializer dictionary of our session:
session = web.session.Session(app, store, initializer={"room":None})
In the app module it's done by setting
session.room = map.START
and then session.room updates using:
if session.room and form.action:
    session.room = session.room.go(form.action)
Thanks for taking the time to read this, and any insights would be appreciated!
Alright, I finally found it! The main issue here was that every time I make an HTTP request through app.request it gives me a new session ID.
The trick that I found thanks to this post:
How to initialize session data in automated test? (python 2.7, webpy, nosetests)
is to record the session ID of the request and reuse that ID in my automated tests by passing it to the headers keyword argument of the next request!
Record the session ID using this function (which, as suggested in the post, I placed in tests/tools.py):
def get_session_id(resp):
    cookies_str = resp.headers['Set-Cookie']
    if cookies_str:
        for kv in cookies_str.split(';'):
            if 'webpy_session_id=' in kv:
                return kv
then in the automated tests something like:
def test_session():
    resp = app.request('/')
    session_id = get_session_id(resp)
    resp1 = app.request('/game', headers={'Cookie': session_id})
    assert_response(resp1, status='200', contains='Central Corridor')
I hope that helps in the future for programmers who get stuck on the same issue!

Diagnosing proxy issue with python

So I am trying to use Python 2.7 to do various things that require pulling data from the internet. I have not been very successful, and I am looking for help diagnosing what I am doing wrong.
Firstly, I managed to get pip to work by defining the proxy like so: pip install --proxy=http://username:password#someproxy.com:8080 numpy. Hence Python must be capable of getting through the proxy!
However, when it came to actually writing a .py script that could do the same, I have had no success. I tried the following code with urllib2 first:
import urllib2

uri = "http://www.python.org"
http_proxy_server = "someproxyserver.com"
http_proxy_port = "8080"
http_proxy_realm = http_proxy_server
http_proxy_user = "username"
http_proxy_passwd = "password"

# Next line = "http://username:password#someproxyserver.com:8080"
http_proxy_full_auth_string = "http://%s:%s#%s:%s" % (http_proxy_user,
                                                      http_proxy_passwd,
                                                      http_proxy_server,
                                                      http_proxy_port)

def open_url_no_proxy():
    urllib2.urlopen(uri)
    print "Apparent success without proxy server!"

def open_url_installed_opener():
    proxy_handler = urllib2.ProxyHandler({"http": http_proxy_full_auth_string})
    opener = urllib2.build_opener(proxy_handler)
    urllib2.install_opener(opener)
    urllib2.urlopen(uri)
    print "Apparent success through proxy server!"

if __name__ == "__main__":
    open_url_no_proxy()
    open_url_installed_opener()
However I just get this error:
URLError: <urlopen error [Errno 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond>
Then I tried urllib3 as this is the module used by pip to handle proxies:
from urllib3 import ProxyManager, make_headers

# Establish the authentication settings
default_headers = make_headers(basic_auth='username:password')
http = ProxyManager("https://www.proxy.com:8080/", headers=default_headers)

# Now you can use `http` as you would a normal PoolManager
r = http.request('GET', 'https://www.python.org/')

# Check data is from destination
print(r.data)
I got this error:
raise MaxRetryError(_pool, url, error or ResponseError(cause)) MaxRetryError: HTTPSConnectionPool(host='www.python.org', port=443): Max retries exceeded with url: / (Caused by ProxyError('Cannot connect to proxy.', error('Tunnel connection failed: 407 Proxy Authorization Required',)))
I would really appreciate any help diagnosing this issue.
The solution to my problem was to use the requests module, see the below thread:
Proxies with Python 'Requests' module
mtt2p listed this code, which worked for me.
import requests
import time

class BaseCheck():
    def __init__(self, url):
        self.http_proxy = "http://user:pw#proxy:8080"
        self.https_proxy = "http://user:pw#proxy:8080"
        self.ftp_proxy = "http://user:pw#proxy:8080"
        self.proxyDict = {
            "http": self.http_proxy,
            "https": self.https_proxy,
            "ftp": self.ftp_proxy
        }
        self.url = url

        def makearr(tsteps):
            global stemps
            global steps
            stemps = {}
            for step in tsteps:
                stemps[step] = {'start': 0, 'end': 0}
            steps = tsteps
        makearr(['init', 'check'])

        def starttime(typ=""):
            for stemp in stemps:
                if typ == "":
                    stemps[stemp]['start'] = time.time()
                else:
                    stemps[stemp][typ] = time.time()
        starttime()

    def __str__(self):
        return str(self.url)

    def getrequests(self):
        g = requests.get(self.url, proxies=self.proxyDict)
        print g.status_code
        print g.content
        print self.url
        stemps['init']['end'] = time.time()
        # print stemps['init']['end'] - stemps['init']['start']
        x = stemps['init']['end'] - stemps['init']['start']
        print x

test = BaseCheck(url='http://google.com')
test.getrequests()
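Distilled down, the part of that class which actually gets past the 407 is simply handing requests an authenticated proxy URL via the proxies argument. A minimal sketch, with user:pw@proxy:8080 standing in for your real credentials and proxy host:
import requests

proxies = {
    "http": "http://user:pw@proxy:8080",
    "https": "http://user:pw@proxy:8080",
}
r = requests.get("https://www.python.org/", proxies=proxies)
print(r.status_code)  # expect 200 if the proxy accepts the credentials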