HTTP 403 error with bottlenose in Python (Amazon API) - python-2.7

I am trying to access data using Amazon's API. I created a Product Advertising account and an AWS account with the same email and password, and obtained the aws_secret, aws_key, and affiliate_associate_key.
Then, I tried the following:
import bottlenose
aws_key = 'my_aws_key'
aws_secret = 'my_aws_secret'
aws_associate_tag = 'my_aws_associate_tag'
amazon = bottlenose.Amazon(aws_key, aws_secret, aws_associate_tag)
product = amazon.lookup(ItemId='B00EOE0WKQ')
And I get an HTTP 403 error, as follows (I added some print statements to my urllib2.py file):
message from urllib2.py Forbidden
headers from urllib2.py Date: Sun, 21 Aug 2016 05:44:59 GMT
Server: Apache-Coyote/1.1
Vary: Accept-Encoding,User-Agent
Content-Encoding: gzip
Content-Length: 304
Keep-Alive: timeout=2, max=15
Connection: Keep-Alive
code from urllib2.py 403
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
<ipython-input-1-973306797553> in <module>()
5
6 amazon = bottlenose.Amazon(aws_key, aws_secret, aws_associate_tag)
----> 7 product = amazon.lookup(ItemId='B00EOE0WKQ')
8
/Users/chaitra/anaconda/lib/python2.7/site-packages/bottlenose/api.pyc in __call__(self, **kwargs)
263 # make the actual API call
264 response = self._call_api(api_url,
--> 265 {'api_url': api_url, 'cache_url': cache_url})
266
267 # decompress the response if need be
/Users/chaitra/anaconda/lib/python2.7/site-packages/bottlenose/api.pyc in _call_api(self, api_url, err_env)
224 else:
225 # the simple way
--> 226 return urllib2.urlopen(api_request, timeout=self.Timeout)
227 except:
228 if not self.ErrorHandler:
/Users/chaitra/anaconda/lib/python2.7/urllib2.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
152 else:
153 opener = _opener
--> 154 return opener.open(url, data, timeout)
155
156 def install_opener(opener):
/Users/chaitra/anaconda/lib/python2.7/urllib2.py in open(self, fullurl, data, timeout)
438 for processor in self.process_response.get(protocol, []):
439 meth = getattr(processor, meth_name)
--> 440 response = meth(req, response)
441
442 return response
/Users/chaitra/anaconda/lib/python2.7/urllib2.py in http_response(self, request, response)
551 if not (200 <= code < 300):
552 response = self.parent.error(
--> 553 'http', request, response, code, msg, hdrs)
554
555 return response
/Users/chaitra/anaconda/lib/python2.7/urllib2.py in error(self, proto, *args)
476 if http_err:
477 args = (dict, 'default', 'http_error_default') + orig_args
--> 478 return self._call_chain(*args)
479
480 # XXX probably also want an abstract factory that knows when it makes
/Users/chaitra/anaconda/lib/python2.7/urllib2.py in _call_chain(self, chain, kind, meth_name, *args)
410 func = getattr(handler, meth_name)
411
--> 412 result = func(*args)
413 if result is not None:
414 return result
/Users/chaitra/anaconda/lib/python2.7/urllib2.py in http_error_default(self, req, fp, code, msg, hdrs)
559 class HTTPDefaultErrorHandler(BaseHandler):
560 def http_error_default(self, req, fp, code, msg, hdrs):
--> 561 raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
562
563 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 403: Forbidden
Things tried:
I made sure that all the keys and the associate tag are in fact from the same account.
I also synced the clock on my machine (which was in the Central time zone) to the N. California time zone.
I have made around 5 requests in over an hour.
I am using Python 2.7.11 and running this code locally in Anaconda's QtConsole.

I extract data the same way using Anaconda Cloud and ran into a similar issue. It is likely due to running the code on an office network with proxies enabled, which obstruct your access to the web. Try using your home network and executing the code there.
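If you want to check for a proxy before switching networks, here is a minimal diagnostic sketch (the proxy URL below is a placeholder; this assumes the request goes through urllib2, as the traceback shows):
import urllib
import urllib2

# urllib.getproxies() reports the proxies picked up from the environment
# (http_proxy/https_proxy) and OS settings; expect {} on an open network
print urllib.getproxies()

# if a proxy is unavoidable, urllib2 (which bottlenose uses) can be
# pointed at it explicitly before making the API call
opener = urllib2.build_opener(
    urllib2.ProxyHandler({'https': 'http://proxy.example.com:8080'}))
urllib2.install_opener(opener)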

Related

How to handle SSLError in Geocoding API?

I am using GCP's paid Geocoding API to get the latitude and longitude of around 1 million addresses. It is very slow, but I can wait. The problem is that I am getting an SSLError, sometimes after 15,000 requests and sometimes after 6,000 requests or so. I am using Python 2.7, and I have tried to catch the error, but the problem persists.
Error:
SSLErrorTraceback (most recent call last)
<ipython-input-2-7dd6c7aa3195> in <module>()
14 count=0
15 try:
---> 16 location2 = geocoder2.geocode(row,timeout=1,components={"country": "PK","locality":"Sindh"})
17 if location2:
18 print(count2,location2.latitude,location2.longitude)
/home/prassani/prassani/local/lib/python2.7/site-packages/geopy/geocoders/googlev3.pyc in geocode(self, query, exactly_one, timeout, bounds, region, components, place_id, language, sensor)
271 logger.debug("%s.geocode: %s", self.__class__.__name__, url)
272 return self._parse_json(
--> 273 self._call_geocoder(url, timeout=timeout), exactly_one
274 )
275
/home/prassani/prassani/local/lib/python2.7/site-packages/geopy/geocoders/base.pyc in _call_geocoder(self, url, timeout, raw, requester, deserializer, **kwargs)
398 return page
399
--> 400 page = decode_page(page)
401
402 if deserializer is not None:
/home/prassani/prassani/local/lib/python2.7/site-packages/geopy/util.pyc in decode_page(page)
50 else:
51 encoding = page.headers.getparam("charset") or "utf-8"
---> 52 return text_type(page.read(), encoding=encoding)
53 else: # requests?
54 encoding = page.headers.get("charset") or "utf-8"
/usr/lib/python2.7/socket.pyc in read(self, size)
353 while True:
354 try:
--> 355 data = self._sock.recv(rbufsize)
356 except error, e:
357 if e.args[0] == EINTR:
/usr/lib/python2.7/httplib.pyc in read(self, amt)
605 # connection, and the user is reading more bytes than will be provided
606 # (for example, reading in 1k chunks)
--> 607 s = self.fp.read(amt)
608 if not s and amt:
609 # Ideally, we would raise IncompleteRead if the content-length
/usr/lib/python2.7/socket.pyc in read(self, size)
382 # fragmentation issues on many platforms.
383 try:
--> 384 data = self._sock.recv(left)
385 except error, e:
386 if e.args[0] == EINTR:
/usr/lib/python2.7/ssl.pyc in recv(self, buflen, flags)
770 "non-zero flags not allowed in calls to recv() on %s" %
771 self.__class__)
--> 772 return self.read(buflen)
773 else:
774 return self._sock.recv(buflen, flags)
/usr/lib/python2.7/ssl.pyc in read(self, len, buffer)
657 v = self._sslobj.read(len, buffer)
658 else:
--> 659 v = self._sslobj.read(len)
660 return v
661 except SSLError as x:
SSLError: ('The read operation timed out',)
Code I am using:
from geopy.geocoders import GoogleV3
from geopy.exc import GeocoderServiceError, GeocoderTimedOut
from requests.exceptions import SSLError
import time
import pandas as pd

df = pd.read_csv("./sat-data.csv")
df['Address'] = ''
df['Location'] = ''
geocoder2 = GoogleV3(api_key="somekey")
count = 0
count2 = 0
for index, row in df.School_Address.iteritems():
    count += 1
    count2 += 1
    if count == 4900:
        time.sleep(100)
        count = 0
    try:
        location2 = geocoder2.geocode(row, timeout=1, components={"country": "PK", "locality": "Sindh"})
        if location2:
            print(count2, location2.latitude, location2.longitude)
            df.Address.loc[index] = location2.address
            df.Location.loc[index] = (location2.latitude, location2.longitude)
        else:
            df.Address.loc[index] = "None"
            df.Location.loc[index] = "None"
    except (GeocoderServiceError, GeocoderTimedOut, SSLError) as e:
        print("Error: geocode failed on input")
df.to_csv('./sat-data-new.csv', index=False)
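Note that the traceback ends in a raw ssl.SSLError, which the requests.exceptions.SSLError in the except clause will not catch in this geopy version. A minimal retry sketch, assuming the geocoder2 object above (the timeout and backoff values are illustrative):
import ssl
import time

def geocode_with_retry(query, retries=3):
    # retry on SSL read timeouts with a simple exponential backoff
    for attempt in range(retries):
        try:
            return geocoder2.geocode(query, timeout=10,
                                     components={"country": "PK", "locality": "Sindh"})
        except (ssl.SSLError, GeocoderTimedOut):
            time.sleep(2 ** attempt)
    return None  # give up after the final attempt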

Pyredis cannot connect to Digital Ocean hosted Redis (connection lost ConnectionError exception)

In [16]: r
Out[16]: Redis<ConnectionPool<Connection<host=****,port=*****,db=3>>>
In [17]: r.set('zaza', 'king')
---------------------------------------------------------------------------
ConnectionError Traceback (most recent call last)
<ipython-input-17-8126d1846970> in <module>
----> 1 r.set('zaza', 'king')
/usr/local/lib/python3.7/site-packages/redis/client.py in set(self, name, value, ex, px, nx, xx)
1517 if xx:
1518 pieces.append('XX')
-> 1519 return self.execute_command('SET', *pieces)
1520
1521 def __setitem__(self, name, value):
/usr/local/lib/python3.7/site-packages/redis/client.py in execute_command(self, *args, **options)
834 pool = self.connection_pool
835 command_name = args[0]
--> 836 conn = self.connection or pool.get_connection(command_name, **options)
837 try:
838 conn.send_command(*args)
/usr/local/lib/python3.7/site-packages/redis/connection.py in get_connection(self, command_name, *keys, **options)
1069 try:
1070 # ensure this connection is connected to Redis
-> 1071 connection.connect()
1072 # connections that the pool provides should be ready to send
1073 # a command. if not, the connection was either returned to the
/usr/local/lib/python3.7/site-packages/redis/connection.py in connect(self)
545 self._sock = sock
546 try:
--> 547 self.on_connect()
548 except RedisError:
549 # clean up after any error in on_connect
/usr/local/lib/python3.7/site-packages/redis/connection.py in on_connect(self)
615 # to check the health prior to the AUTH
616 self.send_command('AUTH', self.password, check_health=False)
--> 617 if nativestr(self.read_response()) != 'OK':
618 raise AuthenticationError('Invalid Password')
619
/usr/local/lib/python3.7/site-packages/redis/connection.py in read_response(self)
697 "Read the response from a previously sent command"
698 try:
--> 699 response = self._parser.read_response()
700 except socket.timeout:
701 self.disconnect()
/usr/local/lib/python3.7/site-packages/redis/connection.py in read_response(self)
307
308 def read_response(self):
--> 309 response = self._buffer.readline()
310 if not response:
311 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
/usr/local/lib/python3.7/site-packages/redis/connection.py in readline(self)
239 while not data.endswith(SYM_CRLF):
240 # there's more data in the socket that we need
--> 241 self._read_from_socket()
242 buf.seek(self.bytes_read)
243 data = buf.readline()
/usr/local/lib/python3.7/site-packages/redis/connection.py in _read_from_socket(self, length, timeout, raise_on_timeout)
184 # an empty string indicates the server shutdown the socket
185 if isinstance(data, bytes) and len(data) == 0:
--> 186 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
187 buf.write(data)
188 data_length = len(data)
This started happening after a move from a local instance to hosted Redis.
So, the problem is that the Redis connection string for these hosted solutions has to start with rediss://, as in Redis + SSL, as per the official documentation:
https://redislabs.com/lp/python-redis/
If you use hosted Redis from AWS or DigitalOcean, this may well happen to you too. :)
If you are using Celery, you will also need to modify your app config in app.py, as per
https://github.com/celery/celery/issues/5371
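A minimal sketch, assuming redis-py 3.x and a TLS-only hosted instance (the host, port, and password below are placeholders):
import redis

# the rediss:// scheme turns on SSL/TLS for the connection
r = redis.from_url('rediss://:mypassword@myhost.db.ondigitalocean.com:25061/3')

# equivalently, enable TLS explicitly on a plain Redis client
r = redis.Redis(host='myhost.db.ondigitalocean.com', port=25061,
                password='mypassword', db=3, ssl=True)
r.set('zaza', 'king')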

H2O machine learning platform for Python incurs EnvironmentError while building models

I am new to the H2O machine learning platform and am having the issue below while trying to build models.
When I tried to build 5 GBM models with a not-so-large dataset, I got the following error:
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [################# ] 34%
EnvironmentErrorTraceback (most recent call last)
<ipython-input-22-e74b34df2f1a> in <module>()
13 params_model={'x': features_pca_all, 'y': response, 'training_frame': train_holdout_pca_hex, 'validation_frame': validation_holdout_pca_hex, 'ntrees': ntree, 'max_depth':depth, 'min_rows': min_rows, 'learn_rate': 0.005}
14
---> 15 gbm_model=h2o.gbm(**params_model)
16
17 #store model
C:\Anaconda2\lib\site-packages\h2o\h2o.pyc in gbm(x, y, validation_x, validation_y, training_frame, model_id, distribution, tweedie_power, ntrees, max_depth, min_rows, learn_rate, nbins, nbins_cats, validation_frame, balance_classes, max_after_balance_size, seed, build_tree_one_node, nfolds, fold_column, fold_assignment, keep_cross_validation_predictions, score_each_iteration, offset_column, weights_column, do_future, checkpoint)
1058 parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
1059 parms["algo"]="gbm"
-> 1060 return h2o_model_builder.supervised(parms)
1061
1062
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in supervised(kwargs)
28 algo = kwargs["algo"]
29 parms={k:v for k,v in kwargs.items() if (k not in ["x","y","validation_x","validation_y","algo"] and v is not None) or k=="validation_frame"}
---> 30 return supervised_model_build(x,y,vx,vy,algo,offsets,weights,fold_column,parms)
31
32 def unsupervised_model_build(x,validation_x,algo_url,kwargs): return _model_build(x,None,validation_x,None,algo_url,None,None,None,kwargs)
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in supervised_model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs)
16 if not is_auto_encoder and y is None: raise ValueError("Missing response")
17 if vx is not None and vy is None: raise ValueError("Missing response validating a supervised model")
---> 18 return _model_build(x,y,vx,vy,algo,offsets,weights,fold_column,kwargs)
19
20 def supervised(kwargs):
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in _model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs)
86 do_future = kwargs.pop("do_future") if "do_future" in kwargs else False
87 future_model = H2OModelFuture(H2OJob(H2OConnection.post_json("ModelBuilders/"+algo, **kwargs), job_type=(algo+" Model Build")), x)
---> 88 return future_model if do_future else _resolve_model(future_model, **kwargs)
89
90 def _resolve_model(future_model, **kwargs):
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in _resolve_model(future_model, **kwargs)
89
90 def _resolve_model(future_model, **kwargs):
---> 91 future_model.poll()
92 if '_rest_version' in kwargs.keys(): model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key, _rest_version=kwargs['_rest_version'])["models"][0]
93 else: model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key)["models"][0]
C:\Anaconda2\lib\site-packages\h2o\model\model_future.pyc in poll(self)
8
9 def poll(self):
---> 10 self.job.poll()
11 self.x = None
C:\Anaconda2\lib\site-packages\h2o\job.pyc in poll(self)
39 time.sleep(sleep)
40 if sleep < 1.0: sleep += 0.1
---> 41 self._refresh_job_view()
42 running = self._is_running()
43 self._update_progress()
C:\Anaconda2\lib\site-packages\h2o\job.pyc in _refresh_job_view(self)
52
53 def _refresh_job_view(self):
---> 54 jobs = H2OConnection.get_json(url_suffix="Jobs/" + self.job_key)
55 self.job = jobs["jobs"][0] if "jobs" in jobs else jobs["job"][0]
56 self.status = self.job["status"]
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in get_json(url_suffix, **kwargs)
410 if __H2OCONN__ is None:
411 raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
--> 412 return __H2OCONN__._rest_json(url_suffix, "GET", None, **kwargs)
413
414 #staticmethod
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in _rest_json(self, url_suffix, method, file_upload_info, **kwargs)
419
420 def _rest_json(self, url_suffix, method, file_upload_info, **kwargs):
--> 421 raw_txt = self._do_raw_rest(url_suffix, method, file_upload_info, **kwargs)
422 return self._process_tables(raw_txt.json())
423
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in _do_raw_rest(self, url_suffix, method, file_upload_info, **kwargs)
476
477 begin_time_seconds = time.time()
--> 478 http_result = self._attempt_rest(url, method, post_body, file_upload_info)
479 end_time_seconds = time.time()
480 elapsed_time_seconds = end_time_seconds - begin_time_seconds
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in _attempt_rest(self, url, method, post_body, file_upload_info)
526
527 except requests.ConnectionError as e:
--> 528 raise EnvironmentError("h2o-py encountered an unexpected HTTP error:\n {}".format(e))
529
530 return http_result
EnvironmentError: h2o-py encountered an unexpected HTTP error:
('Connection aborted.', BadStatusLine("''",))
My hunch is that the cluster has only around 247.5 MB of memory, which is not enough to handle the model building, and hence the connection to H2O was aborted. Here is the code I used to initialize H2O:
# initialization of the h2o module
import subprocess as sp
import sys
import os.path as p
import h2o

# path of the h2o jar file
h2o_path = p.join(sys.prefix, "h2o_jar", "h2o.jar")
# subprocess to launch h2o
# the command can be further modified to include virtual machine parameters
sp.Popen("java -jar " + h2o_path)
# h2o.init() call to verify that the h2o launch is successful
h2o.init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False,
         license=None, max_mem_size_GB=4, min_mem_size_GB=4, ice_root=None)
and here is the returned status table:
Any ideas on the above would be greatly appreciated!!
Just to close out this question, I'll restate the solution mentioned in the comments above. The user was able to resolve the issue by starting H2O from the command line with 1GB of memory using java -jar -Xmx1g h2o.jar, and then connected to the existing H2O server in Python using h2o.init().
It's not clear to me why h2o.init() was not creating the correct size cluster using the max_mem_size_GB argument. Regardless, this argument has been deprecated recently and replaced by another argument, max_mem_size, so it may no longer be an issue.
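For reference, a sketch of that workflow (the heap size and port are taken from the discussion above; adjust -Xmx to your needs):
# on the command line, start H2O with an explicit heap size:
#   java -jar -Xmx1g h2o.jar
import h2o

# attach to the already-running server instead of launching a new one
h2o.init(ip="localhost", port=54321, start_h2o=False)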

happybase integration not working with HBase

I am able to connect to my HBase:
connection = happybase.Connection(host='node-04',port=16000)
table = connection.table('test')
These two commands work without any error, but when I run the command below, I get the following error:
print connection.tables()
Error:
Traceback (most recent call last)
<ipython-input-49-de0848d7286f> in <module>()
----> 1 print connection.tables()
/root/anaconda2/lib/python2.7/site-packages/happybase/connection.pyc in tables(self)
236 :rtype: List of strings
237 """
--> 238 names = self.client.getTableNames()
239
240 # Filter using prefix, and strip prefix from names
/root/anaconda2/lib/python2.7/site-packages/happybase/hbase/Hbase.pyc in getTableNames(self)
815 #return returns a list of names
816 """
--> 817 self.send_getTableNames()
818 return self.recv_getTableNames()
819
/root/anaconda2/lib/python2.7/site-packages/happybase/hbase/Hbase.pyc in send_getTableNames(self)
823 args.write(self._oprot)
824 self._oprot.writeMessageEnd()
--> 825 self._oprot.trans.flush()
826
827 def recv_getTableNames(self, ):
/root/anaconda2/lib/python2.7/site-packages/thrift/transport/TTransport.pyc in flush(self)
172 # reset wbuf before write/flush to preserve state on underlying failure
173 self.__wbuf = StringIO()
--> 174 self.__trans.write(out)
175 self.__trans.flush()
176
/root/anaconda2/lib/python2.7/site-packages/thrift/transport/TSocket.pyc in write(self, buff)
128 have = len(buff)
129 while sent < have:
--> 130 plus = self.handle.send(buff)
131 if plus == 0:
132 raise TTransportException(type=TTransportException.END_OF_FILE,
error: [Errno 32] Broken pipe
I am using HBase version 1.1.2.2.3.4.0-3485.
Please suggest any package I can use to work with HBase from Python.
happybase requires you to connect to the Thrift daemon, which you need to start on your HBase cluster; happybase does not connect to HBase nodes directly.
Judging from the port number, you are not connecting to Thrift (which uses port 9090 by default) but to the HBase master. This is not how happybase works.
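A minimal sketch of the corrected setup, assuming the Thrift daemon is started on node-04 with its default port:
# on the HBase cluster, start the Thrift daemon first, e.g.:
#   hbase thrift start
import happybase

# connect to the Thrift daemon (default port 9090), not the HBase master
connection = happybase.Connection(host='node-04', port=9090)
print connection.tables()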

Python Pxssh Timeout exceeded in read_nonblocking()

I am trying to query a device, an Extreme Networks switch. Extreme Networks switches change the prompt to indicate that the configuration is not saved, and I am developing a script to verify that all the switches in the fleet have been saved.
Here's what I start with:
import pxssh

s = pxssh.pxssh()
hostname = 'hostname'
username = 'username'
password = 'password'
s.login(hostname, username, password)
The only problem is that on the last line of my code, I get the following error message:
In [93]: s.login (hostname, username, password)
---------------------------------------------------------------------------
TIMEOUT Traceback (most recent call last)
/root/<ipython-input-93-6a124c6a47a0> in <module>()
----> 1 s.login (hostname, username, password)
/usr/lib/python2.7/dist-packages/pxssh.pyc in login(self, server, username, password, terminal_type, original_prompt, login_timeout, port, auto_prompt_reset)
241 self.close()
242 raise ExceptionPxssh ('unexpected login response')
--> 243 if not self.synch_original_prompt():
244 self.close()
245 raise ExceptionPxssh ('could not synchronize with original prompt')
/usr/lib/python2.7/dist-packages/pxssh.pyc in synch_original_prompt(self)
132 # If latency is worse than these values then this will fail.
133
--> 134 self.read_nonblocking(size=10000,timeout=1) # GAS: Clear out the cache before getting the prompt
135 time.sleep(0.1)
136 self.sendline()
/usr/lib/python2.7/dist-packages/pexpect.pyc in read_nonblocking(self, size, timeout)
822 raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
823 else:
--> 824 raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
825
826 if self.child_fd in r:
TIMEOUT: Timeout exceeded in read_nonblocking().
Do you have any idea?
Thank you.
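No answer was recorded here, but one hedged workaround sketch (untested; PatientPxssh is a name invented for this example) is to subclass pxssh so that the initial cache-clearing read in synch_original_prompt() tolerates a switch that sends nothing before the first prompt:
import time
import pexpect
import pxssh

class PatientPxssh(pxssh.pxssh):
    """pxssh variant for quiet devices: the stock synch_original_prompt()
    begins with a 1-second read that raises TIMEOUT if the remote side
    has not sent anything yet, which matches the traceback above."""
    def synch_original_prompt(self):
        try:
            self.read_nonblocking(size=10000, timeout=1)  # drain any banner
        except pexpect.TIMEOUT:
            pass  # nothing buffered yet; fine on a quiet switch
        # same idea as the original: press Enter twice and compare output
        self.sendline()
        time.sleep(0.5)
        first = self.read_nonblocking(size=1000, timeout=2)
        self.sendline()
        time.sleep(0.5)
        second = self.read_nonblocking(size=1000, timeout=2)
        return first.strip()[-10:] == second.strip()[-10:]

s = PatientPxssh()
s.login('hostname', 'username', 'password', login_timeout=30)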