I have a problem. I'm a beginner trying to write a simple asynchronous program that posts to Facebook. I'm using the Tornado example together with the tornado-facebook-sdk. Here is the code:
class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        self.facebook_request("/me/home", self.print_callback,
                              access_token=self.current_user["access_token"])
        a = self.current_user["access_token"]
        #print a

    def print_callback(data):
        print data
        ioloop.stop()
        graph.get_object('/facebook', callback=print_callback)
and I get this error:
TypeError: print_callback() takes exactly 1 argument (2 given)
I want to understand this example in order to get the token, and then use this example:
def callback(response):
    # ...
graph.put_object('me', 'feed', message="Maoe!!", callback=callback)
to write something on my Facebook wall. I did it with the synchronous library, but sadly that is blocking!
UPDATE: still getting an error:
class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        self.facebook_request("/me/home", self.print_callback,
                              access_token=self.current_user["access_token"])
        a = self.current_user["access_token"]
        print a

    def print_callback(self, data):
        graph.post_wall(self, "heloooooooo")
and got this error:
[E 121009 14:28:47 web:1108] Uncaught exception GET / (::1)
HTTPRequest(.....)
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\tornado-2.4.post1-py2.7.egg\tornado\web.py", line 1043, in _stack_context_handle_exception
raise_exc_info((type, value, traceback))
File "C:\Python27\lib\site-packages\tornado-2.4.post1 py2.7.egg\tornado\stack_context.py", line 237, in _nested
yield vars
File "C:\Python27\lib\site-packages\tornado-2.4.post1-py2.7.egg\tornado\stack_context.py", line 210, in wrapped
callback(*args, **kwargs)
File "C:\Python27\lib\site-packages\tornado-2.4.post1-py2.7.egg\tornado\gen.py", line 405, in inner self.set_result(key, result)
File "C:\Python27\lib\site-packages\tornado-2.4.post1-py2.7.egg\tornado\gen.py", line 335, in set_result
self.run()
File "C:\Python27\lib\site-packages\tornado-2.4.post1-py2.7.egg\tornado\gen.py", line 365, in run
yielded = self.gen.send(next)
File "build\bdist.win-amd64\egg\facebook\graphapi.py", line 129, in _make_request
raise GraphAPIError(data)
GraphAPIError: (#200) This API call requires a valid app_id.
When I go to Facebook, I see that the key I'm using is valid. I even take the generated token (the a variable here), paste it into the API Debugger, and everything checks out fine:
Valid : True
Origin : Web
Scopes : create_note photo_upload publish_actions publish_stream read_stream share_item status_update video_upload
Add self to print_callback:
def print_callback(self, data):
    print data
    ioloop.stop()
graph.get_object('/facebook', callback=print_callback)
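Putting it together, a minimal sketch of the whole handler (my assumption, not tested against your app): with @tornado.web.asynchronous the connection stays open until self.finish() is called, so finish the request in the callback instead of stopping the shared IOLoop:

class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        # fires print_callback when Facebook answers; get() returns immediately
        self.facebook_request("/me/home", self.print_callback,
                              access_token=self.current_user["access_token"])

    def print_callback(self, data):
        print data
        self.finish()  # close the request; required with @asynchronous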
I have one question: I want to test "select" and "input". Can I write it like the code below?
original code:
class Sinaselecttest(unittest.TestCase):

    def setUp(self):
        binary = FirefoxBinary('/usr/local/firefox/firefox')
        self.driver = webdriver.Firefox(firefox_binary=binary)

    def test_select_in_sina(self):
        driver = self.driver
        driver.get("https://www.sina.com.cn/")
        try:
            WebDriverWait(driver, 30).until(
                ec.visibility_of_element_located((By.XPATH, "/html/body/div[9]/div/div[1]/form/div[3]/input"))
            )
        finally:
            driver.quit()
        # test the select functionality
        select = Select(driver.find_element_by_xpath("//*[@id='slt_01']")).select_by_value("微博")
        element = driver.find_element_by_xpath("/html/body/div[9]/div/div[1]/form/div[3]/input")
        element.send_keys("杨幂")
        driver.find_element_by_xpath("/html/body/div[9]/div/div[1]/form/input").click()
        driver.implicitly_wait(5)

    def tearDown(self):
        self.driver.close()
I want to test Selenium's "select" function, so I chose the Sina website to select one option, input text into the text box, and then search for it. But when I run this test, I get this error:
Traceback (most recent call last):
File "test_sina_select.py", line 32, in tearDown
self.driver.close()
File "/usr/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py", line 688, in close
self.execute(Command.CLOSE)
File "/usr/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py", line 319, in execute
response = self.command_executor.execute(driver_command, params)
File "/usr/lib/python2.7/site-packages/selenium/webdriver/remote/remote_connection.py", line 376, in execute
return self._request(command_info[0], url, body=data)
File "/usr/lib/python2.7/site-packages/selenium/webdriver/remote/remote_connection.py", line 399, in _request
resp = self._conn.request(method, url, body=body, headers=headers)
File "/usr/lib/python2.7/site-packages/urllib3/request.py", line 68, in request
**urlopen_kw)
File "/usr/lib/python2.7/site-packages/urllib3/request.py", line 81, in request_encode_url
return self.urlopen(method, url, **urlopen_kw)
File "/usr/lib/python2.7/site-packages/urllib3/poolmanager.py", line 247, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/usr/lib/python2.7/site-packages/urllib3/connectionpool.py", line 617, in urlopen
release_conn=release_conn, **response_kw)
File "/usr/lib/python2.7/site-packages/urllib3/connectionpool.py", line 617, in urlopen
release_conn=release_conn, **response_kw)
File "/usr/lib/python2.7/site-packages/urllib3/connectionpool.py", line 617, in urlopen
release_conn=release_conn, **response_kw)
File "/usr/lib/python2.7/site-packages/urllib3/connectionpool.py", line 597, in urlopen
_stacktrace=sys.exc_info()[2])
File "/usr/lib/python2.7/site-packages/urllib3/util/retry.py", line 271, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
MaxRetryError: HTTPConnectionPool(host='127.0.0.1', port=51379): Max retries exceeded with url: /session/2e64d2a1-3c7f-4221-96fe-9d0b1c102195/window (Caused by ProtocolError('Connection aborted.', error(111, 'Connection refused')))
----------------------------------------------------------------------
Ran 1 test in 72.106s
Who can tell me why? Thanks.
This error message...
MaxRetryError: HTTPConnectionPool(host='127.0.0.1', port=51379): Max retries exceeded with url: /session/2e64d2a1-3c7f-4221-96fe-9d0b1c102195/window (Caused by ProtocolError('Connection aborted.', error(111, 'Connection refused')))
...implies that the call to the self.driver.close() method failed, raising MaxRetryError.
A couple of things:
First and foremost, as per the discussion in max-retries-exceeded exceptions are confusing, the traceback is somewhat misleading: Requests wraps the exception for the user's convenience, and the original exception is part of the message displayed.
Requests never retries (it sets retries=0 for urllib3's HTTPConnectionPool), so the error would have been much more canonical without the MaxRetryError and HTTPConnectionPool keywords. An ideal traceback would have been:
ConnectionError(<class 'socket.error'>: [Errno 111] Connection refused)
But again, @sigmavirus24 mentioned in his comment: ...wrapping these exceptions make for a great API but a poor debugging experience...
Moving forward, the plan was to traverse as far down as possible to the lowest-level exception and use that instead.
Finally, this issue was fixed by rewording some exceptions, which has nothing to do with the actual connection refused error.
Solution
Even before self.driver.close() within tearDown(self) is invoked, the try block within test_select_in_sina(self) includes a finally block where you have invoked driver.quit(), which calls the /shutdown endpoint; subsequently the WebDriver and client instances are destroyed, completely closing all of the pages/tabs/windows. Hence no more connection exists.
You can find a couple of relevant detailed discussions in:
PhantomJS web driver stays in memory
Selenium: How to stop geckodriver process impacting PC memory, without calling driver.quit()?
In such a situation, when you invoke self.driver.close(), the Python client is unable to locate any active connection to initiate a closure. Hence you see the error.
So a simple solution would be to remove the line driver.quit(), i.e. remove the finally block, and let tearDown() end the session, as sketched below.
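A minimal sketch of the restructured test (my rearrangement of your code, not a verified fix): drop the finally block so the session survives the wait, and let tearDown() end the session:

def test_select_in_sina(self):
    driver = self.driver
    driver.get("https://www.sina.com.cn/")
    # no finally: driver.quit() here; the session must stay alive
    WebDriverWait(driver, 30).until(
        ec.visibility_of_element_located((By.XPATH, "/html/body/div[9]/div/div[1]/form/div[3]/input"))
    )
    Select(driver.find_element_by_xpath("//*[@id='slt_01']")).select_by_value("微博")
    element = driver.find_element_by_xpath("/html/body/div[9]/div/div[1]/form/div[3]/input")
    element.send_keys("杨幂")
    driver.find_element_by_xpath("/html/body/div[9]/div/div[1]/form/input").click()

def tearDown(self):
    # quit() (rather than close()) ends the whole WebDriver session cleanly
    self.driver.quit()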
tl;dr
As per the Release Notes of Selenium 3.14.1:
* Fix ability to set timeout for urllib3 (#6286)
The associated merge is: repair urllib3 can't set timeout!
Conclusion
Once you upgrade to Selenium 3.14.1, you will be able to set the timeout, see canonical tracebacks, and take the required action.
References
A couple of relevant references:
Adding max_retries as an argument
Removed the bundled charade and urllib3.
Third party libraries committed verbatim
I just had the same problem. The solution was to change the owner of the folder containing the script, recursively. In my case the folder was owned by root:root and I needed to change it to ubuntu:ubuntu.
Solution: sudo chown -R ubuntu:ubuntu /path-to-your-folder
Use a try/except block to catch the exceptions:
try:
    r = requests.get(url)
except requests.exceptions.Timeout:
    pass  # handle the timeout here
except requests.exceptions.TooManyRedirects:
    pass  # handle the redirect loop here
except requests.exceptions.RequestException as e:
    # catastrophic error: bail out
    raise SystemExit(e)
I need to download a group of CSV files from an FTP server using Scrapy. But first I need to scrape a website (https://www.douglas.co.us/assessor/data-downloads/) in order to get the URLs of the CSV files on the FTP server. I read about how to download files in the documentation (Downloading and processing files and images).
settings
custom_settings = {
    'ITEM_PIPELINES': {
        'scrapy.pipelines.files.FilesPipeline': 1,
    },
    'FILES_STORE': os.path.dirname(os.path.abspath(__file__))
}
parse
def parse(self, response):
    self.logger.info("In parse method!!!")
    # Property Ownership
    property_ownership = response.xpath("//a[contains(., 'Property Ownership')]/@href").extract_first()
    # Property Location
    property_location = response.xpath("//a[contains(., 'Property Location')]/@href").extract_first()
    # Property Improvements
    property_improvements = response.xpath("//a[contains(., 'Property Improvements')]/@href").extract_first()
    # Property Value
    property_value = response.xpath("//a[contains(., 'Property Value')]/@href").extract_first()
    item = FiledownloadItem()
    self.insert_keyvalue(item, "file_urls",
                         [property_ownership, property_location, property_improvements, property_value])
    yield item
But I got the following error
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 653, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/usr/local/lib/python2.7/dist-packages/scrapy/pipelines/media.py", line 79, in process_item
    requests = arg_to_iter(self.get_media_requests(item, info))
  File "/usr/local/lib/python2.7/dist-packages/scrapy/pipelines/files.py", line 382, in get_media_requests
    return [Request(x) for x in item.get(self.files_urls_field, [])]
  File "/usr/local/lib/python2.7/dist-packages/scrapy/http/request/__init__.py", line 25, in __init__
    self._set_url(url)
  File "/usr/local/lib/python2.7/dist-packages/scrapy/http/request/__init__.py", line 58, in _set_url
    raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: [
The best explanation of my problem is this answer to the question scrapy error: exceptions.ValueError: Missing scheme in request url:, which explains that the problem is that the URLs to download are missing the "http://" prefix.
What should I do in my case? Can I use FilesPipeline? Or do I need to do something different?
Thanks in advance.
ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: [
According to the traceback, Scrapy thinks your file URL is '['.
My best guess is that you have an error in the insert_keyvalue() method.
Also, why have a method for this? Simple assignment should work, as in the sketch below.
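A minimal sketch of the end of parse() (my rewrite; it assumes the four variables are extracted exactly as in your question, and uses response.urljoin() so relative links get a scheme):

def parse(self, response):
    # ... extract property_ownership, property_location,
    # property_improvements and property_value as before ...
    urls = [property_ownership, property_location,
            property_improvements, property_value]
    item = FiledownloadItem()
    # plain assignment instead of insert_keyvalue(); skip links that
    # were not found, and make every URL absolute
    item['file_urls'] = [response.urljoin(u) for u in urls if u]
    yield item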
I am a novice in Python. I extracted the code below, for logging in to a website, from an online post, but I'm getting an error.
Please help me fix it; an explanation would also help me.
import requests

with requests.Session() as c:
    EMAIL = 'noob.python@gmail.com'
    PASSWORD = 'Dabc#123'
    URL = 'https://www.linkedin.com/'
    c.get(URL)
    token = c.cookies['CsrfParam']
    # This is the form data that the page sends when logging in
    login_data = {'loginCsrfParam': token, 'session_key': EMAIL, 'session_password': PASSWORD}
    # Authenticate
    r = c.post(URL, data=login_data)
    # Try accessing a page that requires you to be logged in
    r = c.get('https://www.linkedin.com/feed/')
    print r.content
I am stuck with the error below:
C:\Python27>python website.py
Traceback (most recent call last):
File "website.py", line 8, in <module>
token = c.cookies['CsrfParam']
File "C:\Python27\lib\site-packages\requests\cookies.py", line 329, in __getitem__
return self._find_no_duplicates(name)
File "C:\Python27\lib\site-packages\requests\cookies.py", line 400, in _find_no_duplicates
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
KeyError: "name='CsrfParam', domain=None, path=None"
The reason you're getting the error is that you're calling a value from a list which is empty. To call the first item in the list you say list[0]; in this case the list you're calling is empty, so the first value doesn't exist, hence the error.
I've run your code and there is no @id value of 'recaptcha-token', which is why the code is returning an empty list. The only place a recaptcha token is needed is for signing up, so I would suggest trying to log in without creating the authenticity_token.
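For the KeyError itself, a small defensive sketch (my addition; cookies.get() is the standard requests cookie-jar accessor and returns None instead of raising):

c.get(URL)
token = c.cookies.get('CsrfParam')
if token is None:
    # the cookie was never set; inspect what the site actually returned
    print [cookie.name for cookie in c.cookies]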
I want to get a list of all the groups in my contacts. I use this code:
import gdata.gauth
import gdata.contacts.client

token = gdata.gauth.OAuth2Token(client_id="***.apps.googleusercontent.com",
                                client_secret="***",
                                scope="https://www.google.com/m8/feeds/",
                                user_agent="GC")
gd_client = gdata.contacts.client.ContactsClient(source='GCv0.1')
gd_client = token.authorize(gd_client)
gd_client.GetGroups()
But I got this error:
Traceback (most recent call last):
File "F:/Yandex/Sites/GoogleContacts/cli_contacts.py", line 27, in <module>
gd_client.GetGroups()
File "C:\Users\Ishayahu\27Gdata\lib\site-packages\gdata\contacts\client.py", line 218, in get_groups
return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
File "C:\Users\Ishayahu\27Gdata\lib\site-packages\gdata\client.py", line 640, in get_feed
**kwargs)
File "C:\Users\Ishayahu\27Gdata\lib\site-packages\gdata\client.py", line 319, in request
RequestError)
gdata.client.RequestError: Server responded with: 400,
I have no idea what the reason is and I can't find any clue to how to solve it.
UPD: It looks like I should somehow put an access_token or refresh_token into OAuth2Token, but I can't understand how,
so it sends the headers
{'GData-Version': '3', 'Authorization': 'Bearer None', 'User-Agent': 'gdata-py/2.0.17'}
UPD2: By the way, if I test it in the OAuth2 Playground, it shows me a page requesting access to my contacts. This script doesn't ask for that. Maybe that's the problem? How can I change it? I thought it was connected with url_redirect, but I can't understand how to use it.
UPD3: I was right: if I add an access_token, which I got manually from the Playground, everything works. But how should I get it in the script?!
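For completeness, a minimal sketch of the missing authorization step, assuming the gdata-python-client helpers generate_authorize_url() and get_access_token() (verify both names against your gdata version):

import gdata.gauth
import gdata.contacts.client

token = gdata.gauth.OAuth2Token(client_id="***.apps.googleusercontent.com",
                                client_secret="***",
                                scope="https://www.google.com/m8/feeds/",
                                user_agent="GC")

# 1. Send the user to the consent page (out-of-band flow for installed apps).
print token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
# 2. Paste the code Google displays after access is granted.
code = raw_input('Authorization code: ')
# 3. Exchange the code; this fills in token.access_token / token.refresh_token.
token.get_access_token(code)

gd_client = gdata.contacts.client.ContactsClient(source='GCv0.1')
gd_client = token.authorize(gd_client)
print gd_client.GetGroups()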
On my Raspberry Pi running Raspbian Jessie, I tried to go through the OAuth2 flow to connect a program to my Dropbox using the Dropbox SDK for Python, which I installed via pip.
For a test, I copied the code from the documentation (and defined the app key and secret, of course):
from dropbox import DropboxOAuth2FlowNoRedirect

auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)

authorize_url = auth_flow.start()
print "1. Go to: " + authorize_url
print "2. Click \"Allow\" (you might have to log in first)."
print "3. Copy the authorization code."
auth_code = raw_input("Enter the authorization code here: ").strip()

try:
    access_token, user_id = auth_flow.finish(auth_code)
except Exception, e:
    print('Error: %s' % (e,))
    return

dbx = Dropbox(access_token)
I was able to get the URL and to click Allow. When I then entered the authorization code, however, it printed the following error:
Error: 'str' object has no attribute 'copy'
Using format_exc from the traceback module, I got the following information:
Traceback (most recent call last):
File "test.py", line 18, in <module>
access_token, user_id = auth_flow.finish(auth_code)
File "/usr/local/lib/python2.7/dist-packages/dropbox/oauth.py", line 180, in finish
return self._finish(code, None)
File "/usr/local/lib/python2.7/dist-packages/dropbox/oauth.py", line 50, in _finish
url = self.build_url(Dropbox.HOST_API, '/oauth2/token')
File "/usr/local/lib/python2.7/dist-packages/dropbox/oauth.py", line 111, in build_url
return "https://%s%s" % (self._host, self.build_path(target, params))
File "/usr/local/lib/python2.7/dist-packages/dropbox/oauth.py", line 89, in build_path
params = params.copy()
AttributeError: 'str' object has no attribute 'copy'
It seems the build_path method expects a dict for 'params' but receives a string instead. Any ideas?
Thanks to smarx for his comment. The error is a known issue that will be fixed in version 3.42 of the SDK (source).