I defined a function, and when I use schedule I lose access to the admin page and other pages through http://127.0.0.1:8000/admin/: I just get a blank page. The scheduled job itself runs properly (I'm testing on localhost).
My code:

def search():
    ...

schedule.every(1).minutes.do(search)

while True:
    schedule.run_pending()
    time.sleep(1)
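One thing worth knowing here: schedule's run_pending() loop never returns, so if it runs in the same thread or process that serves Django, the server can't answer requests such as /admin/. A minimal sketch of one common workaround, assuming the loop lives alongside runserver, is to move it into a daemon thread:

import threading
import time

import schedule

def search():
    ...  # the job body is elided in the question

def run_scheduler():
    schedule.every(1).minutes.do(search)
    while True:
        schedule.run_pending()
        time.sleep(1)

# daemon=True means this thread won't keep the process alive on shutdown,
# and the main thread stays free to serve HTTP requests
threading.Thread(target=run_scheduler, daemon=True).start()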
I'm trying to build a simple website with Django, an Oracle DB, and the Django development server. When I query the DB in the Django shell:
mylist = person.objects.filter(name='Anon')
everything works fine. The same goes for a simple render in my views:

def index(request):
    return render(request, 'sos/index.html', {})
I get the basic site. But when I try to pass the query results to the template:
def index(request):
    mylist = person.objects.filter(name='Anon')
    return render(request, 'sos/index.html', {'mylist': mylist})
the server hangs. No matter which browser I use, the page just keeps connecting; all I can do is press Ctrl+C.
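A detail that may matter here: Django QuerySets are lazy, so the query in the view doesn't actually hit Oracle until the template iterates over mylist, which can make a database problem look like a hang at render time. A small sketch of forcing the query to run inside the view, so any DB error raises there instead:

from django.shortcuts import render
from .models import person  # adjust the import path to your app

def index(request):
    # list() evaluates the QuerySet immediately, so a problem talking to
    # Oracle raises here rather than stalling during template rendering
    mylist = list(person.objects.filter(name='Anon'))
    return render(request, 'sos/index.html', {'mylist': mylist})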
I developed a web server using the BaseHTTPServer module. In it I serve a login page (HTML), read the username and password from the form, and send them through a requests session:

request = requests.session()
s = request.get(username, password)
print s.text  # which is the session cookie

I log in successfully and can perform GET and POST operations. The problem is that when another user hits the URL after the login page, he picks up my session cookie and can perform all the same operations. How do I restrict that, and is there a way to handle multiple user sessions at a time when logging in to the SSO page (the URL I call in the GET method)?
import json
import getpass
import requests
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer

port = 8000

class S(BaseHTTPRequestHandler):
    Page = '''\
    # My login page code in HTML
    '''
    get = '''\
    # My GET login page HTML details form
    '''

    def _set_headers(self):
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()

    def do_GET(self):  # was do_Get; the handler only dispatches to do_GET
        if self.path == '/':
            self._set_headers()  # headers must be sent before the body
            self.wfile.write(self.Page)  # my login page
        # GET operations, which are working

    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        post_data_tmp = post_data.split('&')
        for querystring in post_data_tmp:
            if 'uname' in querystring:
                username = querystring.split('=')[1]
            if 'psw' in querystring:  # was 'querysting' (typo)
                password = querystring.split('=')[1]
        ssosession = xyz.sso.session(username, password)
        s = ssosession.get('xyzurl', verify=False)
        # NOTE: stored on the class itself, so every request handler --
        # i.e. every user -- shares this one cookie
        S.cookie = requests.utils.dict_from_cookiejar(ssosession)
        # I am passing this cookie to connect to the API and get the details

def run(server_class=HTTPServer, handler_class=S, port=port):
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print 'starting httpd...'
    httpd.serve_forever()

if __name__ == "__main__":
    from sys import argv
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
When the web browser starts, I open http://xyz:8080/ and the login page I created in HTML opens. After I submit the username and password, they are stored and sent to the SSO login page to get the cookie, and with that session I can use the site successfully.

The issue is that when another user hits the URL, he is never authenticated, yet he can perform operations with my session: if a second user opens the login page he can log in, but if he goes straight to any other page of this project he gets access through my session. Could anyone suggest how to create a session per client so each HTTP request is authenticated?
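One common pattern, sketched here rather than taken from any SSO library's API: give each browser its own random token via a Set-Cookie header and keep one SSO session per token, instead of the single shared S.cookie class attribute. SESSIONS and the 'session_id' cookie name below are made up for illustration (Python 2, to match the code above):

import uuid
from Cookie import SimpleCookie
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer

SESSIONS = {}  # token -> that user's own SSO cookie dict (or None)

class SessionHandler(BaseHTTPRequestHandler):

    def _get_token(self):
        # read our session token back from the browser's Cookie header
        cookie = SimpleCookie(self.headers.getheader('Cookie'))
        morsel = cookie.get('session_id')
        return morsel.value if morsel else None

    def do_GET(self):
        token = self._get_token()
        if token is None or token not in SESSIONS:
            # new browser: hand out a fresh token and show the login page
            token = uuid.uuid4().hex
            SESSIONS[token] = None  # not authenticated yet
            self.send_response(200)
            self.send_header('Set-Cookie', 'session_id=%s' % token)
            self.send_header('Content-Type', 'text/html')
            self.end_headers()
            self.wfile.write('<html><!-- login form here --></html>')
        elif SESSIONS[token] is None:
            # has a token but never completed the login POST
            self.send_response(403)
            self.end_headers()
            self.wfile.write('Please log in first.')
        else:
            # authenticated: use SESSIONS[token], this user's own SSO cookie
            self.send_response(200)
            self.end_headers()
            self.wfile.write('Hello, authenticated user.')

if __name__ == '__main__':
    HTTPServer(('', 8000), SessionHandler).serve_forever()

In do_POST you would then store the per-user SSO cookie in SESSIONS[token] rather than on the class, so a second user's requests never see the first user's session.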
I'm testing with Django's test framework and Selenium so I can run tests against a live browser, but it won't let me log in more than once.
I am using Django's StaticLiveServerTestCase so both static and media files are served live. I log in by visiting the login page. I've tried putting the login code in setUpClass (run once at the beginning) and in setUp (run before each test). Both only let me log in once.
Here's the code:
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.contrib.auth.models import User
from django.test import override_settings
from selenium import webdriver

@override_settings(DEBUG=True)
class LoginTest(StaticLiveServerTestCase):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # create user
        cls.user = User.objects.create_user('Testing User', 'somewhere@wherever.com', 'pwd')
        # initialize webdriver
        cls.driver = webdriver.Chrome()  # doesn't work on Firefox either
        cls.driver.set_window_size(1280, 800)
        cls.driver.implicitly_wait(5)
        # login - send username and password to login page
        cls.driver.get(cls.live_server_url + '/accounts/login/')
        cls.driver.implicitly_wait(10)
        username = cls.driver.find_element_by_name('login')
        username.clear()
        username.send_keys(cls.user.username)
        password = cls.driver.find_element_by_name('password')
        password.clear()
        password.send_keys("pwd")
        password.submit()  # submits form

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()  # quit after tests have run
        super().tearDownClass()

    def test_login_one(self):  # this test PASSES
        self.driver.get(self.live_server_url)  # go to home page
        login_menu = self.driver.find_element_by_id('login_menu')
        self.assertTrue(
            # if logged in, username is in the text of #login_menu
            self.user.username in login_menu.text
        )

    def test_login_two(self):  # this test FAILS
        self.driver.get(self.live_server_url)  # go to home page
        login_menu = self.driver.find_element_by_id('login_menu')
        self.assertTrue(
            # if logged in, username is in the text of #login_menu
            self.user.username in login_menu.text
        )
This code logs in once at the beginning, but I've also tried code that logs in each time a test is run (using setUp instead of setUpClass) and it still only lets me log in once.
Any idea what's going on?
Update:
I tried logging in a second time in test_login_two (the second test) and saw a "username and password not found" error in the Chrome window.
What you are trying to achieve here is the ability to log in twice, which is possible. You just have to check inside your test method for the presence of an element that only appears after login; if it's found, simply log out and let the remaining code do its work. Let me show you with a template what I mean:
@override_settings(DEBUG=True)
class LoginTest(StaticLiveServerTestCase):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        logInFunction()

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()  # quit after tests have run
        super().tearDownClass()

    def test_login_one(self):  # this test PASSES
        if checkForAlreadyLoggedInElement():
            logoutFunction()
        logInFunction()
        self.assertTrue(checkForAlreadyLoggedInElement())

    def test_login_two(self):
        if checkForAlreadyLoggedInElement():
            logoutFunction()
        logInFunction()
        logoutFunction()
        logInFunction()
        self.assertTrue(checkForAlreadyLoggedInElement())
Hope the template makes the approach clear. Let me know if you have any other doubts.
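As a hedged sketch of what checkForAlreadyLoggedInElement() could look like for this page (the 'login_menu' id comes from the question's own tests; the function signature is an assumption):

from selenium.common.exceptions import NoSuchElementException

def checkForAlreadyLoggedInElement(driver, username):
    # True if the page's #login_menu element exists and shows the username
    try:
        menu = driver.find_element_by_id('login_menu')
    except NoSuchElementException:
        return False
    return username in menu.text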
A year ago, I used Django's StreamingHttpResponse to stream a text file, and Chrome immediately displayed every chunk of text it received. Now, with the same code, Chrome only displays the text once it has completely loaded the file, which risks a server timeout. This does not happen with Firefox.
I created a simple test:
# views.py
import time

from django.http import StreamingHttpResponse
from django.views import generic

class TestEditView(generic.TemplateView):

    def generator(self):
        for _ in range(15):
            time.sleep(1)
            yield 'THIS IS {}\n'.format(_)
            print('LOG: THIS IS {}\n'.format(_))

    def get(self, request, *args, **kwargs):
        return StreamingHttpResponse(self.generator(),
                                     content_type="text/plain; charset=utf-8")
If I access that view in Firefox, the browser prints 'THIS IS ...' once a second for 15 seconds. But in Chrome, the browser waits 15 seconds and then prints all of the 'THIS IS ...' lines at once, even though the development server logs 'LOG: THIS IS ...' once a second.
I wonder if there is some subtlety to this problem that I missed. Thank you.
Python: 3.6.2.
Django: 1.10.5
Changing the content_type from "text/plain" to "text/html", or removing the content_type altogether, solves the problem: it makes Chrome render each chunk of text immediately after receiving it.
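Applied to the test view above, the change is one line (everything else from the question stays as-is):

def get(self, request, *args, **kwargs):
    # no explicit "text/plain" content type, so Chrome renders each
    # chunk as soon as it arrives
    return StreamingHttpResponse(self.generator())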
I'm trying to scrape a website that uses Ajax to load its pages.
Although my Selenium browser navigates through all the pages, the Scrapy response stays the same, so it ends up scraping the same response over and over (once per page).
Proposed solution:
I read in some answers that using

hxs = HtmlXPathSelector(self.driver.page_source)

lets you swap in the current page source and then scrape it. But it is not working for me, and after adding it the browser also stopped navigating.
Code:
def parse(self, response):
    self.driver.get(response.url)
    pages = int(response.xpath('//p[@class="pageingP"]/a/text()')[-2].extract())
    for i in range(pages):
        next = self.driver.find_element_by_xpath('//a[text()="Next"]')
        print response.xpath('//div[@id="searchResultDiv"]/h3/text()').extract()[0]
        try:
            next.click()
            time.sleep(3)
            # hxs = HtmlXPathSelector(self.driver.page_source)
            for sel in response.xpath("//tr/td/a"):
                item = WarnerbrosItem()
                item['url'] = response.urljoin(sel.xpath('@href').extract()[0])
                request = scrapy.Request(item['url'], callback=self.parse_job_contents,
                                         meta={'item': item}, dont_filter=True)
                yield request
        except:
            break
    self.driver.close()
Please help.
When using Selenium and Scrapy together, after having Selenium perform the click I've read the page back for Scrapy using
resp = TextResponse(url=self.driver.current_url, body=self.driver.page_source, encoding='utf-8')
That line would go where your HtmlXPathSelector line went. All the Scrapy code from that point to the end of the routine then needs to refer to resp (the page rendered after the click) rather than response (the page rendered before the click).
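As a sketch, here is how that could slot into the question's parse() loop (WarnerbrosItem and parse_job_contents are the question's own names; the wait is left as the simple sleep for now):

import time

import scrapy
from scrapy.http import TextResponse

def parse(self, response):
    self.driver.get(response.url)
    pages = int(response.xpath('//p[@class="pageingP"]/a/text()')[-2].extract())
    for i in range(pages):
        # re-read whatever page Selenium is showing right now
        resp = TextResponse(url=self.driver.current_url,
                            body=self.driver.page_source, encoding='utf-8')
        for sel in resp.xpath("//tr/td/a"):
            item = WarnerbrosItem()
            item['url'] = resp.urljoin(sel.xpath('@href').extract()[0])
            yield scrapy.Request(item['url'], callback=self.parse_job_contents,
                                 meta={'item': item}, dont_filter=True)
        # then advance to the next Ajax page (guard against a missing
        # "Next" link on the last page in real code)
        self.driver.find_element_by_xpath('//a[text()="Next"]').click()
        time.sleep(3)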
The time.sleep(3) may also give you issues, since it doesn't guarantee the page has actually loaded; it's just an unconditional wait. It might be better to use something like
WebDriverWait(self.driver, 30).until(test page has changed)
which waits until the page you are waiting for passes a specific test, such as finding the expected page number or manufacturer's part number.
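For example, one way to phrase "the page has changed" with Selenium's expected conditions (the searchResultDiv id is taken from the question's own XPath; staleness_of assumes the Ajax update replaces that node):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

old_results = self.driver.find_element_by_id('searchResultDiv')
next.click()
# blocks until the old node is detached from the DOM, i.e. the Ajax
# update actually replaced the results, up to a 30 second timeout
WebDriverWait(self.driver, 30).until(EC.staleness_of(old_results))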
I'm not sure what the impact of closing the driver at the end of every pass through parse() is. I've used the following snippet in my spider to close the driver once, when the spider itself is closed:

from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from selenium import webdriver

def __init__(self, filename=None):
    # wire us up to selenium
    self.driver = webdriver.Firefox()
    dispatcher.connect(self.spider_closed, signals.spider_closed)

def spider_closed(self, spider):
    self.driver.close()
Selenium isn't connected to Scrapy or its response object in any way, and in your code I don't see you changing the response object.
You'll have to work with them independently.