BeautifulSoup web table scraping - python-2.7

from urllib2 import urlopen, Request
from bs4 import BeautifulSoup
site = 'https://racing.hkjc.com/racing/information/English/racing/LocalResults.aspx/'
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site, headers=hdr)
res = urlopen(req)
rawpage = res.read()
page = rawpage.replace("<!-->", "")
soup = BeautifulSoup(page, "html.parser")
table = soup.find("table", {"class":"f_tac table_bd draggable"})
print (table)
This works perfectly and I get a table as output, until I change the URL to the next page, when I get nothing (None):
'https://racing.hkjc.com/racing/information/English/Racing/LocalResults.aspx?RaceDate=2020/03/14&Racecourse=ST&RaceNo=2'
Please help: what is wrong with the URL or the code?

You must add the query string to the end of the URL. For example, to fetch the table from page 2:
site ='https://racing.hkjc.com/racing/information/English/racing/LocalResults.aspx/?RaceDate=2020/03/14&Racecourse=ST&RaceNo=2'
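If you build these URLs repeatedly, here is a minimal sketch (assuming the same endpoint and parameters as above) that assembles the query string with urllib on Python 2.7:
from urllib import urlencode  # urllib.parse.urlencode on Python 3

base = 'https://racing.hkjc.com/racing/information/English/racing/LocalResults.aspx'
params = {'RaceDate': '2020/03/14', 'Racecourse': 'ST', 'RaceNo': 2}
# note: urlencode escapes the slashes in RaceDate as %2F; if the server
# rejects the escaped form, interpolate the date into the string manually
site = base + '?' + urlencode(params)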

Related

BeautifulSoup unable to get Inner tags

I am currently trying to scrape product data from lazada.sg using bs4 with the code below.
from bs4 import BeautifulSoup
import requests

url = "https://www.lazada.sg/shop-mobiles/"
page = requests.get(url)
content = page.text  # read html
soup = BeautifulSoup(content, 'html.parser')
products = soup.find_all("div", {"class": "c16H9d"})  # find div tags containing product details

with open("test.txt", 'w') as f:
    f.write(str(products))
However the output in test.txt is just [].
I found that the above class is inside <div id="root">, which I extracted.
How will I be able to access the 'inner div tags'?
The data is loaded dynamically from a script tag. You can regex it out and view it with the json library. You will presumably need to adjust the print line for Python 2.7:
import requests, re, json, pprint

r = requests.get('https://www.lazada.sg/shop-mobiles/')
p = re.compile(r'window.pageData=(.*)<')
data = json.loads(p.findall(r.text)[0])

for item in data['mods']['listItems']:
    pprint.pprint(item)
    break  # delete me later
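For what it's worth, pprint.pprint already runs unchanged on Python 2.7; the adjustment only matters if you switch to bare print calls, where a __future__ import keeps one syntax working on both versions:
from __future__ import print_function  # must be the first statement in the file

for item in data['mods']['listItems']:
    print(item)  # a function call on both 2.7 and 3.x
    break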

Need to scrape the data using BeautifulSoup

I need to get the celebrity details from https://www.astrotheme.com/celestar/horoscope_celebrity_search_by_filters.php
Input: time of birth set to "as known only", with every profession except world events, which returns around 22,822 celebrities. I am able to get the first page of data using urllib2 and bs4:
import re
import urllib2
from bs4 import BeautifulSoup

url = "https://www.astrotheme.com/celestar/horoscope_celebrity_search_by_filters.php"
data = "sexe=M|F&categorie[0]=0|1|2|3|4|5|6|7|8|9|10|11|12&connue=1&pays=-1&tri=0&x=33&y=13"
fp = urllib2.urlopen(url, data)
soup = BeautifulSoup(fp, 'html.parser')
from_div = soup.find_all('div', attrs={'class': 'titreFiche'})

for major in from_div:
    name = re.findall(r'portrait">(.*?)<br/>', str(major))
    link = re.findall(r'<a href="(.*?)"', str(major))
    print name[0], link[0]
For the next 230 pages, I am unable to get the data. I tried changing the URL with the page number up to the end, but I can't scrape them. Is there any way to get the remaining data from that page?
You need session cookies; use requests to keep the session easily:
from bs4 import BeautifulSoup
import requests, re

url = "https://www.astrotheme.com/celestar/horoscope_celebrity_search_by_filters.php"
searchData = {
    "sexe": "M|F",
    "categorie[0]": "0|1|2|3|4|5|6|7|8|9|10|11|12",
    "connue": 1, "pays": -1, "tri": 0, "x": 33, "y": 13
}
session = requests.session()

def doSearch(url, data=None):
    if data:
        fp = session.post(url, data=data).text
    else:
        fp = session.get(url).text
    soup = BeautifulSoup(fp, 'html.parser')
    from_div = soup.find_all('div', attrs={'class': 'titreFiche'})
    for major in from_div:
        name = re.findall(r'portrait">(.*?)<br/>', str(major))
        link = re.findall(r'<a href="(.*?)"', str(major))
        print name[0], link[0]

# do the POST search in the first request
doSearch(url, searchData)

# we now have a session and can use GET requests for the next pages
for index in range(2, 4):  # get pages 2 to 3
    print('getting page: %s' % index)
    pageurl = '%s?page=%s' % (url, index)
    print(pageurl)
    doSearch(pageurl)
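The question mentions roughly 230 result pages; once the first POST has established the session, the same loop can simply be widened (an untested sketch, reusing doSearch from above):
for index in range(2, 231):  # pages 2 through 230
    doSearch('%s?page=%s' % (url, index))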

Beautiful Soup - Unable to scrape links from paginated pages

I'm unable to scrape the links of the articles on the paginated webpages. Additionally, I sometimes get a blank screen as my output. I am unable to find the problem in my loop, and the CSV file doesn't get created.
from pprint import pprint
import requests
from bs4 import BeautifulSoup
import lxml
import csv
import urllib2

def get_url_for_search_key(search_key):
    for i in range(1,100):
        base_url = 'http://www.thedrum.com/'
        response = requests.get(base_url + 'search?page=%s&query=' + search_key +'&sorted=')%i
        soup = BeautifulSoup(response.content, "lxml")
        results = soup.findAll('a')
        return [url['href'] for url in soup.findAll('a')]

pprint(get_url_for_search_key('artificial intelligence'))

with open('StoreUrl.csv', 'w+') as f:
    f.seek(0)
    f.write('\n'.join(get_url_for_search_key('artificial intelligence')))
Are you sure that you need only the first 100 pages? Maybe there are more of them...
My take on your task is below; this collects links from all pages and also reliably catches the next-page button links:
import requests
from bs4 import BeautifulSoup

base_url = 'http://www.thedrum.com/search?sort=date&query=artificial%20intelligence'
response = requests.get(base_url)
soup = BeautifulSoup(response.content, "lxml")

res = []
while 1:
    res.append([url['href'] for url in soup.findAll('a')])
    next_button = soup.find('a', text='Next page')
    if not next_button:
        break
    response = requests.get(next_button['href'])
    soup = BeautifulSoup(response.content, "lxml")
EDIT: alternative approach for collecting only article links:
import requests
from bs4 import BeautifulSoup

base_url = 'http://www.thedrum.com/search?sort=date&query=artificial%20intelligence'
response = requests.get(base_url)
soup = BeautifulSoup(response.content, "lxml")

res = []
while 1:
    search_results = soup.find('div', class_='search-results')  # localize the search window with article links
    article_link_tags = search_results.findAll('a')  # the ordinary scheme goes further
    res.append([url['href'] for url in article_link_tags])
    next_button = soup.find('a', text='Next page')
    if not next_button:
        break
    response = requests.get(next_button['href'])
    soup = BeautifulSoup(response.content, "lxml")
To print the links, use:
for i in res:
    for j in i:
        print(j)
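Since the CSV file in the question never got created, here is a minimal sketch for writing the collected links out (assuming the res list built above and the StoreUrl.csv filename from the question):
import csv

with open('StoreUrl.csv', 'wb') as f:  # 'wb' mode for the csv module on Python 2.7
    writer = csv.writer(f)
    for page_links in res:
        for link in page_links:
            writer.writerow([link])  # one link per row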

How to get particular tag data from a URL in Python with urllib2

I'm very new to Python 2.7 and I have a task to read a table at a URL.
I'm getting the data from the URL with the table; the issue is that I need only the data, but I am getting it with the tags as well.
Please help me. Thank you in advance.
from bs4 import BeautifulSoup
import urllib2

response = urllib2.urlopen('https://www.somewebsite.com/')
html = response.read()
soup = BeautifulSoup(html)
tabulka = soup.find("table", {"class": "defaultTableStyle tableFontMD tableNoBorder"})
records = []

for row in tabulka.findAll('tr'):
    col = row.findAll('td')
    print col
You have to use the .text attribute:
from bs4 import BeautifulSoup
import urllib2

response = urllib2.urlopen('https://www.somewebsite.com/')
html = response.read()
soup = BeautifulSoup(html)
tabulka = soup.find("table", {"class": "defaultTableStyle tableFontMD tableNoBorder"})
records = []

for row in tabulka.findAll('tr'):
    col = row.findAll('td')
    print [coli.text for coli in col]
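Note that records is declared in both snippets but never filled. If you want to keep the rows rather than just print them, a small extension of the same loop:
records = []
for row in tabulka.findAll('tr'):
    col = row.findAll('td')
    records.append([coli.text for coli in col])  # store each row's cell text
print records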

print soup.prettify producing blank output

I have tried the following code to extract the page content.
from bs4 import BeautifulSoup
import urllib3
http = urllib3.PoolManager()
url = 'http://www.thefamouspeople.com/singers.php'
response = http.request('GET', url)
html = response.read()
soup = BeautifulSoup(html)
print soup.prettify()
But I am getting blank output. Am I missing something?
You should try adding html = str(html) after html = response.read(), because response.read() returns bytes.
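A minimal sketch of that fix applied to the question's urllib3 flow. Note that urllib3 preloads the body by default into response.data (which may be why read() came back empty here), and decoding explicitly is usually cleaner than wrapping in str():
from bs4 import BeautifulSoup
import urllib3

http = urllib3.PoolManager()
response = http.request('GET', 'http://www.thefamouspeople.com/singers.php')
html = response.data.decode('utf-8')       # bytes -> text; assumes the page is UTF-8
soup = BeautifulSoup(html, 'html.parser')  # naming a parser also avoids the bs4 guessing warning
print soup.prettify()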