I have the following code and I need it to collect absolute links rather than relative links.
I believe I need to use urlparse and urljoin somewhere in here, but I'm just not sure where.
The .csv from this code is also giving me rows like "/about.html", which is obviously not a link to another web page.
import urllib
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
import re

r = urllib.urlopen('https://www.census.gov/programs-surveys/popest.html')
soup = BeautifulSoup(r, "lxml")

links = []
for link in soup.findAll('a', attrs={'href': re.compile(r'(^http|.html)')}):
    links.append(link.get('href'))

web_links_df = pd.DataFrame(links)
web_links_df.columns = ['web_link']
web_links_df['web_link'] = web_links_df['web_link'].apply(lambda x: x.rstrip('/'))
url_tail = web_links_df['web_link'].apply(lambda x: x[-4:])

web_links = pd.DataFrame(web_links_df['web_link'].unique())
web_links.columns = ['web_link']

print web_links.head()
web_links.to_csv("D:/MLCV/web_links_1.csv")
Any help would be greatly appreciated. I have spent hours going through other examples on Stack Overflow but I am just not getting the correct results.
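For reference, one place urljoin could be used is right where each href is collected. Here is a minimal sketch, assuming the page URL from the question as the base (on Python 2 urljoin lives in the urlparse module; on Python 3 it is urllib.parse.urljoin):
from urlparse import urljoin  # Python 2; on Python 3: from urllib.parse import urljoin

base_url = 'https://www.census.gov/programs-surveys/popest.html'

links = []
for link in soup.findAll('a', attrs={'href': re.compile(r'(^http|.html)')}):
    # urljoin leaves absolute hrefs untouched and resolves relative ones
    # such as "/about.html" against base_url
    links.append(urljoin(base_url, link.get('href')))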
I am trying to make a Friday-like virtual assistant using this code:
import os
from gtts import gTTS
import time
import playsound
import speech_recognition as sr

while True:
    def speak(text):
        tts = gTTS(text=text, lang="en")
        filename = "voice.mp3"
        tts.save(filename)
        playsound.playsound(filename)

    def get_audio():
        r = sr.Recognizer()
        with sr.Microphone() as source:
            audio = r.listen(source)
            said = ""
            try:
                said = r.recognize_google(audio)
                print(said)
            except Exception as e:
                print("Exception: " + str(e))
        return said

    text = get_audio()
    if "who are you" in text:
        speak(" I am Monday the virtual assistant")
And I was wondering how to add Wolfram Alpha to it, so that I could say "search for ..." and it would speak the answer from Wolfram Alpha.
Any help would be amazing :)
Install the wolframalpha package (pip install wolframalpha), then add the following to your code:
import wolframalpha

if 'search for ' in text:
    text = text.replace("search for ", "")
    client = wolframalpha.Client(app_id)
    res = client.query(text)
    print(next(res.results).text)
    speak(next(res.results).text)
To use the API, you have to go to the Wolfram Alpha developer homepage, sign up for an account, create an app, and get an app ID.
To avoid getting any errors, keep the indentation in your 'speak' function uniform.
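For reference, a minimal sketch of how the snippet might sit inside the loop; app_id is a placeholder for your own Wolfram Alpha app ID, and speak() and get_audio() are the functions from the question, assumed here to be defined before the loop:
import wolframalpha

app_id = "YOUR_APP_ID"  # placeholder; use the app ID from your Wolfram Alpha account
client = wolframalpha.Client(app_id)

while True:
    text = get_audio()
    if "who are you" in text:
        speak("I am Monday the virtual assistant")
    if "search for " in text:
        query = text.replace("search for ", "")
        res = client.query(query)
        answer = next(res.results).text
        print(answer)
        speak(answer)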
I am using Selenium to go to a website, click the search button, and type in a zip code that I enter beforehand. For that zip code, I want to take the link the webpage produces and feed it to my web scraper built with Beautiful Soup, so that once the link comes up I can scrape the required data and produce my CSV.
What I want:
I am having trouble passing that link to Beautiful Soup as its URL. I basically want to automate the whole thing so that I just have to enter a zip code and it gives me my CSV.
What I am able to get:
I am able to enter the zip code and search using Selenium, and then add that URL to my scraper to produce the CSV.
Code I am using for Selenium:
from selenium import webdriver
import time
import requests
import pandas as pd
from bs4 import BeautifulSoup

driver = webdriver.Chrome('/Users/akashgupta/Desktop/Courses and Learning/Automating Python and scraping/chromedriver')
driver.get('https://www.weather.gov/')

messageField = driver.find_element_by_xpath('//*[@id="inputstring"]')
messageField.click()
messageField.send_keys('75252')
time.sleep(3)

showMessageButton = driver.find_element_by_xpath('//*[@id="btnSearch"]')
showMessageButton.click()

# web scraping part:
url = "https://forecast.weather.gov/MapClick.php?lat=32.99802500000004&lon=-96.79775499999994#.Xo5LnFNKgWo"
res = requests.get(url)
soup = BeautifulSoup(res.content, 'html.parser')

tag = soup.find_all('div', id='seven-day-forecast-body')
weekly = soup.find_all(class_='tombstone-container')
main = soup.find_all(class_='period-name')
description = soup.find_all(class_='short-desc')
temp = soup.find_all(class_='temp')

Period_Name = []
Desc = []
Temp = []
for a in range(0, len(main)):
    Period_Name.append(main[a].get_text())
    Desc.append(description[a].get_text())
    Temp.append(temp[a].get_text())

df = pd.DataFrame(list(zip(Period_Name, Desc, Temp)), columns=['Period_Name', 'Short_Desc', 'Temperature'])
from selenium import webdriver
import time
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

driver = webdriver.Chrome('chromedriver.exe')
driver.get('https://www.weather.gov/')

messageField = driver.find_element_by_xpath('//*[@id="inputstring"]')
messageField.click()
messageField.send_keys('75252')
time.sleep(3)

showMessageButton = driver.find_element_by_xpath('//*[@id="btnSearch"]')
showMessageButton.click()

# wait until the URL matches the forecast page pattern
WebDriverWait(driver, 10).until(EC.url_contains("https://forecast.weather.gov/MapClick.php"))
currentURL = driver.current_url
print(currentURL)
time.sleep(3)
driver.quit()

# web scraping part:
res = requests.get(currentURL)
....
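A sketch of how the elided part might continue, reusing the scraping code from the question and writing the result to a CSV (the output filename is just an example):
import requests
import pandas as pd
from bs4 import BeautifulSoup

res = requests.get(currentURL)
soup = BeautifulSoup(res.content, 'html.parser')

# Same selectors as in the question
main = soup.find_all(class_='period-name')
description = soup.find_all(class_='short-desc')
temp = soup.find_all(class_='temp')

rows = [(m.get_text(), d.get_text(), t.get_text())
        for m, d, t in zip(main, description, temp)]
df = pd.DataFrame(rows, columns=['Period_Name', 'Short_Desc', 'Temperature'])
df.to_csv('weather_forecast.csv', index=False)  # example filename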
I am pulling PNG images from Jupyter notebooks and manage to display them with IPython.display.Image, but not with matplotlib.pyplot. What am I missing? I use Python 2.7.
I am using the following algorithm:
To open the notebook JSON content I do:
import nbformat
notebook_ = nbformat.read(file_notebook, 4)
After retrieving the relevant cell information I pull the png information from it using:
def cell_to_image(cell, out_value_item_number=1):
    if "execution_count" in cell.keys():  # i.e. version >= 4
        return cell["outputs"][out_value_item_number]['data']['image/png']
    elif "prompt_number" in cell.keys():  # i.e. version < 4
        return cell["outputs"][out_value_item_number]['png']
    return None

cell_image = cell_to_image(cell)
The first few characters of cell_image (which is unicode) look like:
iVBORw0KGgoAAAANSUhEUgAAA64AAAFMCAYAAADLFeHSAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\n
AAALEgAACxIB0t1+/AAAIABJREFUeJzs3Xd8jef/x/HXyTjZiYQkCGrU3ruR0tr9oq2qGtGo0dbe
\nm5pVlJpFUSMoVb6UoEZ/lCpatWuPUiNEEiMDmef3R75OexonJKUO3s/HI4/mXPd1X/d1f+LRR965
\n7/u6DSaTyYSIiIiIiIiIjbJ70hMQERERERERyYiCq4iIiIiIiNg0BVcRERERERGxaQquIiIiIiIi
\nYtMUXEVERERERMSmKbiKiIiIiIiITVNwFRGRxyIkJIRixYqxfv36+24/e/YsxYoVo3jx4v/yzGxb
\naGgoderUIS4uDoBdu3bRsmVLKlasyCuvvMKgQYOIjo622CcsLIyGDRtSunRp6tSpw8KFC62OW7p0
\naRo2bJju53Lnzh1GjRrFyy+/TNmyZWnRogW//fbbQ835q6++olGjRpQvX5769eszc+ZMkpOTzdtT
\nU1OZNGkSNWrUoHTp0jRp0oTdu3enGyc2NpZOn
I can easily display it in my Jupyter notebook using:
from IPython.display import Image
Image(cell_image)
And now to my question:
How can I manipulate cell_image to be plt.subplot friendly?
(Assuming import matplotlib.pyplot as plt).
I realise that plt.imshow wouldn't work directly because it requires an array, and what I have is a string, as far as I understand.
If you have your image string representation in a variable string_rep, the following code should work.
from io import BytesIO
import matplotlib.image as mpimage
import matplotlib.pyplot as plt

with BytesIO(string_rep.decode('base64')) as byte_rep:
    image = mpimage.imread(byte_rep)

plt.imshow(image)
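The question targets Python 2.7, where str.decode('base64') is available. If the same thing is ever needed on Python 3, base64.b64decode is the equivalent; a sketch, still assuming cell_image holds the base64 string pulled from the notebook JSON:
import base64
from io import BytesIO

import matplotlib.image as mpimage
import matplotlib.pyplot as plt

with BytesIO(base64.b64decode(cell_image)) as byte_rep:
    image = mpimage.imread(byte_rep, format='png')

plt.imshow(image)
plt.show()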
I wanted to create a database of commonly used words. Right now when I run this script it works, but my biggest issue is that I need all of the words to be in one column. I feel like what I did was more of a hack than a real fix. Using BeautifulSoup, how can I write everything into one column without extra blank lines?
import requests
import re
from bs4 import BeautifulSoup

# Website you want to scrape info from
res = requests.get("https://github.com/first20hours/google-10000-english/blob/master/google-10000-english-usa.txt")
# Getting just the content using bs4
soup = BeautifulSoup(res.content, "lxml")
# Creating the CSV file
commonFile = open('common_words.csv', 'wb')
# Grabbing the lines you want
for node in soup.findAll("tr"):
    # Getting just the text and removing the html
    words = ''.join(node.findAll(text=True))
    # Removing the extra lines
    ID = re.sub(r'[\t\r\n]', '', words)
    # Needed to add a break in the line to make the rows
    update = ''.join(ID) + '\n'
    # Now we add this to the file
    commonFile.write(update)
commonFile.close()
How about this?
import requests
import csv
from bs4 import BeautifulSoup

f = csv.writer(open("common_words.csv", "w"))
f.writerow(["common_words"])

# Website you want to scrape info from
res = requests.get("https://github.com/first20hours/google-10000-english/blob/master/google-10000-english-usa.txt")
# Getting just the content using bs4
soup = BeautifulSoup(res.content, "lxml")

words = soup.select('div[class=file] tr')
for i in range(len(words)):
    word = words[i].text
    f.writerow([word.replace('\n', '')])
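As a side note on the design choice: since the file is plain text, it may be simpler to skip the HTML parsing entirely and fetch the raw file. A sketch, assuming the usual raw.githubusercontent.com URL for that repository:
import csv
import requests

# Assumed raw-text URL, derived from the blob link in the question
raw_url = ("https://raw.githubusercontent.com/first20hours/"
           "google-10000-english/master/google-10000-english-usa.txt")

res = requests.get(raw_url)
with open("common_words.csv", "w") as f:
    writer = csv.writer(f)
    writer.writerow(["common_words"])
    for word in res.text.splitlines():
        if word.strip():
            writer.writerow([word.strip()])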
I'm trying to extract features from a text document. Here is my code:
import sklearn
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer

files = sklearn.datasets.load_files('/home/niyas/Documents/project/container', shuffle=False)
vectorizer = CountVectorizer(min_df=1)
X = vectorizer.fit_transform(files.data[1])
Y = vectorizer.get_feature_names()
I'm getting the error "ValueError: empty vocabulary; perhaps the documents only contain stop words". The code works fine when I pass a string with the exact same content as the text doc.
Help me. Thanks in advance.
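A likely cause, though hard to confirm without the data: fit_transform expects an iterable of documents, so passing a single string/bytes object such as files.data[1] makes CountVectorizer treat each character as its own document, and single characters are dropped by the default token pattern, which leaves an empty vocabulary. A sketch of the usual fix, keeping the same path from the question:
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer

files = load_files('/home/niyas/Documents/project/container', shuffle=False)
vectorizer = CountVectorizer(min_df=1)

# Fit on the whole corpus (a list of documents)...
X = vectorizer.fit_transform(files.data)

# ...or on a single document, wrapped in a list so it counts as one document
# X = vectorizer.fit_transform([files.data[1]])

Y = vectorizer.get_feature_names()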