print if list index out of range - python-2.7

Hi all, I'm trying to handle a "list index out of range" error, but I don't seem to be having any luck.
import json, urllib, re
from urllib import urlencode
import googlemaps
import tempfile
import win32api
import win32print

start = "Adelaide, South Australia"
finish = " ghkjffzh, south Australia "
url = 'http://maps.googleapis.com/maps/api/directions/json?%s' % urlencode((
    ('origin', start),
    ('destination', finish)
))
ur = urllib.urlopen(url)
result = json.load(ur)
filename = "output.txt"
with open(filename, 'w') as output:
    for i in range(0, len(result['routes'][0]['legs'][0]['steps'])):
        try:
            s = (result['routes'][0]['legs'][0]['steps'][i]['html_instructions'])
            d = (result['routes'][0]['legs'][0]['steps'][i]['distance']['text'])
            l = (result['routes'][0]['legs'][0]['steps'][i]['duration']['text'])
            s = re.sub('<[A-Za-z\/][^>]*>', '', s)
            output.writelines(s + " " + d + " " + l + '\n')
        except Exception:
            print "Directions could not be printed"
            output.write("Directions could not be given due to the format of page or the address type")
But nothing is written to the .txt file and I still get the error.
I've tried replacing Exception with IndexError and ValueError, but there is no change.

Solved: by exploring the returned JSON result I found a status field, so I check that first.
with open(filename, 'w') as output:
    if result['status'] == "NOT_FOUND":
        output.write("no directions available")
    else:
        for i in range(0, len(result['routes'][0]['legs'][0]['steps'])):
            s = (result['routes'][0]['legs'][0]['steps'][i]['html_instructions'])
            d = (result['routes'][0]['legs'][0]['steps'][i]['distance']['text'])
            l = (result['routes'][0]['legs'][0]['steps'][i]['duration']['text'])
            s = re.sub('<[A-Za-z\/][^>]*>', '', s)
            output.writelines(s + " " + d + " " + l + '\n')
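For what it's worth, the original try/except probably never caught the IndexError because result['routes'][0] is evaluated in the for line itself, outside the try block. A minimal sketch of the same fix, assuming the same JSON layout returned by the Directions API, that checks the status and iterates over the steps directly:
import json, re, urllib
from urllib import urlencode

start = "Adelaide, South Australia"
finish = " ghkjffzh, south Australia "
url = 'http://maps.googleapis.com/maps/api/directions/json?%s' % urlencode((
    ('origin', start),
    ('destination', finish)
))
result = json.load(urllib.urlopen(url))

with open("output.txt", 'w') as output:
    # The API reports problems via 'status' (e.g. "NOT_FOUND", "ZERO_RESULTS"),
    # so check it before touching result['routes'][0].
    if result['status'] != "OK":
        output.write("no directions available\n")
    else:
        for step in result['routes'][0]['legs'][0]['steps']:
            text = re.sub('<[^>]*>', '', step['html_instructions'])
            output.write(text + " " + step['distance']['text'] + " "
                         + step['duration']['text'] + '\n')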

Related

Timeout not working in ProcessPoolExecutor map function

I am trying to implement a timeout in my ProcessPoolExecutor, but it seems that when I run the code the timeout just doesn't take effect and the process continues as if the timeout didn't exist in the first place.
Here is the code:
from concurrent.futures import ProcessPoolExecutor
import angr
from angr.analyses import (
    CFGFast,
    Decompiler
)
from claripy import *
import os
#import ipdb
#ipdb.set_trace()

def decompile(filename):
    p = angr.Project('/Path/' + filename, auto_load_libs=False, load_debug_info=True)
    cfg = p.analyses[CFGFast].prep()(data_references=True, normalize=True)
    for f in cfg.functions.values():
        #print(f.name + " " + filename)
        name = filename + " " + f.name
        filepath = os.path.join('/path/' + name)
        try:
            dec = p.analyses[Decompiler].prep()(f, cfg=cfg.model)
            with open(filepath, "w") as f:
                f.write(dec.codegen.text)
        except AttributeError:
            with open('/path', "a") as a:
                a.write(name + "\n")
        except ValueError:
            with open('/path', "a") as a:
                a.write(name + "\n")
        except ClaripyOperationError:
            with open('/path', "a") as a:
                a.write(name + "\n")
        except TypeError:
            with open('/path', "a") as a:
                a.write(name + "\n")

if __name__ == '__main__':
    with ProcessPoolExecutor(4) as executor:
        for result in executor.map(decompile, os.listdir('/path'), timeout=5):
            print(result)
I think there must be some problem in the for loop as the result variable doesn't print. I would appreciate any help. Thanks!

Getting the connected components in networkx in the order of which edges are added

Each sentence in doc2 is displayed as a graph. The edges were added in the form s-o-v from the respective subject_list, object_list and verb_list.
I have tried to display the connected components, but the order in which the sentences are displayed is not the order in which the edges were added.
# This Python file uses the following encoding: utf-8
%matplotlib notebook
import codecs
import itertools
import re
import networkx as nx
import matplotlib.pyplot as pl
from matplotlib.font_manager import FontProperties

prop = FontProperties()
graph = nx.Graph()
labels = {}
each_one = []
list_of_sentences = []
subject_list = []
object_list = []
verb_list = []
newDict = {}

with codecs.open('doc2.txt', encoding='utf-8') as f:
    text = f.read()
sentences = re.split(r' *[\.\?!][\'"\)\]]* *', text)
for stuff in sentences:
    list_of_sentences.append(stuff)
new_list_of_sentences = []
for d in list_of_sentences:
    s = d.replace(u'वतीन', '').replace(u'आनी', '').replace(u'हिणें', '').replace(',', '')
    new_list_of_sentences.append(s)
f = open('doc2_tag.txt', 'r')
for line in f:
    k, v = line.strip().split('/')
    newDict[k.strip().decode('utf-8')] = v.strip()
f.close()
for sentence in new_list_of_sentences:
    a = b = c = ""
    sentence_word_list = sentence.split()
    for word in sentence_word_list:
        if newDict[word] == 'N-NNP':
            a += word + " "
        if newDict[word] == 'N-NN':
            b += word + " "
        if newDict[word] == 'JJ':
            b += word + " "
        if newDict[word] == 'QT-QTC':
            b += word + " "
        if newDict[word] == 'RB':
            b += word + " "
        if newDict[word] == 'N-NST':
            b += word + " "
        if newDict[word] == 'PR-PRP':
            b += word + " "
        if newDict[word] == 'PSP':
            b += word + " "
        if newDict[word] == 'CC-CCD':
            b += word + " "
        if newDict[word] == 'V-VM-VF':
            c += word + " "
    subject_list.append(a)
    object_list.append(b)
    verb_list.append(c)
konkani_dict = {u'सनरायझर्साक': u'सनरायझर्स', u'सनरायझर्सान': u'सनरायझर्स', u'सनरायझर्साच्या': u'सनरायझर्स'}
for idx, sub in enumerate(subject_list):
    temp_list = sub.split(" ")
    for i in temp_list:
        if i in konkani_dict:
            new_sub = sub.replace(i, konkani_dict[i])
            subject_list[idx] = new_sub
for s in subject_list:
    if s is not "":
        graph.add_node(s)
        labels[s] = s
for o in object_list:
    if o is not "":
        graph.add_node(o)
        labels[b] = b
for v in verb_list:
    if v is not "":
        graph.add_node(v)
        labels[v] = v
for (s, o, v) in zip(subject_list, object_list, verb_list):
    if s and o is not "":
        graph.add_edge(s, o)
    if o and v is not "":
        graph.add_edge(o, v)
pos = nx.spring_layout(graph, k=0.15, iterations=20)
nx.draw(graph, with_labels=True, font_family="Nirmala UI", node_size=40, font_size=9, node_color="darkblue")
pl.show()
sentences = []
for component in nx.connected_components(graph):
    g = (
        filter(
            lambda x: x[0] in component and x[1] in component,
            graph.edges
        )
    )
    p = []
    p = ''.join(item for tuple_ in g for item in tuple_)
    print p
    sentences.append(p)
print sentences
output = []
for i in sentences:
    inputWords = i.split(" ")
    inputWords = inputWords[-1::-1]
    output = ' '.join(inputWords)
    print output
The expected output is something like this:
शिखर धवनान सगळ्यांत चड ४५ धांवड्यो केल्यो ,
सनरायझर्स दीपर हुडा जैतांत पर्जळ्ळो
This is the output I get: [screenshot: sentences displayed]
networkx doesn't store the order in which nodes/edges were created, because this information is mostly useless. If you want that information, you should add it manually. In your program, for example (for the edges):
edge_index = 0
for (s, o, v) in zip(subject_list, object_list, verb_list):
    if s and o is not "":
        graph.add_edge(s, o, index=edge_index)
        edge_index += 1
    if o and v is not "":
        graph.add_edge(o, v, index=edge_index)
        edge_index += 1
Then you can print the edges sorted by that index:
sorted(                                # sorted list of edges
    list(graph.edges.data('index')),   # with their 'index' data
    key=lambda x: x[2]                 # ordered by the 'index' data
)
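A minimal, self-contained sketch of this idea (with hypothetical node names rather than the Konkani data) that recovers the edges of each connected component in insertion order:
import networkx as nx

graph = nx.Graph()

# Tag every edge with the position at which it was added.
for edge_index, (u, v) in enumerate([("s1", "o1"), ("o1", "v1"),
                                     ("s2", "o2"), ("o2", "v2")]):
    graph.add_edge(u, v, index=edge_index)

for component in nx.connected_components(graph):
    # Keep the edges whose endpoints both lie in this component,
    # then sort them back into the order they were added.
    component_edges = sorted(
        (e for e in graph.edges.data('index')
         if e[0] in component and e[1] in component),
        key=lambda e: e[2]
    )
    print(' '.join(u + ' ' + v for u, v, _ in component_edges))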

"Unboundlocalerror: Local Variable "Val" Referenced before Assignment" Error

I have been trying to get my script to loop in such a way that it loads the output into file 1, and when it has finished loading everything it moves the values to output file 2, erases the values in output file 1 and starts reloading them; when those are done it moves the values into output file 2 again (overwriting the old ones), and so on.
I have been fairly successful so far, but I keep getting an "UnboundLocalError: local variable 'val' referenced before assignment" error seemingly at random partway through the loading process. With a very small input file the script performs how I want.
Does anyone know how I can change my script to fix that error? I have tried to understand why it is happening but cannot.
I have tried to research it thoroughly, but none of the suggestions I found have worked (or I implemented them incorrectly). I have attached my script. Thanks!
import urllib2, re, urllib, urlparse, csv, sys, time, threading, codecs, shutil
from bs4 import BeautifulSoup

def extract(url):
    try:
        sys.stdout.write('0')
        # global file
        page = urllib2.urlopen(url).read()
        soup = BeautifulSoup(page, 'html.parser')
        product = soup.find("div", {"class": "js-product-price"})
        price = product.findNext('div', {'class': 'js-price-display'}).getText().strip()
        oos = product.findNext('p', attrs={'class': "price-oos"})
        if oos is None:
            oos = 'In Stock'
        else:
            oos = oos.getText()
        val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
        # ifile.write(val)
        sys.stdout.write('1')
    except Exception as e:
        print e
    return val

while True:
    ifile = open('output.csv', "w", 0)
    inputs = csv.reader(open('input.csv'))
    # inputs = csv.reader(codecs.open('input.csv', 'rU', 'utf-16'))
    ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
    for i in inputs:
        ifile.write(extract(i[0]))
    ifile.close()
Update:
Thanks for the help guys! This is my new script:
import urllib2, re, urllib, urlparse, csv, sys, time, threading, codecs, shutil
from bs4 import BeautifulSoup

def extract(url):
    try:
        sys.stdout.write('0')
        # global file
        page = urllib2.urlopen(url).read()
        soup = BeautifulSoup(page, 'html.parser')
        product = soup.find("div", {"class": "js-product-price"})
        price = product.findNext('div', {'class': 'js-price-display'}).getText().strip()
        oos = product.findNext('p', attrs={'class': "price-oos"})
        if oos is None:
            oos = 'In Stock'
        else:
            oos = oos.getText()
        val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
        # ifile.write(val)
        sys.stdout.write('1')
    except Exception as e:
        print e
    else:
        return val

while True:
    ifile = open('output.csv', "w", 0)
    inputs = csv.reader(open('input.csv'))
    # inputs = csv.reader(codecs.open('input.csv', 'rU', 'utf-16'))
    ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
    for i in inputs:
        val_to_write = extract(i[0])
        if val_to_write:
            ifile.write(val_to_write)
        ifile.close()
    shutil.copy('output.csv', 'output2.csv')
    print("finished")
With the above script I am now getting the error: "ValueError: I/O operation on closed file". Thanks
Use try-except-else, since you only want to return val if no exception was raised (if an exception was raised, val was never assigned by the time you try to return it). Another suggestion is not to use a "catch-em-all" except block.
def extract(url):
    try:
        sys.stdout.write('0')
        # global file
        page = urllib2.urlopen(url).read()
        soup = BeautifulSoup(page, 'html.parser')
        product = soup.find("div", {"class": "js-product-price"})
        price = product.findNext('div', {'class': 'js-price-display'}).getText().strip()
        oos = product.findNext('p', attrs={'class': "price-oos"})
        if oos is None:
            oos = 'In Stock'
        else:
            oos = oos.getText()
        val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
        # ifile.write(val)
        sys.stdout.write('1')
    except Exception as e:
        print e
    else:
        return val
But be warned: if an exception does occur, extract will return None, and the calling code will have to account for that, for example:
for i in inputs:
    val_to_write = extract(i[0])
    if val_to_write:
        ifile.write(val_to_write)
ifile.close()
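As a sketch of the "don't catch everything" suggestion, the handlers below are limited to the failures this scraper can realistically hit (the specific exception types chosen are an assumption, not part of the original answer):
import time
import urllib2
from bs4 import BeautifulSoup

def extract(url):
    try:
        page = urllib2.urlopen(url).read()
    except urllib2.URLError as e:
        # Network or HTTP problem: report it and skip this URL.
        print "fetch failed for", url, e
        return None
    try:
        soup = BeautifulSoup(page, 'html.parser')
        product = soup.find("div", {"class": "js-product-price"})
        price = product.findNext('div', {'class': 'js-price-display'}).getText().strip()
        oos = product.findNext('p', attrs={'class': "price-oos"})
        oos = 'In Stock' if oos is None else oos.getText()
    except AttributeError as e:
        # The expected markup wasn't found on the page (e.g. product is None).
        print "unexpected page layout for", url, e
        return None
    return url + "," + price + "," + oos + "," + time.ctime() + '\n'
Anything else still surfaces as a traceback, which is usually what you want while developing.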

Python Value error

I'm getting an error when trying to split piped output in Python.
The error is "need more than 3 values to unpack", although I'm unpacking into 8 variables.
import subprocess, sys
from datetime import datetime
from time import sleep as sleep

multimon_ng = subprocess.Popen("multimon-ng -a FLEX -t wav flex.wav",
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True)
while True:
    nextline = multimon_ng.stdout.readline()
    flex, mdate, mtime, bitrate, other, capcode, o2, msg = nextline.split(" ", 7)  # error here
    if nextline is " ":
        print "napping"
    else:
        print mdate + " " + mtime + " " + capcode + " " + msg
    multimon_ng.poll()
    sys.stdout.flush()
Any help would be great.
The 3 in the error message indicates the length of the iterable on the right-hand side of the assignment.
Minimal failing examples:
a, = [] # ValueError: need more than 0 values to unpack
a, b = [1] # ValueError: need more than 1 value to unpack
a, b, c = [1, 2] # ValueError: need more than 2 values to unpack
# etc ...
Minimal correct examples:
a, = [1]
a, b = [1, 2]
a, b, c = [1, 2, 3]
The smallest change that both fixes and exposes the issue is to wrap the unpacking in a try-except block.
while True:
    nextline = multimon_ng.stdout.readline()
    if not nextline:
        print "napping"
    else:
        try:
            flex, mdate, mtime, bitrate, other, capcode, o2, msg = nextline.split(" ", 7)
        except ValueError:
            print "invalid line", nextline
        else:
            print mdate + " " + mtime + " " + capcode + " " + msg
    multimon_ng.poll()
    sys.stdout.flush()
As you can see, I also moved the check for an empty line before the unpacking; if the line is empty, the unpacking would fail as well.
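An alternative sketch, building on the same multimon_ng pipe from the question and assuming the same 8-field line format, is to split first and check the field count before unpacking, so exceptions aren't used for control flow:
while True:
    nextline = multimon_ng.stdout.readline()
    if not nextline:
        print "napping"
        continue
    fields = nextline.split(" ", 7)
    if len(fields) < 8:
        # Malformed or short line: skip it instead of letting the unpack fail.
        print "invalid line", nextline
        continue
    flex, mdate, mtime, bitrate, other, capcode, o2, msg = fields
    print mdate + " " + mtime + " " + capcode + " " + msg
    multimon_ng.poll()
    sys.stdout.flush()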

Learning defaultdict(list), Trying to make arrays of objects that have object arrays under them

My Python program is trying to build a structure of parent objects, each holding its own array of child objects, like so:
Some Object1
    Array of objects under Some Object1
Some Object2
    Array of objects under this one as well.
This is the full Python program I have. It looks right to me, but the second list thinks there is only one item in it, as if the append doesn't append the full list. Not sure what I am doing wrong.
import os
import sys
import json
from pprint import pprint
import csv
from datetime import datetime
import numbers
import decimal
import re
import json
from datetime import timedelta
from datetime import datetime
import os
from collections import defaultdict

class SomeClass:
    i = 1
    name = "myname"
    pointX = 232.12
    pointY = 321.2

x = SomeClass()
x.name = 'first parent'
x.pointX = -500
y = SomeClass()
y.name = 'Second Parent'
y.pointX = -1000
t = SomeClass()
t.name = 'first child of first set'
t.pointX = -113
t.pointY = -23
u = SomeClass()
u.name = "Another Name"
v = SomeClass()
v.name = "Last Name"
q = SomeClass()
q.name = 'another child 2'
q.pointX = -250
q.pointY = -250
w = SomeClass()
w.name = 'Child 2'
w.pointX = 750
w.pointY = 750

def main():
    parents = [x, y]
    children1 = [t, u, v]
    children2 = [q, w]
    d = defaultdict(list)
    somevar = 0
    for p in parents:
        if somevar == 0:
            d[p].append(children1)
        else:
            d[p].append(children2)
        somevar += 1
    print d
    for e in d:
        print "Pname: " + str(e.name) + " " + str(e.pointX) + " " + str(e.pointY) + " numc: " + str(len(d[e]))  # should be len of children1 and len of children2. it is 1 both times
        counter = 0
        for z in d[e]:
            print "\t\t" + z[counter].name + " " + str(z[counter].pointX) + " " + str(z[counter].pointY)
            counter += 1  # no idea how to really iterate through this, doing a z.name gave an error as it seemed like it was an array of itself

main()  # kick it all off
The output:
Pname: first parent -500 321.2 numc: 1
        first child of first set -113 -23
Pname: Second Parent -1000 321.2 numc: 1
        another child 2 -250 -250
To me it should be showing more children, 3 for the first parent and 2 for the second?
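For reference, a minimal sketch of the difference between appending a whole list to a defaultdict(list) value (which stores it as a single element) and extending with it (which stores its items individually); the names below are hypothetical and this is only meant to illustrate the distinction, not to restate the program above:
from collections import defaultdict

d = defaultdict(list)
children = ["child_a", "child_b", "child_c"]

d["parent_append"].append(children)   # one element: the list itself
d["parent_extend"].extend(children)   # three elements: the list's items

print len(d["parent_append"])   # 1
print len(d["parent_extend"])   # 3
print d["parent_append"][0]     # ['child_a', 'child_b', 'child_c']
print d["parent_extend"][0]     # child_a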