-2147221231, 'ClassFactory cannot supply requested class', None, None - python-2.7

I am using the following code to reclassify multiple raster at once using python. after the "GP = win32com.client.Dispatch("esriGeoprocessing.GPDispatch.1")" line I receive this error:
Traceback (most recent call last):
File "<pyshell#12>", line 1, in <module>
GP = win32com.client.Dispatch("esriGeoprocessing.GPDispatch.1")
File "C:\Python27\ArcGIS10.1\lib\site-packages\win32com\client\__init__.py", line 95, in Dispatch
dispatch, userName = dynamic._GetGoodDispatchAndUserName(dispatch,userName,clsctx)
File "C:\Python27\ArcGIS10.1\lib\site-packages\win32com\client\dynamic.py", line 108, in _GetGoodDispatchAndUserName
return (_GetGoodDispatch(IDispatch, clsctx), userName)
File "C:\Python27\ArcGIS10.1\lib\site-packages\win32com\client\dynamic.py", line 85, in _GetGoodDispatch
IDispatch = pythoncom.CoCreateInstance(IDispatch, None, clsctx, pythoncom.IID_IDispatch)
com_error: (-2147221231, 'ClassFactory cannot supply requested class', None, None)
Does anyone know what the problem can be? Also, under reclassTable I am not using a .dbf but a .csv Excel file. Could that be the reason? I don't know how to make a .dbf file.
Thanks ahead for any much-needed help!
Kristin
code
# --- Batch raster reclassification via the ArcGIS COM geoprocessor ---
# NOTE(review): the reported com_error -2147221231 ("ClassFactory cannot
# supply requested class") is raised by the Dispatch() call below, i.e.
# before the reclass table is ever opened -- so the .csv-vs-.dbf question
# cannot be the cause.  The ProgID "esriGeoprocessing.GPDispatch.1" is
# presumably not registered on this machine (it dates from the ArcGIS 9.x
# era, while the traceback shows an ArcGIS 10.1 Python install) -- TODO
# confirm against the installed ArcGIS version.
# NOTE(review): the indentation of the while/if/try bodies has been lost
# in this paste; the script is not runnable exactly as shown.
inputDir = "c:\\tmp\\gis\\rasterReclass\\" # where all the rasters are located
outputDir = "c:\\tmp\\gis\\rasterReclass\\" # where the output rasters are to be saved
outputPrefix = "R_" # prefix of the output rasters
reclassTable = r"c:\tmp\gis\rasterReclass\reclassTable.dbf" # the reclass data table
fieldRasterName = "RASTERNAME" # column with the name of the raster
fieldRasterThreshold = "THRESHOLD" # column with the threshold value
import win32com.client, sys
# This Dispatch() call is where the com_error is raised (COM class
# factory lookup fails for the requested ProgID).
GP = win32com.client.Dispatch("esriGeoprocessing.GPDispatch.1")
GP.RefreshCatalog(inputDir)
GP.RefreshCatalog(outputDir)
total = 0  # rasters listed in the reclass table
ok = 0  # rasters successfully reclassified
tableRows = GP.SearchCursor(reclassTable)
tableRow = tableRows.Next()
while tableRow:
print ""
total = total + 1
rasterName = tableRow.GetValue(fieldRasterName)
threshold = tableRow.GetValue(fieldRasterThreshold)
sourceRaster = inputDir + rasterName
print "Processing " + rasterName + " with threshold value " + str(threshold)
if GP.Exists(sourceRaster):
# Map-algebra expression: cells below the threshold become NoData,
# everything else becomes 1.
expression = "SetNull(\"" + sourceRaster + "\" < " + str(threshold) + ", 1)"
outputRaster = outputDir + outputPrefix + rasterName
try:
GP.SingleOutputMapAlgebra(expression, outputRaster)
print "... done"
ok = ok + 1
# NOTE(review): a bare except hides the real geoprocessing error;
# catching Exception and printing it would aid debugging.
except:
print "... " + rasterName + " failed"
else:
print rasterName + " does not exists"
tableRow = tableRows.Next()
print "--------------------------"
print str(ok) + " out of " + str(total) + " rasters sucessfully reclassified !"

Related

"Unboundlocalerror: Local Variable "Val" Referenced before Assignment" Error

I have been trying to get my script to loop in such a way that it will load the outputs into file 1, and then when it's done loading everything move the values to output file 2, erase the values in output file 1, and start reloading them; then when those are done, move the values into output file 2 (overwriting the old ones), and repeat.
I have been pretty successful so far and don't know what else to add to my script. I am hoping someone here knows why I keep getting the "UnboundLocalError: local variable 'val' referenced before assignment" error randomly midway through the loading process; when I have a very small input file, the script performs how I want.
Does anyone know how I can change my script to fix that error? I have tried to understand why it is happening but cannot.
I have tried to research it thoroughly, but none of the suggestions I have found have worked (or I implemented them incorrectly). I have attached my script. Thanks!
import urllib2,re,urllib,urlparse,csv,sys,time,threading,codecs,shutil
from bs4 import BeautifulSoup
# Scrape price/stock info for each URL in input.csv and append a CSV row
# to output.csv.  This is the version that raises the UnboundLocalError.
# NOTE(review): indentation was lost in this paste.
def extract(url):
try:
sys.stdout.write('0')
# global file
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
product = soup.find("div", {"class": "js-product-price"})
price = product.findNext('div',{'class': 'js-price-display'}).getText().strip()
oos = product.findNext('p', attrs={'class': "price-oos"})
if oos is None:
oos = 'In Stock'
else:
oos = oos.getText()
# 'val' is assigned ONLY here, inside the try block.
val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
# ifile.write(val)
sys.stdout.write('1')
except Exception as e:
print e
# BUG (the question): if urlopen()/parsing raised before 'val' was
# assigned, execution still reaches this return, so 'val' is unbound
# -> "UnboundLocalError: local variable 'val' referenced before
# assignment".
return val
while True:
ifile = open('output.csv', "w", 0)
inputs = csv.reader(open('input.csv'))
# inputs = csv.reader(codecs.open('input.csv', 'rU', 'utf-16'))
ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
for i in inputs:
ifile.write(extract(i[0]))
ifile.close()
Update:
Thanks for the help guys! This is my new script:
# Updated scraper: extract() now uses try/except/else, and the caller
# skips rows for which extract() returned None.
import urllib2,re,urllib,urlparse,csv,sys,time,threading,codecs,shutil
from bs4 import BeautifulSoup
def extract(url):
try:
sys.stdout.write('0')
# global file
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
product = soup.find("div", {"class": "js-product-price"})
price = product.findNext('div',{'class': 'js-price-display'}).getText().strip()
oos = product.findNext('p', attrs={'class': "price-oos"})
if oos is None:
oos = 'In Stock'
else:
oos = oos.getText()
val = url + "," + price + "," + oos + "," + time.ctime() + '\n'
# ifile.write(val)
sys.stdout.write('1')
except Exception as e:
print e
else:
# Only reached when no exception occurred, so 'val' is always bound.
return val
while True:
ifile = open('output.csv', "w", 0)
inputs = csv.reader(open('input.csv'))
# inputs = csv.reader(codecs.open('input.csv', 'rU', 'utf-16'))
ifile.write('URL' + "," + 'Price' + "," + 'Stock' + "," + "Time" + '\n')
for i in inputs:
val_to_write = extract(i[0])
if val_to_write:
ifile.write(val_to_write)
# NOTE(review): the follow-up "ValueError: I/O operation on closed file"
# suggests that in the real script this close() (and possibly the copy)
# sits INSIDE the for/while body -- indentation was lost in this paste,
# so confirm close() runs only after the for loop finishes.
ifile.close()
shutil.copy('output.csv', 'output2.csv')
print("finished")
With the above script I am now getting the error: "ValueError: I/O operation on closed file". Thanks
Use try-except-else as you would only want to return val if no exception was raised (if an exception was raised then val wouldn't be assigned to when you try to return it). Another suggestion is not to use a "catch-em-all" except block.
def extract(url):
    """Fetch *url* and build the CSV row "url,price,stock,timestamp\n".

    Returns the row on success; returns None when any step raises (the
    except branch prints the error and the else-return is skipped).
    """
    try:
        sys.stdout.write('0')  # progress marker: request started
        # global file
        html = urllib2.urlopen(url).read()
        dom = BeautifulSoup(html, 'html.parser')
        product = dom.find("div", {"class": "js-product-price"})
        price = product.findNext('div', {'class': 'js-price-display'}).getText().strip()
        oos = product.findNext('p', attrs={'class': "price-oos"})
        oos = 'In Stock' if oos is None else oos.getText()
        row = url + "," + price + "," + oos + "," + time.ctime() + '\n'
        # ifile.write(val)
        sys.stdout.write('1')  # progress marker: parsed OK
    except Exception as e:
        print(e)
    else:
        # try/except/else: return only when no exception was raised.
        return row
But be warned: if an exception does occur then extract will return None and the calling code will have to take account for that, for example:
# Caller-side handling for extract(): skip rows where it returned None
# (i.e. an exception occurred during scraping).
for i in inputs:
val_to_write = extract(i[0])
if val_to_write:
ifile.write(val_to_write)
ifile.close()

Python Value error

I'm getting an error when trying to split a piped output into python.
The error is "need more than 3 values to unpack", although I'm unpacking into 8 variables.
# Read multimon-ng FLEX decoder output line by line and print selected
# fields.  This is the version that raises the ValueError.
import subprocess, sys
from datetime import datetime
from time import sleep as sleep
multimon_ng = subprocess.Popen("multimon-ng -a FLEX -t wav flex.wav",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
while True:
nextline = multimon_ng.stdout.readline()
# BUG (the question): split(" ", 7) yields AT MOST 8 fields; a line
# with fewer than 7 spaces (e.g. the empty string readline() returns
# at EOF) yields fewer, so the 8-name unpack raises
# "need more than N values to unpack".  Note the unpack also runs
# BEFORE the emptiness check below.
flex, mdate, mtime, bitrate, other, capcode, o2, msg = nextline.split(" ", 7) # error here
# NOTE(review): 'is " "' tests object identity against a one-space
# string; readline() returns "" at EOF, so this never detects it.
if nextline is " ":
print "napping"
else:
print mdate + " " + mtime + " " + capcode + " " + msg
multimon_ng.poll()
sys.stdout.flush()
any help would be great
The 3 in the error message indicates the length of the iterable on the right-hand side of the assignment.
Minimal wrong examples:
# Each unpack fails because the right-hand side holds fewer items than
# there are target names on the left.
a, = [] # ValueError: need more than 0 values to unpack
a, b = [1] # ValueError: need more than 1 value to unpack
a, b, c = [1, 2] # ValueError: need more than 2 values to unpack
# etc ...
Minimal correct examples:
# Counts on both sides match, so each unpack succeeds.
a, = [1]
a, b = [1, 2]
a, b, c = [1, 2, 3]
Smallest change to fix and expose issue would be to wrap unpacking iterable in try-except block.
while True:
nextline = multimon_ng.stdout.readline()
if not nextline:
print "napping"
else:
try:
flex, mdate, mtime, bitrate, other, capcode, o2, msg = nextline.split(" ", 7)
except ValueError:
print "invalid line", nextline
else:
print mdate + " " + mtime + " " + capcode + " " + msg
multimon_ng.poll()
sys.stdout.flush()
As you can see, I also moved check for empty line before unpacking. If line is empty, unpacking would also fail.

Python, WindowsError: [Error 32], file being used by another process

I'm writing a small programm that's supposed to loop through a folder of msg files (i.e. MS Outlook emails) and prefix a short string to their file names. I keep running into WindowsError: [Error 32] (The process cannot access the file because it is being used by another process) on line 33 (os.rename(filenameOLD, filenameNEW)). Any idea why?
# Rename each Outlook .msg file in 'path' by prefixing its sent date.
import win32com.client
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
import os
# NOTE(review): backslashes in a non-raw string are fragile ('\U' is a
# syntax error in Python 3); prefer a raw string or forward slashes.
path = 'C:\Users\MyName\Desktop\SomeFile\\'
msgFiles = os.listdir(path) # returns list
count = 0
for msgFile in msgFiles:
count = count + 1
# BUG (the question): OpenSharedItem keeps the .msg open in Outlook,
# and 'msg' still references the COM item when os.rename runs below --
# hence WindowsError 32 (file in use by another process).  Releasing
# the item first (del msg after reading SentOn) avoids it.
msg = outlook.OpenSharedItem(path + msgFile)
date = str(msg.SentOn)
#Extract YYYY, MM, DD, HHMMSS from .msg sent date
# Assumes SentOn stringifies as DD/MM/YY HH:MM:SS -- TODO confirm locale.
YYYY = str(20)+date.split("/")[2][:2]
MM = date.split("/")[1]
DD = date.split("/")[0]
HHMMSS = "".join(date.split()[1].split(":")) + "Hrs"
#Reformat date to valid file name
filenamePrefix = YYYY + DD + MM + " " + HHMMSS + " "
#generate new file name
filenameOLD = path + msgFile
filenameNEW = path + filenamePrefix + msgFile
#rename file
os.rename(filenameOLD, filenameNEW)
print count, "files renamed"
You've opened the message without closing it. Instead do:
# ...
for msgFile in msgFiles:
count = count + 1
msg = outlook.OpenSharedItem(path + msgFile)
date = str(msg.SentOn)
# Drop the COM reference so Outlook releases the file before os.rename.
del msg
# ...

python - ZeroDivisionError

I created a script which copies data to a specific location. What I tried to do is print the results via a progress bar. I tried to use this package: -> https://pypi.python.org/pypi/progressbar2
Here is my code:
# Copy/move directory trees and show progress with the progressbar2 package.
# NOTE(review): indentation was lost in this paste.
src = raw_input("Enter source disk location: ")
src = os.path.abspath(src)
dst = raw_input("Enter first destination to copy: ")
dst = os.path.abspath(dst)
dest = raw_input("Enter second destination to move : ")
dest = os.path.abspath(dest)
for dir, dirs, files in os.walk(src):
if any(f.endswith('.mdi') for f in files):
dirs[:] = [] # do not recurse into subdirectories
continue # ignore this directory
files = [os.path.join(dir, f) for f in files]
# BUG (the question): when a directory contains no files, len(files) is 0,
# so maxval=0, and ProgressBar.start() -> update(0) -> percentage()
# divides by zero (see the traceback: currval * 100.0 / self.maxval).
# Guard with e.g. maxval=max(1, len(files)) or skip empty directories.
# NOTE(review): two statements are fused on the next line (paste
# artifact); the pbar = ProgressBar(...).start() call is a separate
# statement in the real script.
progress, progress_maxval = 0, len(files) pbar = ProgressBar(widgets=['Progress ', Percentage(), Bar(), ' ', ETA(), ],maxval=progress_maxval).start()
debug_status = ''
# NOTE(review): 'list' as a loop variable shadows the builtin.
for list in files:
part1 = os.path.dirname(list)
part2 = os.path.dirname(os.path.dirname(part1))
part3 = os.path.split(part1)[1]
path_miss1 = os.path.join(dst, "missing_mdi")
# ---------first location-------------------#
path_miss = os.path.join(path_miss1, part3)
# ---------second location-------------------#
path_missing = os.path.join(dest, "missing_mdi")
try:
# ---------first location-------------------#
if not os.path.exists(path_miss):
os.makedirs(path_miss)
else:
pass
if os.path.exists(path_miss):
distutils.dir_util.copy_tree(part1, path_miss)
else:
debug_status += "missing_file\n"
pass
if (get_size(path_miss)) == 0:
os.rmdir(path_miss)
else:
pass
# ---------second location-------------------#
if not os.path.exists(path_missing):
os.makedirs(path_missing)
else:
pass
if os.path.exists(path_missing):
shutil.move(part1, path_missing)
else:
debug_status += "missing_file\n"
if (get_size(path_missing)) == 0:
os.rmdir(path_missing)
else:
pass
# NOTE(review): 'except Exception: pass' silently swallows every copy
# or move failure; at minimum record the error in debug_status.
except Exception:
pass
finally:
progress += 1
pbar.update(progress)
pbar.finish()
print debug_status
When I tried to execute it I got an error; my traceback is below:
Traceback (most recent call last):
File "<string>", line 254, in run_nodebug
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\CopyClass.py", in <module>
pbar = ProgressBar(widgets=['Progress ', Percentage(), Bar(), ' ', ETA(),],maxval=progress_maxval).start()
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\__init__.py", in start
self.update(0)
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\__init__.py", line 283, in update
self.fd.write(self._format_line() + '\r')
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\__init__.py", line 243, in _format_line
widgets = ''.join(self._format_widgets())
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\__init__.py", line 223, in _format_widgets
widget = format_updatable(widget, self)
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\widgets.py", in format_updatable
if hasattr(updatable, 'update'): return updatable.update(pbar)
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\widgets.py", in update
return '%3d%%' % pbar.percentage()
File "C:\Users\kostrzew\Desktop\REPORTS\ClassCopy\progressbar\__init__.py", line 208, in percentage
return self.currval * 100.0 / self.maxval
ZeroDivisionError: float division by zero
I know that there is a problem with "maxval=progress_maxval" because it can't be devided by zero.
My question is: how do I change it? Should I create an exception to ignore zero? How do I do that?
I think that inside the ProgressBar it is trying to divide by zero. It calculates the percentage from the proportion:
max_value -> 100%
progress_value -> x
Solving this proportion for x gives:
x = (100 * progress_value) / max_value
To avoid the division by zero, use 1 instead of 0 for max_value.

print if list index out of range

Hi all, I'm trying to create a handler for "list index out of range" but don't seem to be having any luck.
# Fetch Google Maps driving directions and write the steps to output.txt.
import json, urllib, re
from urllib import urlencode
import googlemaps
import tempfile
import win32api
import win32print
start = "Adelaide, South Australia"
finish = " ghkjffzh, south Australia "
url = 'http://maps.googleapis.com/maps/api/directions/json?%s' % urlencode((
('origin', start),
('destination', finish)
))
ur = urllib.urlopen(url)
result = json.load(ur)
filename = "output.txt"
with open(filename, 'w') as output:
# BUG (the question): this 'for' line itself evaluates
# result['routes'][0] OUTSIDE the try block, so when the lookup failed
# (no routes returned) the IndexError is raised right here and the
# except clause below never runs -- which is why swapping Exception
# for IndexError made no difference.
for i in range(0, len(result['routes'][0]['legs'][0]['steps'])):
try:
s = (result['routes'][0]['legs'][0]['steps'][i]['html_instructions'])
d = (result['routes'][0]['legs'][0]['steps'][i]['distance']['text'])
l = (result['routes'][0]['legs'][0]['steps'][i]['duration']['text'])
# Strip HTML tags from the instruction text.
s = re.sub('<[A-Za-z\/][^>]*>', '', s)
output.writelines(s + " " + d + " " + l + '\n')
except Exception:
print "Directions could not be printed"
output.write("Directions could not be given due to the format of page or the address type")
But nothing is written to the .txt file and I still get the error.
I've tried replacing Exception with IndexError and ValueError, but there is no change.
Solved: by exploring the returned JSON result I found a status field, so I check that first.
# Write the driving directions to the output file, guarding against
# lookups that returned no routes (the cause of the original IndexError).
with open(filename, 'w') as output:
    # The Directions API reports failures in result['status']; check it
    # BEFORE indexing into result['routes'].
    # Fix: the original line was missing the trailing colon (syntax error).
    if result['status'] == "NOT_FOUND":
        output.write(" no directions avalible")
    else:
        # Hoist the deep index chain instead of repeating it per field.
        steps = result['routes'][0]['legs'][0]['steps']
        for step in steps:
            s = step['html_instructions']
            d = step['distance']['text']
            l = step['duration']['text']
            # Strip HTML tags from the instruction text.
            s = re.sub('<[A-Za-z\/][^>]*>', '', s)
            output.writelines(s + " " + d + " " + l + '\n')