IndexError: string index out of range in python2.7 - python-2.7

Could someone help me out with what is going wrong and what I can do to fix it?
File "C:/Users/rawaa/raw/untitled2.py", line 246, in disp_message
str02= str02 + str01[single]
IndexError: string index out of range
def disp_message(val01, file_name):
    f = open(file_name, 'r')
    data = f.readlines()
    f.close()
    str01 = str(data[0])
    str02 = ' '
    for each in val01:
        # print each
        for single in each:
            # print single
            str02 = str02 + str01[single]
        str02 = str02 + ' '
    print("the hidden message in the data is:")
    print(str02)
    print("\n\npress 1. to see hidden data visualisation or ctrl+c to terminate")
    cho = raw_input()
    if cho == '1':
        print(show_data(str01, 1, val01))

You didn't limit the loop to the range of the length of each; maybe try something like:
for each in val01:
    # print each
    for single in range(len(each)):
        # print single
        str02 = str02 + str01[single]
    str02 = str02 + ' '
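
If val01 actually holds character positions into str01 (an assumption based on the traceback pointing at str01[single]), another minimal sketch is to guard each index before using it:

for each in val01:
    for single in each:
        # Skip any position that falls past the end of the line read from the file
        # (assumes val01 contains integer index positions into str01).
        if 0 <= single < len(str01):
            str02 = str02 + str01[single]
    str02 = str02 + ' '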

Related

Number stored as text when converted CSV to Xlsx

I wrote code to convert/copy a CSV file into an xlsx file. It copies the data successfully, but all of the data is stored as text.
Each cell now shows an exclamation mark with the warning "Number stored as text".
Can anyone please help me get the data stored as numbers, because I want to do calculations on it?
Here is the code:
import csv
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
with open(plotDir + '\\' + file, 'r') as f:
    for row in csv.reader(f):
        ws.append(row)
wb.save(plotDir + '\\' + file[:-4] + '.xlsx')
You can use set_explicit_value:
with open(plotDir + '\\' + plotFile, 'r') as f:
    for x, row in enumerate(csv.reader(f), start=1):
        for y, val in enumerate(row, start=1):
            ws.cell(row=x, column=y).set_explicit_value(value=val, data_type='n')  # Cell.TYPE_NUMERIC
wb.save(plotDir + '\\' + plotFile[:-4] + '.xlsx')
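
As an alternative sketch (not from the answer above), assuming the CSV fields really are plain numbers, you could convert each value before appending the row, so openpyxl stores it with a numeric type; plotDir and plotFile are the same names used in the snippets above:

import csv
from openpyxl import Workbook

def to_number(value):
    # Return the value as a float when possible, otherwise leave it unchanged.
    try:
        return float(value)
    except ValueError:
        return value

wb = Workbook()
ws = wb.active
with open(plotDir + '\\' + plotFile, 'r') as f:
    for row in csv.reader(f):
        ws.append([to_number(v) for v in row])
wb.save(plotDir + '\\' + plotFile[:-4] + '.xlsx')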

How to lowercase 'xl cell value + some content/something'?

Here is my function. I have to lowercase whatever ends up in newfilename, i.e. newfilename.lower():
import os
import xlrd

def my_function(start, end):
    sheetname = 'my-sheet'
    filepath = "/myxl.xlsx"
    try:
        work_book = xlrd.open_workbook(filepath)
    except:
        print 'error'
    try:
        worksheet = work_book.sheet_by_name(sheetname)
    except:
        print 'error'
    rows = worksheet.nrows
    cols = worksheet.ncols
    success = []
    fail = []
    for row in xrange(start, end):
        print "row no. : ", row
        state = '/home/myfolder/'
        if os.path.exists(state):
            print "state folder exists"
        else:
            os.makedirs(state)
        district = state + worksheet.cell_value(row, 0) + '/'
        if os.path.exists(district):
            print "district folder exists"
        else:
            os.makedirs(district)
        city = district + worksheet.cell_value(row, 2) + '/'
        if os.path.exists(city):
            print "city folder exists"
        else:
            os.makedirs(city)
        newfilename = city + worksheet.cell_value(row, 4).replace(" ", "-") + '.png'
        if worksheet.cell_value(row, 5) != "":
            oldfilename = worksheet.cell_value(row, 5)
        else:
            oldfilename = "no-image"
        newfullpath = newfilename
        oldfullpath = '/home/old/folder/' + oldfilename
        try:
            os.rename(oldfullpath, newfullpath)
            success.append(row)
        except Exception as e:
            fail.append(row)
            print "Error", e
        print 'renaming done for row #', row, ' file ', oldfilename, ' to ', newfilename
    print 'SUCCESS ', success
    print 'FAIL ', fail
newfilename.lower() is not working, and when I try to handle the value as unicode I get this error:
UnicodeEncodeError: 'ascii' codec can't encode character u'\u2019' in position 72: ordinal not in range(128)
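
Two things are probably going on here (a sketch, not a confirmed diagnosis): lower() returns a new string rather than changing the existing one, so the result has to be assigned back, and in Python 2 the values xlrd returns for text cells are unicode objects, so they need to be encoded (e.g. to UTF-8) before being mixed with byte strings or passed to os.rename. A minimal sketch, assuming the city, row and worksheet variables from my_function above:

# -*- coding: utf-8 -*-
# Sketch only: city, row and worksheet are assumed to exist as in my_function above.
cell_text = worksheet.cell_value(row, 4)            # xlrd returns a unicode object for text cells
cell_text = cell_text.replace(u" ", u"-").lower()   # lower() returns a NEW string; keep the result
newfilename = (city + cell_text + u'.png').encode('utf-8')  # encode before using as a byte-string path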

Simple way to refactor this Python code to reduce repetition

I'd like help refactoring this code to reduce redundant lines/concepts. The code for this def is basically repeated three times.
Restrictions:
- I'm new, so a really fancy list comprehension or turning things into objects with dunders and method overrides is way too advanced for me.
- Built-in modules only. This is Python 2.7 code, and it only imports os and re.
What the overall script does:
Finds files with a fixed prefix. The files are pipe-delimited text files. The first row is a header. It has a footer which can be 1 or more rows. Based on the prefix, the script throws away "columns" from the text file that aren't needed in another step. It saves the data, comma-separated, in a new file with a .csv extension.
The bulk of the work is done in processRawFiles(). This is what I'd like refactored, since it's wildly repetitive.
def separateTranslationTypes(translationFileList):
    '''Takes in list of all files to process and finds which are roomtypes,
    ratecodes or sourcecodes. The type of file determines how it will be processed.'''
    rates = []
    rooms = []
    sources = []
    for afile in translationFileList:
        rates.append([m.group() for m in re.finditer('cf_ratecodeheader+(.*)', afile)])
        rooms.append([m.group() for m in re.finditer('cf_roomtypes+(.*)', afile)])
        sources.append([m.group() for m in re.finditer('cf_sourcecodes+(.*)', afile)])
    # An empty list evaluates to False, so x is kept only when the list is not empty.
    rates = [x[0] for x in rates if x]
    rooms = [x[0] for x in rooms if x]
    sources = [x[0] for x in sources if x]
    print '... rateCode files :: ', rates, '\n'
    print '... roomType files :: ', rooms, '\n'
    print '... sourceCode files :: ', sources, '\n'
    return {'rateCodeFiles': rates,
            'roomTypeFiles': rooms,
            'sourceCodeFiles': sources}
groupedFilestoProcess = separateTranslationTypes(allFilestoProcess)
def processRawFiles(groupedFileDict):
    for key in groupedFileDict:
        # Process the rateCodes file
        if key == 'rateCodeFiles':
            for fname_Value in groupedFileDict[key]:  # fname_Value is the filename
                if os.path.exists(fname_Value):
                    workingfile = open(fname_Value, 'rb')
                    filedatastring = workingfile.read()  # turns the entire file contents into a single string
                    workingfile.close()
                    outname = 'forUpload_' + fname_Value[:-4:] + '.csv'  # removes .txt or any other 3-char extension
                    outputfile = open(outname, 'wb')
                    filedatalines = filedatastring.split('\n')  # a list containing each line of the file
                    rawheaders = filedatalines[0]  # 1st element of the list is the first row of the file, with the headers
                    parsedheaders = rawheaders.split('|')  # turn the header string into a list where | was the delimiter
                    print '\n'
                    print 'outname: ', outname, '\n'
                    # print 'rawheaders: ', rawheaders, '\n'
                    # print 'parsedheaders: ', parsedheaders, '\n'
                    # print filedatalines[0:2]
                    print '\n'
                    ratecodeindex = parsedheaders.index('RATE_CODE')
                    ratecodemeaning = parsedheaders.index('DESCRIPTION')
                    for dataline in filedatalines:
                        if dataline[:4] == 'LOGO':
                            firstuselessline = filedatalines.index(dataline)
                            # print firstuselessline
                    # ignore the first line which was the headers
                    # stop before the line that starts with LOGO - the first useless line
                    for dataline in filedatalines[1:firstuselessline-1:]:
                        # print dataline.split('|')
                        theratecode = dataline.split('|')[ratecodeindex]
                        theratemeaning = dataline.split('|')[ratecodemeaning]
                        # print theratecode, '\t', theratemeaning, '\n'
                        linetowrite = theratecode + ',' + theratemeaning + '\n'
                        outputfile.write(linetowrite)
                    outputfile.close()
        # Process the roomTypes file
        if key == 'roomTypeFiles':
            for fname_Value in groupedFileDict[key]:  # fname_Value is the filename
                if os.path.exists(fname_Value):
                    workingfile = open(fname_Value, 'rb')
                    filedatastring = workingfile.read()  # turns the entire file contents into a single string
                    workingfile.close()
                    outname = 'forUpload_' + fname_Value[:-4:] + '.csv'  # removes .txt or any other 3-char extension
                    outputfile = open(outname, 'wb')
                    filedatalines = filedatastring.split('\n')  # a list containing each line of the file
                    rawheaders = filedatalines[0]  # 1st element of the list is the first row of the file, with the headers
                    parsedheaders = rawheaders.split('|')  # turn the header string into a list where | was the delimiter
                    print '\n'
                    print 'outname: ', outname, '\n'
                    # print 'rawheaders: ', rawheaders, '\n'
                    # print 'parsedheaders: ', parsedheaders, '\n'
                    # print filedatalines[0:2]
                    print '\n'
                    ratecodeindex = parsedheaders.index('LABEL')
                    ratecodemeaning = parsedheaders.index('SHORT_DESCRIPTION')
                    for dataline in filedatalines:
                        if dataline[:4] == 'LOGO':
                            firstuselessline = filedatalines.index(dataline)
                            # print firstuselessline
                    # ignore the first line which was the headers
                    # stop before the line that starts with LOGO - the first useless line
                    for dataline in filedatalines[1:firstuselessline-1:]:
                        # print dataline.split('|')
                        theratecode = dataline.split('|')[ratecodeindex]
                        theratemeaning = dataline.split('|')[ratecodemeaning]
                        # print theratecode, '\t', theratemeaning, '\n'
                        linetowrite = theratecode + ',' + theratemeaning + '\n'
                        outputfile.write(linetowrite)
                    outputfile.close()
        # Process sourceCodes file
        if key == 'sourceCodeFiles':
            for fname_Value in groupedFileDict[key]:  # fname_Value is the filename
                if os.path.exists(fname_Value):
                    workingfile = open(fname_Value, 'rb')
                    filedatastring = workingfile.read()  # turns the entire file contents into a single string
                    workingfile.close()
                    outname = 'forUpload_' + fname_Value[:-4:] + '.csv'  # removes .txt or any other 3-char extension
                    outputfile = open(outname, 'wb')
                    filedatalines = filedatastring.split('\n')  # a list containing each line of the file
                    rawheaders = filedatalines[0]  # 1st element of the list is the first row of the file, with the headers
                    parsedheaders = rawheaders.split('|')  # turn the header string into a list where | was the delimiter
                    print '\n'
                    print 'outname: ', outname, '\n'
                    # print 'rawheaders: ', rawheaders, '\n'
                    # print 'parsedheaders: ', parsedheaders, '\n'
                    # print filedatalines[0:2]
                    print '\n'
                    ratecodeindex = parsedheaders.index('SOURCE_CODE')
                    ratecodemeaning = parsedheaders.index('DESCRIPTION')
                    for dataline in filedatalines:
                        if dataline[:4] == 'LOGO':
                            firstuselessline = filedatalines.index(dataline)
                            # print firstuselessline
                    # ignore the first line which was the headers
                    # stop before the line that starts with LOGO - the first useless line
                    for dataline in filedatalines[1:firstuselessline-1:]:
                        # print dataline.split('|')
                        theratecode = dataline.split('|')[ratecodeindex]
                        theratemeaning = dataline.split('|')[ratecodemeaning]
                        # print theratecode, '\t', theratemeaning, '\n'
                        linetowrite = theratecode + ',' + theratemeaning + '\n'
                        outputfile.write(linetowrite)
                    outputfile.close()

processRawFiles(groupedFilestoProcess)
I had to redo my code because a new batch of files turned up with neither the header row nor the footer row. However, since the columns I want still occur in the same order, I can keep them by position. We also skip any row that has fewer columns than the larger of the two indices used.
As for reducing repetition, processRawFiles now contains two inner defs that remove the need to repeat all of that parsing code from before.
def separateTranslationTypes(translationFileList):
    '''Takes in list of all files to process and finds which are roomtypes,
    ratecodes or sourcecodes. The type of file determines how it will be processed.'''
    rates = []
    rooms = []
    sources = []
    for afile in translationFileList:
        rates.append([m.group() for m in re.finditer('cf_ratecode+(.*)', afile)])
        rooms.append([m.group() for m in re.finditer('cf_roomtypes+(.*)', afile)])
        sources.append([m.group() for m in re.finditer('cf_sourcecodes+(.*)', afile)])
    # An empty list evaluates to False, so x is kept only when the list is not empty.
    rates = [x[0] for x in rates if x]
    rooms = [x[0] for x in rooms if x]
    sources = [x[0] for x in sources if x]
    print '... rateCode files :: ', rates, '\n'
    print '... roomType files :: ', rooms, '\n'
    print '... sourceCode files :: ', sources, '\n'
    return {'rateCodeFiles': rates,
            'roomTypeFiles': rooms,
            'sourceCodeFiles': sources}

groupedFilestoProcess = separateTranslationTypes(allFilestoProcess)

def processRawFiles(groupedFileDict):
    def someFixedProcess(bFileList, codeIndex, codeDescriptionIndex):
        for fname_Value in bFileList:  # fname_Value is the filename
            if os.path.exists(fname_Value):
                workingfile = open(fname_Value, 'rb')
                filedatastring = workingfile.read()  # turns the entire file contents into a single string
                workingfile.close()
                outname = 'forUpload_' + fname_Value[:-4:] + '.csv'  # removes .txt or any other 3-char extension
                outputfile = open(outname, 'wb')
                filedatalines = filedatastring.split('\n')  # a list containing each line of the file
                # print '\n', 'outname: ', outname, '\n\n'
                # HEADERS ARE NOT IGNORED! Since the file might not have headers.
                print outname
                for dataline in filedatalines:
                    # print filedatalines.index(dataline), dataline.split('|')
                    # e.g. index 13 requires len 14, so len > index is needed
                    if len(dataline.split('|')) > codeDescriptionIndex:
                        thecode_text = dataline.split('|')[codeIndex]
                        thedescription_text = dataline.split('|')[codeDescriptionIndex]
                        linetowrite = thecode_text + ',' + thedescription_text + '\n'
                        outputfile.write(linetowrite)
                outputfile.close()

    def processByType(aFileList, itsType):
        typeDict = {'rateCodeFiles':   {'CODE_INDEX': 4, 'DESC_INDEX': 7},
                    'roomTypeFiles':   {'CODE_INDEX': 1, 'DESC_INDEX': 13},
                    'sourceCodeFiles': {'CODE_INDEX': 2, 'DESC_INDEX': 3}}
        # print 'someFixedProcess(', aFileList, typeDict[itsType]['CODE_INDEX'], typeDict[itsType]['DESC_INDEX'], ')'
        someFixedProcess(aFileList,
                         typeDict[itsType]['CODE_INDEX'],
                         typeDict[itsType]['DESC_INDEX'])

    for key in groupedFileDict:
        processByType(groupedFileDict[key], key)

processRawFiles(groupedFilestoProcess)
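
The same extraction could also be written with the standard csv module (a sketch under the same column-index assumptions, not the poster's code); csv handles the pipe delimiter, and skipping short footer rows stays explicit:

import csv
import os

def extract_columns(fname, code_index, desc_index):
    """Sketch: copy two pipe-delimited columns into a forUpload_*.csv file."""
    if not os.path.exists(fname):
        return
    outname = 'forUpload_' + fname[:-4] + '.csv'
    with open(fname, 'rb') as infile, open(outname, 'wb') as outfile:
        reader = csv.reader(infile, delimiter='|')
        writer = csv.writer(outfile)
        for row in reader:
            # Skip short rows (footer lines or rows missing the wanted columns).
            if len(row) > max(code_index, desc_index):
                writer.writerow([row[code_index], row[desc_index]])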

Read file as list, edit and write back

Say I have a textfile containing the following:
1:Programming:Adam:0
2:Math:Max:0
3:Engineering:James:0
I am trying to read this text file as a list, then have the user specify which line's 0 they want to change to 1, and then write the changes back into the text file.
So for example if a user specifies line 2, I want the 0 in line 2 to be changed to 1 and then save the changes made back onto the text file.
So far I have the following, and I just can't get it to overwrite the file:
class Book_list:
    def __init__(self, book_ID, book_title, book_author, availability):
        self.book_ID = book_ID
        self.book_title = book_title
        self.book_author = book_author
        self.availability = availability

    def __str__(self):
        return ('ID: ' + self.book_ID + '\nBook_Title: ' + self.book_title +
                '\nBook_author: ' + self.book_author +
                '\navailability: ' + self.availability + '\n')

    def __getitem__(self, book_ID):
        return self.book_ID

    def __getitem__(self, availability):
        return self.availability

x = str(raw_input('enter line number.'))
with open('database.txt', 'r') as f:
    lines = f.readlines()
library = []
for line in lines:
    line = line.strip()
    data = line.split(':')
    b = Book_list(data[0], data[1], data[2], str(data[3]))
    library.append(b)
for i in range(0, len(library)):
    if (library[i])[0] == x and (library[i])[3] == 0:
        (library[i])[3] == '1'
with open('database.txt', 'w') as f:
    f.writelines(library)
You can read the file and store it in a string, then make a list from it with split:
s = 'a:b:c'
lst = s.split(':')   # lst = ['a', 'b', 'c']
Edit it as you like, and then join the pieces back together with join:
s2 = ':'.join(lst)   # s2 = 'a:b:c'
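
Putting that idea together with the file from the question, a minimal sketch (assuming database.txt looks like the sample above and the user types the leading line number) would be:

x = raw_input('enter line number: ').strip()

# Read all lines, flip the trailing 0 to 1 on the chosen line.
with open('database.txt', 'r') as f:
    lines = [line.strip() for line in f if line.strip()]

updated = []
for line in lines:
    fields = line.split(':')
    if fields[0] == x and fields[3] == '0':
        fields[3] = '1'
    updated.append(':'.join(fields))

# Write the edited records back, one per line.
with open('database.txt', 'w') as f:
    f.write('\n'.join(updated) + '\n')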

Printing out the element in the ith position in a list in Python

f = open('julyTemps.txt')
for li in f.readlines():
    data = li.strip().split(' ')
    print data[1]
This code gives me an out of range error, even though the list is of length 3. Please help.
with open('julyTemps.txt', 'r') as f:
    for line in f:
        data = line.strip().split(' ')
        if len(data) > 1:
            print data[1]
        else:
            print 'this line does not split as it should:\n%s' % line
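
If the failing lines are blank or use more than one space between fields, splitting on whitespace in general (a sketch, not part of the answer above) avoids the empty pieces that split(' ') produces:

with open('julyTemps.txt', 'r') as f:
    for line in f:
        data = line.split()        # split() with no argument collapses runs of whitespace
        if len(data) > 1:
            print data[1]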