Here's my code:
import networkx as nx
import matplotlib.pyplot as plt
fh = open('one.txt', 'r')
G = nx.read_edgelist(fh, nodetype=int)
fh.close()
print nx.info(G)
nx.draw(G)
plt.show()
But when I run it, I get the following error:
Traceback (most recent call last):
  File "graphDeBruijn.py", line 5, in <module>
    G=nx.read_edgelist(fh, nodetype=int)
  File "<decorator-gen-286>", line 2, in read_edgelist
  File "C:\PYTHON27\lib\site-packages\networkx-2.0.dev20161201181419-py2.7.egg\networkx\utils\decorators.py", line 221, in _open_file
    result = func(*new_args, **kwargs)
  File "C:\PYTHON27\lib\site-packages\networkx-2.0.dev20161201181419-py2.7.egg\networkx\readwrite\edgelist.py", line 374, in read_edgelist
    data=data)
  File "C:\PYTHON27\lib\site-packages\networkx-2.0.dev20161201181419-py2.7.egg\networkx\readwrite\edgelist.py", line 255, in parse_edgelist
    for line in lines:
  File "C:\PYTHON27\lib\site-packages\networkx-2.0.dev20161201181419-py2.7.egg\networkx\readwrite\edgelist.py", line 371, in <genexpr>
    lines = (line.decode(encoding) for line in path)
  File "C:\PYTHON27\lib\encodings\utf_8.py", line 16, in decode
    return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xff in position 0: invalid start byte
Can anyone help me? Thanks!
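One hedged guess, not confirmed by the traceback alone: a 0xff byte at position 0 usually means the file carries a UTF-16 byte-order mark (for example, Notepad's "Unicode" save option). A minimal sketch of a workaround under that assumption is to convert one.txt to UTF-8 once and read the converted copy (the filename one_utf8.txt is just an illustration):
# Sketch, assuming one.txt was saved as UTF-16 with a BOM (the 0xff lead byte).
import io
import networkx as nx
# Convert the edge list to UTF-8 once, then read it as usual.
with io.open('one.txt', 'r', encoding='utf-16') as src, \
     io.open('one_utf8.txt', 'w', encoding='utf-8') as dst:
    dst.write(src.read())
G = nx.read_edgelist('one_utf8.txt', nodetype=int)
print(nx.info(G))
If the file turns out to use some other encoding, read_edgelist also accepts an encoding argument that is applied to each line as it is read.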
Hi, I want to query XML data from an Oracle DB with cx_Oracle, but it fails with an ORA-19011 error. I think the size of the query data is larger than the string buffer, but I don't know how to solve this problem.
The Oracle DB is external and not my own, so I can't access it directly. I therefore want to fix the problem in my code and print the query data in the Python terminal.
(my software versions)
windows 10 64bit
python 2.7 64bit
oracle-instant client 19.3 64bit
cx_oracle 7.2.2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cx_Oracle
import sys
import csv
import codecs
printHeader = True
conn = cx_Oracle.connect('id/passwd@ip:port/orcl')
print(conn.version)
curs = conn.cursor()
curs.execute('SELECT * FROM tablename')
for record in curs:
    print(record)
The error occurs at line 18 (for record in curs); here are the error messages.
11.2.0.4.0
We've got an error while stopping in unhandled exception: <class 'cx_Oracle.DatabaseError'>.
Traceback (most recent call last):
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\pydevd.py", line 1740, in do_stop_on_unhandled_exception
self.do_wait_suspend(thread, frame, 'exception', arg, is_unhandled_exception=True)
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\pydevd.py", line 1615, in do_wait_suspend
with self._threads_suspended_single_notification.notify_thread_suspended(thread_id, stop_reason):
File "C:\Python27\lib\contextlib.py", line 17, in __enter__
return self.gen.next()
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\pydevd.py", line 360, in notify_thread_suspended
with AbstractSingleNotificationBehavior.notify_thread_suspended(self, thread_id, stop_reason):
File "C:\Python27\lib\contextlib.py", line 17, in __enter__
return self.gen.next()
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\pydevd.py", line 308, in notify_thread_suspended
self.send_suspend_notification(thread_id, stop_reason)
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\pydevd.py", line 354, in send_suspend_notification
py_db.writer.add_command(py_db.cmd_factory.make_thread_suspend_single_notification(py_db, thread_id, stop_reason))
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\_pydevd_bundle\pydevd_net_command_factory_json.py", line 309, in make_thread_suspend_single_notification
return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, event, is_json=True)
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\_vendored\pydevd\_pydevd_bundle\pydevd_net_command.py", line 57, in __init__
text = json.dumps(as_dict)
File "C:\Python27\lib\json\__init__.py", line 244, in dumps
return _default_encoder.encode(obj)
File "C:\Python27\lib\json\encoder.py", line 207, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Python27\lib\json\encoder.py", line 270, in iterencode
return _iterencode(o, 0)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xb9 in position 11: invalid start byte
Traceback (most recent call last):
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\ptvsd_launcher.py", line 43, in <module>
main(ptvsdArgs)
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\__main__.py", line 432, in main
run()
File "c:\Users\goo41\.vscode\extensions\ms-python.python-2019.8.30787\pythonFiles\lib\python\ptvsd\__main__.py", line 316, in run_file
runpy.run_path(target, run_name='__main__')
File "C:\Python27\lib\runpy.py", line 252, in run_path
return _run_module_code(code, init_globals, run_name, path_name)
File "C:\Python27\lib\runpy.py", line 82, in _run_module_code
mod_name, mod_fname, mod_loader, pkg_name)
File "C:\Python27\lib\runpy.py", line 72, in _run_code
exec code in run_globals
File "c:\PythonWorkspace\oraclePrc\test1.py", line 18, in <module>
for record in curs:
cx_Oracle.DatabaseError: ORA-19011: Character string buffer too small
When you connect to the database, try using this code instead:
conn = cx_Oracle.connect('id/passwd@ip:port/orcl', encoding="UTF-8", nencoding="UTF-8")
This ensures that you are using a universal encoding, which may eliminate the first error and possibly the second as well. If it does not, update the code sample and error messages in your question.
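If the ORA-19011 persists, the column being selected may be an Oracle XMLTYPE that is too large to return as a plain string. A minimal sketch of a common workaround under that assumption (tablename and xml_col are placeholders, not the poster's actual schema):
import cx_Oracle
conn = cx_Oracle.connect('id/passwd@ip:port/orcl', encoding="UTF-8", nencoding="UTF-8")
curs = conn.cursor()
# Ask Oracle to return the XML as a CLOB instead of a limited string buffer.
curs.execute("SELECT t.xml_col.getClobVal() FROM tablename t")
for (xml_clob,) in curs:
    # cx_Oracle returns a LOB object; read() yields the full document text.
    print(xml_clob.read())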
I have Arabic datasets for classification using Python; two directories (negative and positive) in a Twitter directory.
I want to use Python classes to classify the data. When I run the attached code, this error occurs:
File "C:\Users\DEV2016\Anaconda2\lib\encodings\utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xc7 in position 0: invalid continuation byte
import sklearn.datasets
import sklearn.feature_extraction.text
import sklearn.metrics
import sklearn.cross_validation
import sklearn.svm
import sklearn.naive_bayes
import sklearn.neighbors
dir_path = "E:\Twitter\Twitter"
# Loading files into memory
files = sklearn.datasets.load_files(dir_path)
# Calculating BOW
count_vector = sklearn.feature_extraction.text.CountVectorizer()
word_counts=count_vector.fit_transform(files.data)
# Calculating TFIDF
tf_transformer = sklearn.feature_extraction.text.TfidfTransformer(use_idf=True).fit(word_counts)
X = tf_transformer.transform(word_counts)
# Create classifier
# clf = sklearn.naive_bayes.MultinomialNB()
# clf = sklearn.svm.LinearSVC()
n_neighbors = 11
weights = 'distance'
clf = sklearn.neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
# Test the classifier
# Train-test split
test_size=0.4
X_train, X_test, y_train, y_test = sklearn.cross_validation.train_test_split(X, files.target, test_size=test_size)
# Test classifier
clf.fit(X_train, y_train)
y_predicted = clf.predict(X_test)
print (sklearn.metrics.classification_report(y_test, y_predicted,
target_names=files.target_names))
print ('Confusion Matrix:')
print (sklearn.metrics.confusion_matrix(y_test, y_predicted))
Traceback
File "<ipython-input-19-8ea269fd9c3d>", line 1, in <module>
runfile('C:/Users/DEV2016/.spyder/clf.py', wdir='C:/Users/DEV2016/.spyder')
File "C:\Users\DEV2016\Anaconda2\lib\site-
packages\spyder\utils\site\sitecustomize.py", line 705, in runfile
execfile(filename, namespace)
File "C:\Users\DEV2016\Anaconda2\lib\site-
packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/Users/DEV2016/.spyder/clf.py", line 18, in <module>
word_counts=count_vector.fit_transform(files.data)
File "C:\Users\DEV2016\Anaconda2\lib\site-
packages\sklearn\feature_extraction\text.py", line 869, in fit_transform
self.fixed_vocabulary_)
File "C:\Users\DEV2016\Anaconda2\lib\site-
packages\sklearn\feature_extraction\text.py", line 792, in _count_vocab
for feature in analyze(doc):
File "C:\Users\DEV2016\Anaconda2\lib\site-
packages\sklearn\feature_extraction\text.py", line 266, in <lambda>
tokenize(preprocess(self.decode(doc))), stop_words)
File "C:\Users\DEV2016\Anaconda2\lib\site-
packages\sklearn\feature_extraction\text.py", line 116, in decode
doc = doc.decode(self.encoding, self.decode_error)
File "C:\Users\DEV2016\Anaconda2\lib\encodings\utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xc7 in position 0:
invalid continuation byte
The Twitter data you are trying to load contains characters that are not valid UTF-8. Try loading it with another encoding, for example:
files = sklearn.datasets.load_files(dir_path, encoding="iso-8859-1")
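If the files are in fact mostly UTF-8 with only a few corrupt bytes, another option is to keep UTF-8 and tell load_files how to handle undecodable bytes; a short sketch (decode_error="replace" is an assumption about what is acceptable for this dataset):
# Sketch: stay on UTF-8 but substitute undecodable bytes instead of failing.
files = sklearn.datasets.load_files(dir_path, encoding="utf-8", decode_error="replace")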
I am trying to load a .mrc file in Python, but I am getting an error.
Code (ReadMrcTest.py):
import numpy as np
import h5py
imgPath = 'usr/Task1/emd-62.mrc' #Even for 'usr/task1/emd-62.mrc', same error
image = h5py.File(imgPath)
print image
Error:
Traceback (most recent call last):
File "ReadMrcTest.py", line 8, in <module>
image = h5py.File(imgPath)
File "/usr/s029/.local/lib/python2.7/site-packages/h5py/_hl/files.py", line 271, in __init__
fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
File "/usr/s029/.local/lib/python2.7/site-packages/h5py/_hl/files.py", line 126, in make_fid
fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper (/tmp/pip-nCYoKW-build/h5py/_objects.c:2840)
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper (/tmp/pip-nCYoKW-build/h5py/_objects.c:2798)
File "h5py/h5f.pyx", line 98, in h5py.h5f.create (/tmp/pip-nCYoKW-build/h5py/h5f.c:2284)
IOError: Unable to create file (Unable to open file: name = '/usr/task1/emd-62.mrc', errno = 17, error message = 'file exists', flags = 15, o_flags = c2)
Thanks in advance
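One hedged suggestion, not a confirmed fix: an .mrc map is normally not an HDF5 container, so h5py cannot open it and apparently falls back to trying to create the file (hence the "file exists" IOError). A minimal sketch under that assumption, using the separate mrcfile package (the absolute path is also an assumption):
# Sketch: read the MRC map with the dedicated mrcfile package instead of h5py.
import mrcfile
img_path = '/usr/Task1/emd-62.mrc'
with mrcfile.open(img_path, mode='r') as mrc:
    print(mrc.data.shape)  # the map data is exposed as a numpy array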
from numpy import exp, sqrt, log, pi, inf
from scipy.integrate import quad

def voigt_PD(x, y0, xc, A, wG, wL):
    def integconvo(t, x1, xc1, wG1, wL1):
        return exp(-t**2) / ((sqrt(log(2))*wL1/wG1)**2 + ((sqrt(4*log(2))*(x1 - xc1)/wG1) - t)**2)
    return y0 + (A*(2*log(2)/pi**1.5)*(wL/wG**2) * quad(integconvo, -inf, inf, args=(x, xc, wG, wL))[0])
I am defining this Voigt model function to fit a curve, but I get an error stating "Supplied function does not return a valid float". The fitting routine follows, along with the error. Can anyone help find the mistake? Thanks in advance.
import pylab
from lmfit import Model
from readdatafile import readdatafile
from voigt_PD import voigt_PD
X,Y = readdatafile('data.dat')
gmod = Model(voigt_PD)
result = gmod.fit(Y, x=X, y0=0, xc=0, A=0.1, wG=0.5, wL=0.5)
print(result.fit_report())
pylab.plot(X, Y, 'bo')
pylab.plot(X, result.best_fit, 'r-')
pylab.show()
Then it gives
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 685, in runfile
execfile(filename, namespace)
File "C:\Python27\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 71, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "E:/programming/python scripts/test5.py", line 29, in <module>
result = gmod.fit(Y, x=X,y0=0,xc=0,A=0.1,wG=0.5,wL=0.5)
File "build\bdist.win32\egg\lmfit\model.py", line 542, in fit
File "build\bdist.win32\egg\lmfit\model.py", line 746, in fit
File "build\bdist.win32\egg\lmfit\model.py", line 408, in eval
File "voigt_PD.py", line 13, in voigt_PD
return y0+(A*(2*log(2)/pi**1.5)*(wL/wG**2)* (quad(integconvo,-inf,inf,args=(x,xc,wG,wL)))[0])
File "C:\Python27\lib\site-packages\scipy\integrate\quadpack.py", line 311, in quad
points)
File "C:\Python27\lib\site-packages\scipy\integrate\quadpack.py", line 378, in _quad
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
quadpack.error: Supplied function does not return a valid float.
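One hedged reading of the error, not a confirmed diagnosis: lmfit calls voigt_PD with the whole X array, so integconvo returns an array for each t, and quad, which needs a scalar integrand, rejects it. A sketch of a possible rewrite under that assumption, evaluating the model one x-value at a time:
import numpy as np
from numpy import exp, sqrt, log, pi, inf
from scipy.integrate import quad
def voigt_PD(x, y0, xc, A, wG, wL):
    def integconvo(t, x1, xc1, wG1, wL1):
        return exp(-t**2) / ((sqrt(log(2))*wL1/wG1)**2 + ((sqrt(4*log(2))*(x1 - xc1)/wG1) - t)**2)
    def single(xi):
        # quad returns (value, abserr); keep only the value
        return y0 + A*(2*log(2)/pi**1.5)*(wL/wG**2) * quad(integconvo, -inf, inf, args=(xi, xc, wG, wL))[0]
    return np.vectorize(single)(x)  # works for scalar or array x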
I write a GraphML file using nx.write_graphml, but when I try to read it back I get the following error:
>>> g = nx.DiGraph()
>>> g = nx.read_graphml('cit89.graphml')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 2, in read_graphml
File "/usr/local/lib/python2.7/dist-packages/networkx/utils/decorators.py", line 220, in _open_file
result = func(*new_args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/networkx/readwrite/graphml.py", line 153, in read_graphml
glist=list(reader(path=path))
File "/usr/local/lib/python2.7/dist-packages/networkx/readwrite/graphml.py", line 405, in __call__
yield self.make_graph(g, keys, defaults)
File "/usr/local/lib/python2.7/dist-packages/networkx/readwrite/graphml.py", line 431, in make_graph
self.add_node(G, node_xml, graphml_keys)
File "/usr/local/lib/python2.7/dist-packages/networkx/readwrite/graphml.py", line 456, in add_node
node_id = self.node_type(node_xml.get("id"))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 1-2: ordinal not in range(128)
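A hedged guess based on the UnicodeEncodeError, not a confirmed fix: the node ids contain non-ASCII characters, and on Python 2 read_graphml converts ids with its default node_type=str, which can only encode ASCII. Asking for unicode node ids avoids that step:
# Sketch (Python 2): keep node ids as unicode instead of byte strings.
import networkx as nx
g = nx.read_graphml('cit89.graphml', node_type=unicode)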