Werkzeug's local.py file from a Flask install is not working - flask
I tried to run my program using Flask, but the local.py file from the Werkzeug package is not working. It raises errors and I have no idea how to fix it so that it works. Below is a copy of the code from the file. Does anyone know what the file is supposed to look like, or how to correct it? It is the last step I need to get my program working. I tried reinstalling Flask, but that didn't help — the file is still the same.
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import copy
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
    """Release the contents of the local for the current context.

    This makes it possible to use locals without a manager.

    Example::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    With this function one can release :class:`Local` objects as well
    as :class:`LocalStack` objects.  However it is not possible to
    release data held by proxies that way, one always has to retain
    a reference to the underlying local object in order to be able
    to release it.

    .. versionadded:: 0.6.1
    """
    # Delegate to the object's own release hook so any object
    # implementing ``__release_local__`` (Local, LocalStack) works.
    local.__release_local__()
class Local(object):
    """A context-local attribute namespace.

    Attribute reads/writes are isolated per context: the storage dict is
    keyed by ``__ident_func__()`` (greenlet id or thread id), so each
    context sees only its own attributes.
    """

    __slots__ = ('__storage__', '__ident_func__')

    def __init__(self):
        # Our own __setattr__ stores into per-context storage, so the two
        # slot attributes must be set through object.__setattr__ directly.
        object.__setattr__(self, '__storage__', {})
        object.__setattr__(self, '__ident_func__', get_ident)

    def __iter__(self):
        # Yields (context identifier, attribute dict) pairs.
        return iter(self.__storage__.items())

    def __call__(self, proxy):
        """Create a proxy for a name."""
        return LocalProxy(self, proxy)

    def __release_local__(self):
        # Drop all data stored for the current context; no-op if nothing
        # was ever stored for it.
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        try:
            return self.__storage__[self.__ident_func__()][name]
        except KeyError:
            # Missing context or missing attribute both surface as
            # AttributeError, matching normal attribute semantics.
            raise AttributeError(name)

    def __setattr__(self, name, value):
        ident = self.__ident_func__()
        storage = self.__storage__
        try:
            storage[ident][name] = value
        except KeyError:
            # First attribute stored for this context.
            storage[ident] = {name: value}

    def __delattr__(self, name):
        try:
            del self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)
class LocalStack(object):
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead.  This is best explained with an example::

        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42

    They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after using.  When the stack is empty it will
    no longer be bound to the current context (and as such released).

    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.

    .. versionadded:: 0.6.1
    """

    def __init__(self):
        self._local = Local()

    def __release_local__(self):
        self._local.__release_local__()

    def _get__ident_func__(self):
        return self._local.__ident_func__

    def _set__ident_func__(self, value):
        # Must bypass Local.__setattr__ which would store per-context.
        object.__setattr__(self._local, '__ident_func__', value)
    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    # The helper functions are only needed to build the property above.
    del _get__ident_func__, _set__ident_func__

    def __call__(self):
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        return LocalProxy(_lookup)

    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            # Last item: release the whole context binding but still
            # return the value that was on the stack.
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack.  If the stack is empty,
        `None` is returned.
        """
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None
class LocalManager(object):
    """Local objects cannot manage themselves.  For that you need a local
    manager.  You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`.  Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.

    The `ident_func` parameter can be added to override the default ident
    function for the wrapped locals.

    .. versionchanged:: 0.6.1
       Instead of a manager the :func:`release_local` function can be used
       as well.

    .. versionchanged:: 0.7
       `ident_func` was added.
    """

    def __init__(self, locals=None, ident_func=None):
        # Accept nothing, a single Local, or any iterable of locals.
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            self.locals = [locals]
        else:
            self.locals = list(locals)
        if ident_func is not None:
            self.ident_func = ident_func
            # Propagate the custom ident function to every wrapped local.
            for local in self.locals:
                object.__setattr__(local, '__ident_func__', ident_func)
        else:
            self.ident_func = get_ident

    def get_ident(self):
        """Return the context identifier the local objects use internally for
        this context.  You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.

        .. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
           will then be propagated to all the locals passed to the
           constructor.
        """
        return self.ident_func()

    def cleanup(self):
        """Manually clean up the data in the locals for this context.  Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)

    def make_middleware(self, app):
        """Wrap a WSGI application so that cleaning up happens after
        request end.
        """
        def application(environ, start_response):
            # ClosingIterator invokes self.cleanup once the response
            # iterable is exhausted or closed.
            return ClosingIterator(app(environ, start_response), self.cleanup)
        return application

    def middleware(self, func):
        """Like `make_middleware` but for decorating functions.

        Example usage::

            @manager.middleware
            def application(environ, start_response):
                ...

        The difference to `make_middleware` is that the function passed
        will have all the arguments copied from the inner application
        (name, docstring, module).
        """
        return update_wrapper(self.make_middleware(func), func)

    def __repr__(self):
        return '<%s storages: %d>' % (
            self.__class__.__name__,
            len(self.locals)
        )
@implements_bool
class LocalProxy(object):
    """Acts as a proxy for a werkzeug local.  Forwards all operations to
    a proxied object.  The only operations not supported for forwarding
    are right handed operands and any kind of assignment.

    Example usage::

        from werkzeug.local import Local
        l = Local()

        # these are proxies
        request = l('request')
        user = l('user')


        from werkzeug.local import LocalStack
        _response_local = LocalStack()

        # this is a proxy
        response = _response_local()

    Whenever something is bound to l.user / l.request the proxy objects
    will forward all operations.  If no object is bound a :exc:`RuntimeError`
    will be raised.

    To create proxies to :class:`Local` or :class:`LocalStack` objects,
    call the object as shown above.  If you want to have a proxy to an
    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
    a function to the :class:`LocalProxy` constructor::

        session = LocalProxy(lambda: get_current_request().session)

    .. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
    """
    __slots__ = ('__local', '__dict__', '__name__', '__wrapped__')

    def __init__(self, local, name=None):
        # Name-mangled slot assignment must bypass our own __setattr__,
        # which forwards to the proxied object.
        object.__setattr__(self, '_LocalProxy__local', local)
        object.__setattr__(self, '__name__', name)
        if callable(local) and not hasattr(local, '__release_local__'):
            # "local" is a callable that is not an instance of Local or
            # LocalManager: mark it as a wrapped function.
            object.__setattr__(self, '__wrapped__', local)

    def _get_current_object(self):
        """Return the current object.  This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        if not hasattr(self.__local, '__release_local__'):
            # A plain callable was given: calling it performs the lookup.
            return self.__local()
        try:
            return getattr(self.__local, self.__name__)
        except AttributeError:
            raise RuntimeError('no object bound to %s' % self.__name__)

    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError:
            raise AttributeError('__dict__')

    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError:
            return '<%s unbound>' % self.__class__.__name__
        return repr(obj)

    def __bool__(self):
        try:
            return bool(self._get_current_object())
        except RuntimeError:
            # An unbound proxy is falsy rather than an error.
            return False

    def __unicode__(self):
        try:
            return unicode(self._get_current_object())  # noqa
        except RuntimeError:
            return repr(self)

    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError:
            return []

    def __getattr__(self, name):
        if name == '__members__':
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)

    def __setitem__(self, key, value):
        self._get_current_object()[key] = value

    def __delitem__(self, key):
        del self._get_current_object()[key]

    if PY2:
        __getslice__ = lambda x, i, j: x._get_current_object()[i:j]

        def __setslice__(self, i, j, seq):
            self._get_current_object()[i:j] = seq

        def __delslice__(self, i, j):
            del self._get_current_object()[i:j]

    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)  # noqa
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __long__ = lambda x: long(x._get_current_object())  # noqa
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
    __radd__ = lambda x, o: o + x._get_current_object()
    __rsub__ = lambda x, o: o - x._get_current_object()
    __rmul__ = lambda x, o: o * x._get_current_object()
    __rdiv__ = lambda x, o: o / x._get_current_object()
    if PY2:
        __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
    else:
        __rtruediv__ = __rdiv__
    __rfloordiv__ = lambda x, o: o // x._get_current_object()
    __rmod__ = lambda x, o: o % x._get_current_object()
    __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
    __copy__ = lambda x: copy.copy(x._get_current_object())
    __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
Related
Bound task in celery is not able to access instance variable in django project
I have setup celery in my django project using official documentation at http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html#using-celery-with-django So my MyApp/tasks.py have content from celery import shared_task class Someclass(): def __init__(self, x, y): self.x = x self.y = y #shared_task(bind=True) def func1(self): '''This does not work''' return self.x + self.y #shared_task(bind=True) def func2(self, a, b): '''This works well''' return a + b When I run In [9]: o = Someclass(3, 4) In [10]: o.func1.delay() Out[10]: <AsyncResult: afc6b151-d71c-4f46-a916-6917f98c681f> I get the error AttributeError: 'func1' object has no attribute 'x' When I run In [11]: o.func2.delay(3, 4) Out[11]: <AsyncResult: 3b227f00-8d9c-472b-b7d8-8b4b6261f689> This works perfectly How can I make func1 working so that it can use instance variables e.g. x and y?
from celery import shared_task @shared_task(bind=True) def func(self, a, b): return a + b class Someclass(): def __init__(self, x, y): self.x = x self.y = y def func1(self): return func.delay(self.x, self.y)
convergence methodology is not clear to me
It would be great if something like this could be make to work. It is not clear to me how to implement convergence at the moment unless I use raw input and embed the name of the incoming edge that is 'ticking' into the payload. import bonobo from bonobo.config import Configurable from bonobo.config import use_context #use_context class A(Configurable): def __call__(self, context): context.set_output_fields(['a']) yield {'a': 'a'} #use_context class B(Configurable): def __call__(self, context, a): context.set_output_fields(['b']) yield {'b': 'b'} #use_context class F(Configurable): def __call__(self, context): context.set_output_fields(['f']) yield {'f': 'f'} #use_context class G(Configurable): def __call__(self, context, f): context.set_output_fields(['g']) yield {'g': 'g'} #use_context class Normalize(Configurable): def __call__(self, context, b, f): context.set_output_fields(['n']) yield {'n': 'n'} if __name__ == '__main__': n = Normalize() graph = bonobo.Graph() # Here we mark _input to None, so normalize won't get the "begin" impulsion. graph.add_chain(n, _input=None) # Add two different chains graph.add_chain(A(), B(), _output=n) graph.add_chain(F(), G(), _output=n) bonobo.run(graph) Currently this code will result in an error that is understandable, but unfortunate: bonobo.errors.UnrecoverableTypeError Input of <__main__.Normalize object at 0x00000261F76EDD30> does not bind to the node signature. Args: (<NodeExecutionContext(+Normalize) in=1>,) Input: Bag(b={'b': 'b'}) Kwargs: {} Signature: (context, b, f) - A in=1 out=1 [done] - B in=1 out=1 [done] - F in=1 out=1 [done] - G in=1 out=1 [done] ! Normalize in=1 err=1 [defunct]
multiprocessing - pyodbc IOError: bad message length
I am unexpectedly getting IOError: bad message length error when trying to share pyodbc connection across multiple processes, especially when N is more than 4 (no. of cores). Sometimes I also get cPickle.UnpicklingError: invalid load key, '#'., pyodbc.ProgrammingError: ('24000', '[24000] [FreeTDS][SQL Server]Invalid cursor state (0) (SQLExecDirectW)') as errors. # Import custom python packages import multiprocessing import multiprocessing.managers as mm import pathos.multiprocessing as mp import pyodbc, datetime, time class MyConn(object): def __init__(self): self.conn = None self.cursor = None def connect_to_db(self): self.conn = pyodbc.connect("DSN=cpmeast;UID=dntcore;PWD=dntcorevs2") self.cursor = self.conn.cursor() def run_qry(self, data): print 'Running query', data self.cursor.execute("WAITFOR DELAY '00:00:01';select GETDATE(), '"+str(data)+"';") l = self.cursor.fetchall() _l = [] for i in l: _l.append(list(i)) print 'Result for query', data, _l return _l class MyManagerClass(object): def __init__(self): self.result = multiprocessing.Manager().list() def read_data(self, *args): conn = args[0][0] data = args[0][1] l = conn.run_qry(data) self.result.append(l) class MyManager(mm.BaseManager): pass # Pass is really enough. Nothing needs to be done here. def main(): time_start = time.time() MyManager.register("MyConn", MyConn) manager = MyManager() manager.start() a = manager.MyConn() a.connect_to_db() dbm = MyManagerClass() pool = mp.ProcessingPool(4) jobs = [] N = 5 for i in range(N): jobs.append((a, str(i))) for i in pool.imap(dbm.read_data, jobs): print 'result' pool.close() pool.join() print 'Result', dbm.result print 'Closed' time_stop = time.time() msg = 'runtime: {0}'.format(str(datetime.timedelta (seconds=time_stop-time_start))) print msg if __name__ == '__main__': main()
Cannot Pool.map() function because of UnpickleableError?
So I am trying to multi process function F. Which is accessed by a button press with tkinter. def f(x): global doom,results,info doom = doom + 1 if check(x) == True: results.add(x) info.append(get_column_number(x)) j.step(1) texx = "1/"+doom s.configure(text=texx) root.update() The function is called within a function like so: def dojob(): index = ['URLS'...] pool = Pool(processes=4) s.configure(text="Shifting Workload to cores..") root.update() pool.map(f, index) The button is inside root window. I get the following error: Exception in thread Thread-2: Traceback (most recent call last): File "C:\Python27\lib\threading.py", line 808, in __bootstrap_inner self.run() File "C:\Python27\lib\threading.py", line 761, in run self.__target(*self.__args, **self.__kwargs) File "C:\Python27\lib\multiprocessing\pool.py", line 342, in _handle_tasks put(task) UnpickleableError: Cannot pickle <type 'tkapp'> objects I do not even know what a pickle does? Help? Here is the complete code: from Tkinter import * from ttk import * from tkMessageBox import showinfo from multiprocessing import Pool import random emails = set() import urllib2 import urllib2 as urllib ######## CONSTANT_PAGECOUNT = 20 ###### def f(x): global doom,emails,info doom = doom + 1 if check(x) == True: print "", emails.add(x) info.append(get_column_number(x)) j.step(1) texx = "Sk1nn1n "+str(doom)+'/'+str(CONSTANT_PAGECOUNT)+"" s.configure(text=texx) root.update() return 0 def f(x): print "" def showFile(site,info): top = Toplevel() top.title('Sites') x = Text(top) x.pack() i=0 for site_url in site: x.insert(END,site_url) i=i+1 def get_column_number(url): return True def check(url): return True def getgoogleurl(search,siteurl=False,startr=0): if siteurl==False: return 'http://www.google.com/search?q='+urllib2.quote(search)+'&start='+str(startr)+'&oq='+urllib2.quote(search) else: return 
'http://www.google.com/search?q=site:'+urllib2.quote(siteurl)+'%20'+urllib2.quote(search)+'&oq=site:'+urllib2.quote(siteurl)+'%20'+urllib2.quote(search) def getgooglelinks(search,siteurl=False,startr=0): #google returns 403 without user agent headers = {'User-agent':'Mozilla/11.0'} req = urllib2.Request(getgoogleurl(search,siteurl,startr),None,headers) site = urllib2.urlopen(req) data = site.read() site.close() #no beatifulsoup because google html is generated with javascript start = data.find('<div id="res">') end = data.find('<div id="foot">') if data[start:end]=='': #error, no links to find return False else: links =[] data = data[start:end] start = 0 end = 0 while start>-1 and end>-1: #get only results of the provided site if siteurl==False: start = data.find('<a href="/url?q=') else: start = data.find('<a href="/url?q='+str(siteurl)) data = data[start+len('<a href="/url?q='):] end = data.find('&sa=U&ei=') if start>-1 and end>-1: link = urllib2.unquote(data[0:end]) data = data[end:len(data)] if link.find('http')==0: links.append(link) return links def rip(results=15,accuracy=16): global e keyword = ''+str(e.get()) if keyword.strip()=="": s.configure(text="Please enter a keyword") root.update() return 0 linklist = [] counter = 0 doom = 0 while counter < results: links = getgooglelinks(keyword,startr=counter) for link in links: if len(linklist) > CONSTANT_PAGECOUNT: s.configure(text="Proccessing..") root.update() return linklist else: doom = doom + 1 linklist.append(link) texx = str(doom)+"/"+str(CONSTANT_PAGECOUNT) s.configure(text=texx) root.update() root.update() counter = counter+accuracy return linklist def flip(): global e emails = set() info = [] keyword = ''+str(e.get()) if keyword.strip()=="": s.configure(text="Please enter a keyword") root.update() return 0 s.configure(text="Generating index..") root.update() doom = -1 index = rip(CONSTANT_PAGECOUNT,10) if 1: try: pool = Pool(processes=4) #s.configure(text="Shifting Workload to cores..") #root.update() 
pool.map(f, index) pool.close() except: print "The errors there.." j.config(value=CONSTANT_PAGECOUNT) if len(emails) > 0: filepath='relavant_list_'+str(random.randint(1,9999))+'.emList.txt' #print len(emails), #print "emails found." ggg = open(filepath,'a+') for x in emails: ggg.write(x+"\n") showinfo( str(len(emails))+" key word related sites found!", " sites are saved in "+str(filepath) ) showFile(emails,info) s.configure(text=filepath) else: s.configure(text='No related sites found : (') if __name__ == '__main__': ### CONSTANTS version = '1.0' ### END CONSTANTS root = Tk() root.title('Program v'+version) s = Style() s.theme_use('default') #print s.theme_names() s.configure("black.Horizontal.TProgressbar", foreground='blue', background='blue') j = Progressbar(root, style="black.Horizontal.TProgressbar", orient="vertical", length=200, mode="determinate", maximum=CONSTANT_PAGECOUNT, value=0) j.pack(side='right',fill='y') f = Frame(root) x = Frame(f) e = Entry(x,width=51) s = Label(x,width=50,anchor='center',text='Waiting for task..') Button(f,text='Generate List!',width=50,command=flip).pack(fill='both',expand=True) s.pack(side='bottom',fill='y',expand=True) e.pack(side='top',fill='both',expand=True) x.pack(side='top',fill='y',expand=True) f.pack(side='left',expand=True,fill="both") root.mainloop()
You are leaking a tkinter object. Most likely because you are trying to update the interface from another process with the last line of f() Update based on code You have a name collision between your function f() and a variable f in your __main__ which gets assigned to your main window and causes the tkapp pickle error. Rename the function to def myfunc() or something. Also need to call pool.join() after pool.close()
Access std::complex<> from gdb python macro
I'm trying to write a python script that evaluates a vector Complex numbers so I can do things like plot it via matplotlib. Python code below with comment where code breaks: import sys import gdb import matplotlib.pyplot as plt class Plotter (gdb.Command): """ Plots vector""" # _iterator pulled directly from # /usr/share/gdb/python/libstdcxx/v6/printers.py class _iterator: def __init__ (self, start, finish): self.item = start self.finish = finish self.count = 0 def __iter__(self): return self def next(self): if self.item == self.finish: raise StopIteration count = self.count self.count = self.count + 1 elt = self.item.dereference() self.item = self.item + 1 return ('[%d]' % count, elt) def __init__(self): super(Plotter, self).__init__("plot_test", gdb.COMMAND_OBSCURE) def invoke(self, arg, from_tty): frame = gdb.selected_frame() try: val = gdb.Frame.read_var(frame, arg) if str(val.type).find("vector") != -1: print "Plot vector:", str(val.type) if (str(val.type).find("complex") != -1): self.plot_complex_vector(val) else: self.plot_vector(val) else: print "Not a vector..." except: print "No such variable:", arg return def plot_complex_vector(self, val): try: it = self._iterator(val['_M_impl']['_M_start'], val['_M_impl']['_M_finish']) vector = [] while(True): x = it.next() vector.append(complex(x[1])) # doesn't work... return except StopIteration: pass except: print sys.exc_info()[0] print vector plt.plot(vector) plt.show() # works... def plot_vector(self, val): try: it = self._iterator(val['_M_impl']['_M_start'], val['_M_impl']['_M_finish']) vector = [] while(True): x = it.next() vector.append(float(x[1])) except StopIteration: pass except: print sys.exc_info()[0] print vector plt.plot(vector) plt.show() Plotter() So the question is, how do I access the real/imaginary parts of a a std::complex value? 
It looks like doing a print x[1] Will print values like : {_M_value = 0 + 1 * I} Update: It looks like I can do a little string editing before doing a typecast: while(True): x = it.next() s = str(x[1]['_M_value']) # convert string to complex format that python understands. c = complex(s.replace('I','j').replace(' ','').replace('*','')) vector.append(c) # Works now... But... is there a better way to do this?
try: it = self._iterator(val['_M_impl']['_M_start'], val['_M_impl']['_M_finish']) vector = [] while(True): x = it.next() vector.append(complex(x[1])) # doesn't work... return except StopIteration: pass except: print sys.exc_info()[0] Is not how iterators are meant to be used in python. Use try: it = self._iterator(val['_M_impl']['_M_start'], val['_M_impl']['_M_finish']) vector = [complex(x[1]) for x in it] except Exception as e: print e, sys.exc_info()[0] If you really still want to wrap it in a try...except block. Edit: Try complex(x.real() + x.imag()). What does print x.type.fields() show?