Python 2.7: Defining default parameters based on globals? - python-2.7

I'm writing a utility where I would like to have global variables that change the way a function operates. By default I'd like all the functions to follow one style, but in certain cases I'd also like the ability to force the way a function operates.
Say I have a file Script_Defaults.py with
USER_INPUT = True
In another python file I have many functions like this:
from Script_Defaults import USER_INPUT
def DoSomething(var_of_data, user_input = USER_INPUT):
if user_input:
... #some code here that asks the user what to change in var_of_data
.... # goes on to do something
The problem I face here is that the default parameter only loads once when the file starts.
I want to be able to set USER_INPUT as False or True during the run of the program. To get this behaviour I'm currently using it like this...
from Script_Defaults import USER_INPUT
def DoSomething(var_of_data, user_input = None):
if user_input is None:
user_input = USER_INPUT
if user_input:
... #some code here that asks the user what to change in var_of_data
.... # goes on to do something
This seems like a lot of unnecessary code, especially if I have a lot of conditions like USER_INPUT, and many functions that need them. Is there a better way to get this functionality, or is this the only way?

Using decorators, and manipulation of a function's default arguments, you can use the following solution:
from change_defaults import Default, set_defaults
my_defaults = dict(USER_INPUT=0)
#set_defaults(my_defaults)
def DoSomething(var_of_data, user_input=Default("USER_INPUT")):
return var_of_data, user_input
def main():
print DoSomething("This")
my_defaults["USER_INPUT"] = 1
print DoSomething("Thing")
my_defaults["USER_INPUT"] = 2
print DoSomething("Actually")
print DoSomething("Works", 3)
if __name__ == "__main__":
main()
Which requires the following code:
# change_defaults.py
from functools import wraps
class Default(object):
    """Placeholder used as a default argument value.

    Marks a parameter whose real default should be looked up, by name,
    in the dict handed to `set_defaults` each time the function is called.
    """

    def __init__(self, name):
        super(Default, self).__init__()
        self.name = name  # key to look up in the defaults dict


def set_defaults(defaults):
    """Decorator factory: resolve `Default` placeholders at call time.

    `defaults` is a mutable mapping; every call of the wrapped function
    re-reads it, so mutating the dict changes the effective defaults.
    """
    def decorator(f):
        @wraps(f)  # restored: the '@' was mangled to '#' in transcription
        def wrapper(*args, **kwargs):
            # Back up the original defaults so the trick keeps working
            # on subsequent calls.
            original_defaults = f.__defaults__
            # Replace every Default("...") placeholder with its current
            # value.  __defaults__ is the 2.6+/3.x spelling of the old
            # func_defaults attribute.
            resolved = tuple(
                defaults[value.name] if isinstance(value, Default) else value
                for value in original_defaults
            )
            f.__defaults__ = resolved
            try:
                return f(*args, **kwargs)
            finally:
                # Restore placeholders even if f raises (the original
                # leaked the substituted defaults on exception).
                f.__defaults__ = original_defaults
        return wrapper
    return decorator
By defining the default parameters with Default(parameter_name) you tell the set_defaults decorator which value to take from the defaults dict.
Also, with a little more code (irrelevant to the solution) you can make it work like:
#set_defaults(my_defaults)
def DoSomething(var_of_data, user_input=Default.USER_INPUT):
...

Related

how to get ticking timer with dynamic label?

What I'm trying to do: whenever the cursor is on a label, it must show the time elapsed since the label was created. It does this well by subtracting the value (in def on_enter(i)), but I want it to keep ticking while the cursor is still on the label.
I tried using after function as newbie i do not understand it well to use on dynamic labels.
any help will be appreciated thx
code:
# Question code (Python 2 / Tkinter): a column of labels that show, on
# hover, the time elapsed since their creation, with delete/undo support.
from Tkinter import *
import datetime

date = datetime.datetime
now = date.now()

master=Tk()
# Parallel lists, all indexed by label number i:
list_label=[]   # the Label widgets
k=[]            # deletion order: 0 = visible, >0 = hidden (higher = deleted later)
time_var=[]     # creation (or re-creation) timestamp per label
result=[]       # NOTE(review): never written or read in this script
names=[]        # original label texts, restored on <Leave>

def delete(i):
    # Hide label i and record it as the most recently deleted.
    k[i]=max(k)+1
    time_var[i]='<deleted>'
    list_label[i].pack_forget()

def create():#new func
    # Undo: re-show the most recently deleted label in its original slot by
    # unpacking the visible labels below it, packing it, then re-packing them.
    i=k.index(max(k))
    for j in range(i+1,len(k)):
        if k[j]==0:
            list_label[j].pack_forget()
    list_label[i].pack(anchor='w')
    time_var[i]=time_now()  # treat re-creation as a fresh creation time
    for j in range(i+1,len(k)):
        if k[j]==0:
            list_label[j].pack(anchor='w')
    k[i]=0

###########################
def on_enter(i):
    # Show elapsed time once (static; this version does not tick).
    list_label[i].configure(text=time_now()-time_var[i])

def on_leave(i):
    # Restore the label's original text.
    list_label[i].configure(text=names[i])

def time_now():
    # Current time truncated to whole seconds.
    now = date.now()
    return date(now.year,now.month,now.day,now.hour,now.minute,now.second)

############################
for i in range(11):
    lb=Label(text=str(i),anchor=W)
    list_label.append(lb)
    lb.pack(anchor='w')
    # i=i binds the current loop value (avoids the late-binding lambda trap).
    lb.bind("<Button-3>",lambda event,i=i:delete(i))
    k.append(0)
    names.append(str(i))
    lb.bind("<Enter>",lambda event,i=i: on_enter(i))
    lb.bind("<Leave>",lambda event,i=i: on_leave(i))
    time_var.append(time_now())

master.bind("<Control-Key-z>",lambda event: create())
mainloop()
You would use after like this:
###########################
def on_enter(i):
    # Update the label once, then schedule this same callback to run again
    # in 1000 ms; keep the timer id so <Leave> can cancel it.
    list_label[i].configure(text=time_now()-time_var[i])
    list_label[i].timer = list_label[i].after(1000, on_enter, i)

def on_leave(i):
    # Restore the original text and stop the pending ticker.
    list_label[i].configure(text=names[i])
    list_label[i].after_cancel(list_label[i].timer)
However, your approach here is all wrong. You currently have some functions and a list of data. What you should do is make a single object that contains the functions and data together and make a list of those. That way you can write your code for a single Label and just duplicate that. It makes your code a lot simpler partly because you no longer need to keep track of "i". Like this:
import Tkinter as tk
from datetime import datetime

def time_now():
    # Current time truncated to whole seconds.
    now = datetime.now()
    return datetime(now.year,now.month,now.day,now.hour,now.minute,now.second)

class Kiran(tk.Label):
    """A new type of Label that shows the time since creation when the mouse hovers"""
    hidden = []  # class-level stack of hidden (right-click-deleted) instances

    def __init__(self, master=None, **kwargs):
        tk.Label.__init__(self, master, **kwargs)
        self.name = self['text']    # original text, restored on <Leave>
        self.time_var = time_now()  # creation timestamp
        self.bind("<Enter>", self.on_enter)
        self.bind("<Leave>", self.on_leave)
        self.bind("<Button-3>", self.hide)

    def on_enter(self, event=None):
        # Show elapsed time and re-schedule this callback every second.
        self.configure(text=time_now()-self.time_var)
        self.timer = self.after(1000, self.on_enter)

    def on_leave(self, event=None):
        self.after_cancel(self.timer) # cancel the timer
        self.configure(text=self.name)

    def hide(self, event=None):
        self.pack_forget()
        self.hidden.append(self) # add this instance to the list of hidden instances

    def show(self):
        self.time_var = time_now() # reset time
        self.pack(anchor='w')

def undo(event=None):
    '''if there's any hidden Labels, show one'''
    if Kiran.hidden:
        Kiran.hidden.pop().show()

def main():
    root = tk.Tk()
    root.geometry('200x200')
    for i in range(11):
        lb=Kiran(text=i)
        lb.pack(anchor='w')
    root.bind("<Control-Key-z>",undo)
    root.mainloop()

if __name__ == '__main__':
    main()
More notes:
Don't use lambda unless you are forced to; it's known to cause bugs.
Don't use wildcard imports (from module import *), they cause bugs and are against PEP8.
Put everything in functions.
Use long, descriptive names. Single letter names just waste time. Think of names as tiny comments.
Add a lot more comments to your code so that other people don't have to guess what the code is supposed to do.
Try a more beginner oriented forum for questions like this, like learnpython.reddit.com

How to modify the return address in Python 2 (or achieve an equivalent result)

Some background on why I want to achieve what I'm trying to achieve:
I am making a long-running server type application. I would like to be able to perform functional and integration style testing on this application with high coverage, especially in the failure and corner-case scenarios. In order to achieve this, I would like to inject various faults which are configurable at run time so that my test can validate the program's behavior when such a condition is hit.
The Problem:
I would like to be able to dynamically decide the return behavior of a function. I would additionally like to do this with only a function call and without pre-processing the source code (macros). Here is a simple example:
from functools import wraps
def decorator(func):
#wraps(func)
def func_wrapper(*args, **kwargs):
print 'in wrapper before %s' % func.__name__
val = func(*args, **kwargs)
print 'in wrapper after %s' % func.__name__
return val
return func_wrapper
#decorator
def grandparent():
val = parent()
assert val == 2
# do something with val here
#decorator
def parent():
foo = 'foo_val'
some_func(foo)
# other statements here
child()
# if the condition in child is met,
# this would be dead (not-executed)
# code. If it is not met, this would
# be executed.
return 1
def child(*args, **kwargs):
# do something here to make
# the assert in grandparent true
return 2
# --------------------------------------------------------------------------- #
class MyClass:
#decorator
def foo(self):
val = self.bar()
assert val == 2
def bar(self):
self.tar()
child()
return 1
def tar(self):
return 42
# --------------------------------------------------------------------------- #
The grandparent() function in the code above calls parent() to get a response. It would then do something based on the value of val. The parent() function calls child() and unconditionally returns the value 1. I would like to write something in child() which causes the value it returns to be returned to grandparent() and skip processing the rest of parent().
Restrictions/Permissions
grandparent() could be function number n in a long chain of function calls, not necessarily the top level function. Only
child() and any new helper functions called solely as a result of calling child() can be modified/created to make this work.
All work must be done at runtime. No pre-processing of the source files is acceptable
The decision about the parent() function's behavior has to be decided inside of the specific child() call.
Using pure Python (2.7) or making use of the cPython API are both acceptable ways to solve this issue.
This is allowed to be a hack. The child() function will be inert in production mode.
I have tried
I have tried modifying the stack list (deleting the parent() frame) retrieved from inspect.stack(), but this seems to not do anything.
I have tried creating new bytecode for the parent() frame and replacing it in the stack. This also does not seem to have an effect.
I have tried looking into the cPython functions related to stack management, but when I added or removed a frame, I kept getting stack under or overflows.
If you know the name(s) of child(), then you can patch all child() callers at runtime by iterating through module and class functions, patching call sites to child() to add your custom logic, and hot-swapping callers of child with the patched version.
Here is a working example:
#!/usr/bin/env python2.7
from six import wraps
def decorator(func):
#wraps(func)
def func_wrapper(*args, **kwargs):
print 'in wrapper before %s' % func.__name__
val = func(*args, **kwargs)
print 'in wrapper after %s' % func.__name__
return val
return func_wrapper
#decorator
def grandparent():
val = parent()
assert val == 2
# do something with val here
#decorator
def parent():
# ...
# ...
child()
# if the condition in child is met,
# this would be dead (not-executed)
# code. If it is not met, this would
# be executed.
return 1
def child(*args, **kwargs):
# do something here to make
# the assert in grandparent true
return 2
# --------------------------------------------------------------------------- #
class MyClass:
#decorator
def foo(self):
val = self.bar()
assert val == 2
def bar(self):
self.tar()
child()
return 1
def tar(self):
return 42
# --------------------------------------------------------------------------- #
import sys
import inspect
import textwrap
import types
import itertools
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
log = logging.getLogger(__name__)
def should_intercept():
    """Decide whether child() call sites should be patched.

    Dummy implementation for now: intercept when the script was started
    with at least one command-line argument.
    TODO: check real system state instead.
    """
    return len(sys.argv) > 1
def _unwrap(func):
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
return func
def __patch_child_callsites():
    # Hot-patch every function (module- or class-level) whose source calls
    # child(): recompile it with `child()` rewritten to `return child()`,
    # so child()'s value short-circuits the caller.  Python 2 only (uses
    # the `exec ... in` statement form below).
    if not should_intercept():
        return
    for module in sys.modules.values():
        if not module:
            continue
        # Scan the module itself plus every class defined in it.
        scopes = itertools.chain(
            [module],
            (clazz for clazz in module.__dict__.values() if inspect.isclass(clazz))
        )
        for scope in scopes:
            # get all functions in scope (skip builtins and this patcher)
            funcs = list(fn for fn in scope.__dict__.values()
                         if isinstance(fn, types.FunctionType)
                         and not inspect.isbuiltin(fn)
                         and fn.__name__ != __patch_child_callsites.__name__)
            for fn in funcs:
                try:
                    # Unwrap decorators so we get the real source.
                    fn_src = inspect.getsource(_unwrap(fn))
                except IOError as err:
                    log.warning("couldn't get source for fn: %s:%s",
                                scope.__name__, fn.__name__)
                    continue
                # remove common indentations
                fn_src = textwrap.dedent(fn_src)
                if 'child()' in fn_src:
                    # construct patched caller source under a new name
                    patched_fn_name = "patched_%s" % fn.__name__
                    patched_fn_src = fn_src.replace(
                        "def %s(" % fn.__name__,
                        "def %s(" % patched_fn_name,
                    )
                    # The core trick: propagate child()'s return value.
                    patched_fn_src = patched_fn_src.replace(
                        'child()', 'return child()'
                    )
                    log.debug("patched_fn_src:\n%s", patched_fn_src)
                    # compile patched caller into scope
                    compiled = compile(patched_fn_src, inspect.getfile(scope), 'exec')
                    # Execute with fn's globals; the new def lands in
                    # scope.__dict__ (Python 2 `exec in` syntax).
                    exec(compiled) in fn.__globals__, scope.__dict__
                    # replace original caller with patched caller
                    patched_fn = scope.__dict__.get(patched_fn_name)
                    setattr(scope, fn.__name__, patched_fn)
                    log.info('patched %s:%s', scope.__name__, fn.__name__)
if __name__ == '__main__':
    # Patch first (a no-op when run without CLI arguments), then run the demo.
    __patch_child_callsites()
    grandparent()
    MyClass().foo()
Run with no arguments to get the original behavior (assertion failure). Run with one or more arguments and the assertion disappears.

Does pdb offer watchpoints? [duplicate]

There is large python project where one attribute of one class just have wrong value in some place.
It should be sqlalchemy.orm.attributes.InstrumentedAttribute, but when I run tests it is constant value, let's say string.
Is there some way to run a Python program in debug mode, and automatically run some check (whether the variable changed type) after each line of code is executed?
P.S. I know how to log changes of attribute of class instance with help of inspect and property decorator. Possibly here I can use this method with metaclasses...
But sometimes I need a more general and powerful solution...
Thank you.
P.P.S. I need something like there: https://stackoverflow.com/a/7669165/816449, but may be with more explanation of what is going on in that code.
Well, here is a sort of slow approach. It can be modified for watching for local variable change (just by name). Here is how it works: we do sys.settrace and analyse the value of obj.attr each step. The tricky part is that we receive 'line' events (that some line was executed) before line is executed. So, when we notice that obj.attr has changed, we are already on the next line and we can't get the previous line frame (because frames aren't copied for each line, they are modified ). So on each line event I save traceback.format_stack to watcher.prev_st and if on the next call of trace_command value has changed, we print the saved stack trace to file. Saving traceback on each line is quite an expensive operation, so you'd have to set include keyword to a list of your projects directories (or just the root of your project) in order not to watch how other libraries are doing their stuff and waste cpu.
watcher.py
import traceback

class Watcher(object):
    # Trace-based debugger: on every traced 'line' event, check whether
    # obj.attr changed and, if so, log the stack captured on the PREVIOUS
    # line event (the line that actually made the change).  Python 2 only
    # (uses `print >>f` below).
    def __init__(self, obj=None, attr=None, log_file='log.txt', include=[], enabled=False):
        """
        Debugger that watches for changes in object attributes
        obj - object to be watched
        attr - string, name of attribute
        log_file - string, where to write output
        include - list of strings, debug files only in these directories.
        Set it to path of your project otherwise it will take long time
        to run on big libraries import and usage.
        """
        # NOTE(review): `include=[]` is a shared mutable default argument.
        self.log_file=log_file
        with open(self.log_file, 'wb'): pass  # truncate the log file
        self.prev_st = None  # stack saved on the previous 'line' event
        self.include = [incl.replace('\\','/') for incl in include]
        if obj:
            self.value = getattr(obj, attr)  # last observed value
        self.obj = obj
        self.attr = attr
        self.enabled = enabled # Important, must be last line on __init__.

    def __call__(self, *args, **kwargs):
        # Re-arm the watcher on a new target: re-run __init__ with enabled=True.
        kwargs['enabled'] = True
        self.__init__(*args, **kwargs)

    def check_condition(self):
        # True if obj.attr changed since the last check; remember the new value.
        tmp = getattr(self.obj, self.attr)
        result = tmp != self.value
        self.value = tmp
        return result

    def trace_command(self, frame, event, arg):
        # sys.settrace callback.  'line' events fire BEFORE a line runs,
        # so on detection the responsible line is the previous one.
        if event!='line' or not self.enabled:
            return self.trace_command
        if self.check_condition():
            if self.prev_st:
                with open(self.log_file, 'ab') as f:
                    print >>f, "Value of",self.obj,".",self.attr,"changed!"
                    print >>f,"###### Line:"
                    print >>f,''.join(self.prev_st)
        if self.include:
            # Only capture stacks for files under the include directories;
            # saving a traceback on every line is expensive.
            fname = frame.f_code.co_filename.replace('\\','/')
            to_include = False
            for incl in self.include:
                if fname.startswith(incl):
                    to_include = True
                    break
            if not to_include:
                return self.trace_command
        self.prev_st = traceback.format_stack(frame)
        return self.trace_command

import sys
# Module-level singleton: importing this module installs the (disabled)
# tracer; calling watcher(...) later arms it.
watcher = Watcher()
sys.settrace(watcher.trace_command)
testwatcher.py
from watcher import watcher
import numpy as np
import urllib2
class X(object):
    """Minimal holder for a single watched attribute, `foo`."""

    def __init__(self, foo):
        self.foo = foo
class Y(object):
    """Wraps an X-like object and mutates its `foo` attribute indirectly."""

    def __init__(self, x):
        self.xoo = x

    def boom(self):
        # Overwrites the wrapped object's attribute — the indirect change
        # the watcher demo is meant to catch.
        self.xoo.foo = "xoo foo!"
def main():
    # Demo driver (Python 2): arm the watcher on x.foo, then change it in
    # several different ways to exercise the tracer.
    x = X(50)
    # include limits tracing to the project directory (see Watcher docstring).
    watcher(x, 'foo', log_file='log.txt', include =['C:/Users/j/PycharmProjects/hello'])
    x.foo = 500   # direct change: logged
    x.goo = 300   # different attribute: ignored
    y = Y(x)
    y.boom()      # indirect change of x.foo: logged
    arr = np.arange(0,100,0.1)
    arr = arr**2
    for i in xrange(3):
        print 'a'
        x.foo = i  # changed on every iteration: logged each time
    for i in xrange(1):
        i = i+1
main()
There's a very simple way to do this: use watchpoints.
Basically you only need to do
from watchpoints import watch
watch(your_object.attr)
That's it. Whenever the attribute is changed, it will print out the line that changed it and how it's changed. Super easy to use.
It also has more advanced features, for example, you can call pdb when the variable is changed, or use your own callback functions instead of print it to stdout.
A simpler way to watch for an object's attribute change (which can also be a module-level variable or anything accessible with getattr) would be to leverage hunter library, a flexible code tracing toolkit. To detect state changes we need a predicate which can look like the following:
import traceback

class MutationWatcher:
    """hunter predicate: report when any watched attribute of `target` changes.

    Keeps a snapshot of the watched attributes; each call compares the
    snapshot against the live values, prints any difference plus the stack
    of the triggering frame, and returns whether a change was seen.
    """

    def __init__(self, target, attrs):
        self.target = target
        # Snapshot the current value of every watched attribute.
        self.state = {k: getattr(target, k) for k in attrs}

    def __call__(self, event):
        """Return True (and report) if any watched attribute changed."""
        result = False
        for k, v in self.state.items():
            current_value = getattr(self.target, k)
            if v != current_value:
                result = True
                self.state[k] = current_value  # remember the new value
                # Fixed typo in the report message: "chaned" -> "changed".
                print('Value of attribute {} has changed from {!r} to {!r}'.format(
                    k, v, current_value))
        if result:
            # Show where the mutation happened.
            traceback.print_stack(event.frame)
        return result
Then given a sample code:
class TargetThatChangesWeirdly:
    # Watched attribute: starts at 1; the helper below bumps it to 2.
    attr_name = 1


def some_nested_function_that_does_the_nasty_mutation(obj):
    """The hidden mutation the tracer is supposed to catch."""
    obj.attr_name = 2


def some_public_api(obj):
    """Public entry point that performs the mutation indirectly."""
    some_nested_function_that_does_the_nasty_mutation(obj)
We can instrument it with hunter like:
# or any other entry point that calls the public API of interest
if __name__ == '__main__':
    obj = TargetThatChangesWeirdly()
    import hunter  # third-party code-tracing toolkit
    watcher = MutationWatcher(obj, ['attr_name'])
    # Trace non-stdlib code only; print each traced line as it executes.
    hunter.trace(watcher, stdlib=False, action=hunter.CodePrinter)
    some_public_api(obj)
Running the module produces:
Value of attribute attr_name has chaned from 1 to 2
File "test.py", line 44, in <module>
some_public_api(obj)
File "test.py", line 10, in some_public_api
some_nested_function_that_does_the_nasty_mutation(obj)
File "test.py", line 6, in some_nested_function_that_does_the_nasty_mutation
obj.attr_name = 2
test.py:6 return obj.attr_name = 2
... return value: None
You can also use other actions that hunter supports. For instance, Debugger which breaks into pdb (debugger on an attribute change).
Try using __setattr__ to override the function that is called when an attribute assignment is attempted. Documentation for __setattr__
You can use the python debugger module (part of the standard library)
To use, just import pdb at the top of your source file:
import pdb
and then set a trace wherever you want to start inspecting the code:
pdb.set_trace()
You can then step through the code with n, and investigate the current state by running python commands.
def __setattr__(self, name, value):
    # NOTE(review): method fragment — the enclosing class ("XXX") and the
    # `util` helper are not shown; presumably util.output_stack dumps the
    # current call stack when the watched attribute is assigned — confirm.
    if name=="xxx":
        util.output_stack('xxxxx')
    # Delegate to the normal attribute-setting machinery.
    super(XXX, self).__setattr__(name, value)
This sample code helped me.

Removing boilerplate logging setup

I'm seeing this very common code I'm using to setup logging.
def has_host_running(self):
    # The boilerplate pattern under discussion: build a per-action logger
    # named after this function, run the check, record and log the result.
    # sys._getframe().f_code.co_name evaluates to 'has_host_running'.
    log = CustomLogger.action_logger(name=sys._getframe().f_code.co_name, **self.menvargs)
    result = self.bash_query.check_is_server_available(log)
    self.results[sys._getframe().f_code.co_name] = result
    log.debug('result: {}'.format(result))
    return result
Looking for dry way to implement this behavior.
Key is that I need to be able to reference/call a log statement from within the function and any child function calls.
******************************edit2*******************************
The most elegant sloppy solution I could get to work.
Loosely adapted from: https://wiki.python.org/moin/PythonDecoratorLibrary#Controllable_DIY_debug
heavily based on https://wiki.python.org/moin/PythonDecoratorLibrary#Controllable_DIY_debug
class ActionLog:
    # Class-based decorator (Python 2): wraps a function so that it logs
    # start / params / result / completion around the call, using a
    # per-function logger configured from config.ini.
    def init(self):
        # NOTE(review): named `init`, not `__init__` — this is never called.
        pass

    def __call__(self, f):
        # Invoked with the decorated function when used as @ActionLog().
        log = self.get_actionlogger(name=f.func_name)
        def newf(log, *args, **kwds):
            # pre-function call actions:
            log.debug('Start.')
            log.debug(' info: params= {args}, {kwds}'.format(args=args, kwds=kwds))
            # function call
            f_result = f(log, *args, **kwds)
            # post-function call actions:
            log.debug(' info: result= {result}'.format(result=f_result))
            log.debug('Complete.')
            return f_result
        # changes to be made to returned function
        newf.__doc__ = f.__doc__
        # NOTE(review): this CALLS newf(log) at decoration time, so the
        # decorated name is bound to the call's RESULT, not to a wrapper
        # function (the author acknowledges this solution is sloppy).
        return newf(log)

    def get_actionlogger(self, name, **kwargs):
        # Build (or fetch) a logger named 'action.<func_name>' whose level
        # comes from config.ini, with a single stream handler attached.
        import logging
        import ast
        from Helper import ConfigManager
        logname = 'action.{func_name}'.format(func_name=name)
        logger = logging.getLogger(logname)
        # value stored in ini file.
        # either DEBUG or ERROR right now.
        # todo: store actual logging_level
        # todo: store an array/dict for log_name in .ini
        #   this will allow multiple parameters to be stored within the single entry.
        #   ex:
        #   action.check_stuff: logging_level=DEBUG,handler_stream=TRUE,handler_file=stuff.log,formatter='{name} - {message}
        conf_logging_level = ConfigManager('config.ini').get_section_dict('CustomLogging_Level').get(logname, 'DEBUG')
        logging_level = logging.DEBUG
        if conf_logging_level == 'DEBUG':
            logging_level = logging.DEBUG
        if conf_logging_level == 'ERROR':
            logging_level = logging.ERROR
        logger.setLevel(logging_level)
        # very hacky
        # while logging.getLogger is a singleton, adding the handler is not.
        # without this check, this code will result in duplicate handlers added.
        # currently will not edit/replace the existing handler.
        # currently will not allow another handler to be added after the first.
        # main issue here is that I can't figure out how to determine labels/names within logger.handlers
        # todo: properly label handler
        # todo: check for existing labels & types (file, stream, etc)
        if len(logger.handlers) == 0:
            ch = logging.StreamHandler()
            ch.setLevel(logging_level)
            # NOTE(review): assigns OVER the handler's set_name method
            # instead of calling ch.set_name(logname).
            ch.set_name = logname
            # create formatter
            formatter = logging.Formatter(' %(name)s - %(message)s')
            ch.setFormatter(formatter)
            logger.addHandler(ch)
        return logger
#ActionLog()
def check_stuff(log, *args, **kwds):
result = True
log.debug(' info: text call from within function.')
return result
if check_stuff:
print 'check_stuff is true.'
So it works with the one parameter "log" being passed into the class. does not work if the class does not have the log parameter. I'm not sure how to handle if there are further parameters... likely with *args or **kwargs, but this solution doesn't handle that.
Apologies on the code formatting... I can't seem to get the class decorator in the same block as the decorated func and func call.
* v3.0 *
v2 had a problem with multiple arguments. Solved that and streamlined v3 quite a bit.
def ActionLog(wrapped):
    # v3: plain function decorator (Python 2). Builds an action logger named
    # after the wrapped function, appends it as an extra trailing positional
    # argument, and logs completion plus the stringified result.
    def _wrapper(*args, **kwargs):
        # Python 2: wrapped.func_name == wrapped.__name__
        log = CustomLogger.action_logger(wrapped.func_name)
        newargs = list()
        for a in args:
            newargs.append(a)
        newargs.append(log)  # log becomes the LAST positional argument
        f = wrapped(*newargs, **kwargs)
        # NOTE(review): from here on `f` is replaced by a formatted string,
        # so the wrapper returns that string rather than the wrapped
        # function's actual return value.
        if f is None:
            f = ''
        else:
            f = '\tResult: {}'.format(f)
        log.debug('Complete.{}'.format(f))
        return f
    return _wrapper
This works better and has replaced most of my boilerplate logging calls for actions that take a single argument.
Still having problems with named args vs kwargs. I'd like to just pass args through and add my custom items to kwargs, but that had a few issues.

how to unittest the template variables passed to jinja2 template from webapp2 request handler

I'm trying to test my webapp2 handlers. To do this, I thought it would be a good idea to send a request to the handler e.g.:
request = webapp2.Request.blank('/')
# Get a response for that request.
response = request.get_response(main.app)
The problem is, response is mostly just a bunch of HTML etc.
I want to look at what was passed to my jinja2 template from the handler before it was turned into HTML.
I want my test to get at the state within the handler class code. I want to be able to see what certain variables looked like in the response handler, and then I want to see what the templates dict looked like before it was passed to render_to_response()
I want to test these variables have the correct values.
Here is my test code so far, but I'm stuck because response = request.get_response() just gives me a bunch of html and not the raw variables.
import unittest
import main
import webapp2
class DemoTestCase(unittest.TestCase):
    # Functional test: drive the webapp2 WSGI app with a blank request and
    # inspect only the rendered response (status code + HTML body).
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testNothing(self):
        # Sanity check that the test runner works at all.
        self.assertEqual(42, 21 + 21)

    def testHomeHandler(self):
        # Build a request object passing the URI path to be tested.
        # You can also pass headers, query arguments etc.
        request = webapp2.Request.blank('/')
        # Get a response for that request.
        response = request.get_response(main.app)
        # Let's check if the response is correct.
        self.assertEqual(response.status_int, 200)
        # NOTE(review): the body is fully rendered HTML, not the raw
        # template variables — which is exactly the asker's problem.
        self.assertEqual(response.body, 'Hello, world!')

if __name__ == '__main__':
    unittest.main()
and here is my handler:
class HomeHandler(BaseHandler):
    def get(self, file_name_filter=None, category_filter=None):
        # Build the blog index: read metadata from the first three
        # comment lines of every file in blog_posts/, then render home.html.
        file_names = os.listdir('blog_posts')
        blogs = []
        # Read one line, stripping the HTML comment markers around it.
        get_line = lambda file_: file_.readline().strip().replace("<!--","").replace("-->","")
        for fn in file_names:
            with open('blog_posts/%s' % fn) as file_:
                heading = get_line(file_)
                link_name = get_line(file_)
                category = get_line(file_)
            # Filename convention assumed: YYYYMMDD_<slug>.html
            date_ = datetime.strptime(fn.split("_")[0], "%Y%m%d")
            blog_dict = {'date': date_, 'heading': heading,
                         'link_name': link_name,
                         'category': category,
                         'filename': fn.replace(".html", ""),
                         'raw_file_name': fn}
            blogs.append(blog_dict)
        categories = Counter(d['category'] for d in blogs)
        # These are the template variables the asker wants to unit-test.
        templates = {'categories': categories,
                     'blogs': blogs,
                     'file_name_filter': file_name_filter,
                     'category_filter': category_filter}
        # Invariant: every post must have a unique link_name.
        assert(len(file_names) == len(set(d['link_name'] for d in blogs)))
        self.render_template('home.html', **templates)
and here is my basehandler:
class BaseHandler(webapp2.RequestHandler):
    """Common base: lazily-built jinja2 environment plus a render helper."""

    # The leading '@' was lost in transcription; this is a decorator.
    @webapp2.cached_property
    def jinja2(self):
        # Per-instance cached jinja2 environment bound to this app.
        return jinja2.get_jinja2(app=self.app)

    def render_template(self, filename, **kwargs):
        #kwargs.update({})
        #TODO() datastore caching here for caching of (handlername, handler parameters, changeable parameters, app_upload_date)
        #TODO() write rendered page to its own html file, and just serve that whole file. (includes all posts). JQuery can show/hide posts.
        self.response.write(self.jinja2.render_template(filename, **kwargs))
Perhaps I have got the wrong idea of how to do unit testing, or perhaps I should have written my code in a way that makes it easier to test? or is there some way of getting the state of my code?
Also if someone were to re-write the code and change the variable names, then the tests would break.
You can mock BaseHandler.render_template method and test its parameters.
See this question for a list of popular Python mocking frameworks.
Thanks to proppy's suggestion I ended up using a mock.
http://www.voidspace.org.uk/python/mock/
(mock is included as part or unittest.mock in python 3)
So here is my main.py code which is similar to what I have in webapp2:
note instead of BaseHandler.render_template i have BaseHandler.say_yo
__author__ = 'Robert'

print "hello from main"

class BaseHandler():
    # Stand-in for the real webapp2 BaseHandler; say_yo plays the role of
    # render_template in the mocking demo.
    def say_yo(self,some_number=99):
        print "yo"
        return "sup"

class TheHandler(BaseHandler):
    def get(self, my_number=42):
        # Calls the inherited say_yo, which the test patches out.
        print "in TheHandler's get()"
        print self.say_yo(my_number)
        return "TheHandler's return string"
and atest.py:
__author__ = 'Robert'
import unittest
import main
from mock import patch
class DemoTestCase(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testNothing(self):
        self.assertEqual(42, 21 + 21)

    def testSomeRequests(self):
        # Demonstrates mock.patch: inside a `with patch(...)` block every
        # call to main.BaseHandler.say_yo (including via subclasses)
        # returns the configured value; outside it the real method runs.
        print "hi"
        bh = main.BaseHandler()
        print bh.say_yo()       # real method: prints "yo", returns "sup"
        print "1111111"
        with patch('main.BaseHandler.say_yo') as patched_bh:
            print dir(patched_bh)
            patched_bh.return_value = 'double_sup'
            bh2 = main.BaseHandler()
            print bh2.say_yo()  # patched: returns 'double_sup'
        print "222222"
        bh3 = main.BaseHandler()
        print bh3.say_yo()      # patch exited: real method again
        print "3333"
        th = main.TheHandler()
        print th.get()
        print "44444"
        with patch('main.BaseHandler.say_yo') as patched_bh:
            patched_bh.return_value = 'last_sup'
            th = main.TheHandler()
            print th.get()      # say_yo called with default 42
            print th.get(123)   # say_yo called with 123
            print "---"
            # The mock records whether and how it was called:
            print patched_bh.called
            print patched_bh.call_args_list
        print "555555"

if __name__ == '__main__':
    unittest.main()
this code gives lots of output, here is a sample:
44444
in TheHandler's get()
last_sup
TheHandler's return string
in TheHandler's get()
last_sup
TheHandler's return string
---
True
[call(42), call(123)]
555555