I'd like your help with this easy/hard question.
I'm working with Python threads. This is my code:
import threading
#import class1, class2, class3 . . .

def main():
    list = [class1(), class2(), class3() . . .]
    for obj in list:
        t = threading.Thread(target=obj.run, )
        t.start()

if __name__ == "__main__":
    main()
I want to execute only two of them at first, and when one of them finishes, start the third one, and so on.
Is there a way to do this?
Thanks in advance.
You should use the join method
def main():
    list = [class1(), class2()]  # only two
    waiting_list = []
    for obj in list:
        t = threading.Thread(target=obj.run, )
        t.start()
        waiting_list.append(t)
    for t in waiting_list:
        print("Waiting for %s" % t)
        t.join()
        print("%s done!" % t)
    list = [class3(), class4()]  # more stuff
    for obj in list:
        t = threading.Thread(target=obj.run, )
        t.start()
    # and so on
Also look at the concurrent.futures package for a higher-level thread pool that avoids the explicit for loops.
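For instance, here is a minimal sketch using concurrent.futures.ThreadPoolExecutor, assuming each object exposes a run method as in the question (class1, class2, class3 are the question's placeholders). With max_workers=2 at most two threads run at once, and the next task starts as soon as a worker frees up:

from concurrent.futures import ThreadPoolExecutor

def main():
    objects = [class1(), class2(), class3()]  # placeholder objects with a run() method
    with ThreadPoolExecutor(max_workers=2) as executor:
        # Submit everything; the pool schedules at most two at a time.
        futures = [executor.submit(obj.run) for obj in objects]
        for future in futures:
            future.result()  # wait for completion; re-raises any exception from run()

if __name__ == "__main__":
    main()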
This is my argparse command-line interface, which uses two options: --filter and --count.
I'm filtering/counting a data.json file, which I convert to a Python list.
My question is: how do I write tests for that section of the code base, using unittest or another Python testing library/module?
import argparse
import json
import sys

def main(args):
    my_parser = argparse.ArgumentParser(description='This program has to filter a list of elements containing a pattern and counts of People and Animals by counting the number of children')
    my_parser.add_argument('--filter',
                           metavar='Filter',
                           type=str,
                           help='filter a list of elements containing a pattern')
    my_parser.add_argument('--count',
                           action='store_true',
                           help='the counts of People and Animals by counting the number of children')
    args = my_parser.parse_args()
    Filter = args.filter
    count = args.count
    data_filtered = []
    list_count = []

    # Path to the data.json file
    path = "/path/to/data.json"

    # Read the json file and convert it to a Python list
    with open(path) as f:
        data = json.load(f)

    if Filter:
        data_filtered = [dico for dico in data for dict1 in dico['people'] for animal in dict1['animals'] if Filter in animal['name']]
        if len(data_filtered) != 0:
            return data_filtered
    elif count:
        for dico in data:
            children = 0
            for ele in dico['people']:
                animals = len(ele['animals'])
                children += 1 + animals
                ele['name'] += f" [{animals}]"
            dico['name'] += f" [{children}]"
            list_count.append(dico)
        return list_count

if __name__ == '__main__':
    print(main(sys.argv[1:]))
I tried to do this, but it doesn't work for me. I want to write non-regression tests; honestly, I have never written this kind of test before and I don't clearly know how to do it.
import unittest
from collections import namedtuple
from command_line import main

class TestMycommand_line(unittest.TestCase):
    args = ['--filter=ry', '--count']

    def test_filter(self):
        args = TestMycommand_line.args[0]
        print(args)
        res = main(args)

    def test_count(self):
        args = TestMycommand_line.args[1]
        res = main(args)

if __name__ == '__main__':
    unittest.main()
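One reason the tests above fail is that main ignores its args parameter and calls my_parser.parse_args() with no arguments, so argparse ends up reading the unittest runner's sys.argv. Here is a minimal sketch, assuming main is changed to call my_parser.parse_args(args) and that data.json contains entries matching the pattern 'ry' (both assumptions, not shown in the question); the module name command_line and the test data come from the question:

import unittest
from command_line import main  # module name taken from the question

class TestCommandLine(unittest.TestCase):
    def test_filter_returns_matches(self):
        # Pass the arguments as a list, which is what parse_args(args) expects.
        res = main(['--filter=ry'])
        self.assertIsInstance(res, list)

    def test_count_returns_list(self):
        res = main(['--count'])
        self.assertIsInstance(res, list)

if __name__ == '__main__':
    unittest.main()

The exact assertions to use depend on what data.json contains; the point of a non-regression test is to pin down the current output so later changes that alter it are caught.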
I have written SNMP agent code that configures two different context names on the SnmpContext class instance. It registers a MIB tree under a particular contextName, like below:
class Test():
    def readVars(self, varBinds, acInfo=(None, None)):
        retItem = []
        for ov in varBinds:
            if str(ov[0]) == '1.3.6.1.4.1.26262.1.1.1.0':
                retItem.extend([(ov[0], v2c.OctetString('%s' % 'value1'))])
            elif str(ov[0]) == '1.3.6.1.4.1.26262.1.1.2.0':
                retItem.extend([(ov[0], v2c.OctetString('%s' % 'value2'))])
            # ...
        return retItem

    def readNextVars(self, varBinds, acInfo=(None, None)):
        retItem = []
        for ov in varBinds:
            if str(ov[0]) == '1.3.6.1.4.1.26262.1.1.1.0':
                retItem.extend([('1.3.6.1.4.1.26262.1.1.2.0', v2c.OctetString('%s' % 'value2'))])
            elif str(ov[0]) == '1.3.6.1.4.1.26262.1.1.2.0':
                retItem.extend([('1.3.6.1.4.1.26262.1.1.3.1.1.0', v2c.OctetString('%s' % 'value3'))])
            # ...
            else:
                retItem.extend([('1.3.6.1.4.1.26262.1.1.1.0', v2c.OctetString('%s' % 'value1'))])
        return retItem

mibTreeA = Test()
snmpContext.registerContextName(v2c.OctetString('context-a'), mibTreeA)
snmpContext.registerContextName(v2c.OctetString('context-b'), mibTreeA)
This follows the sample example.
Here the next OID is returned by checking conditions with if statements. But I have many OIDs, and handling each of them with an if statement would be painful.
So I would like to know: is there a way to step through the OIDs dynamically? Any example script would be helpful.
Thanks in advance.
Maybe an alternative approach could be to keep a dict of OID-value pairs (where the value can be a callable); a .nextKey-style method would then do the increment part.
In other words, maybe all those if conditions could be folded into some ordered data structure supporting essentially two operations (see the sketch after this list):
get the next item
get the value associated with an item
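Here is a minimal sketch of that idea, independent of any pysnmp specifics. The OIDs and values are the ones from the question, and the helper names (parse_oid, get_value, next_oid) are purely illustrative. OIDs are stored as tuples of ints so they compare numerically rather than as strings, and the wrap-around fallback mirrors the else branch above:

from bisect import bisect_right

# Hypothetical ordered OID table; values could also be callables.
OID_TABLE = {
    (1, 3, 6, 1, 4, 1, 26262, 1, 1, 1, 0): 'value1',
    (1, 3, 6, 1, 4, 1, 26262, 1, 1, 2, 0): 'value2',
    (1, 3, 6, 1, 4, 1, 26262, 1, 1, 3, 1, 1, 0): 'value3',
}
SORTED_OIDS = sorted(OID_TABLE)  # tuples of ints sort in proper OID order

def parse_oid(oid_str):
    """Turn '1.3.6.1...' into a tuple of ints so comparisons are numeric."""
    return tuple(int(part) for part in oid_str.split('.'))

def get_value(oid_str):
    """GET: the value registered for this exact OID (None if unknown)."""
    value = OID_TABLE.get(parse_oid(oid_str))
    return value() if callable(value) else value

def next_oid(oid_str):
    """GETNEXT: the first registered OID greater than the given one,
    wrapping around to the first OID like the else branch in the question."""
    idx = bisect_right(SORTED_OIDS, parse_oid(oid_str))
    oid = SORTED_OIDS[idx] if idx < len(SORTED_OIDS) else SORTED_OIDS[0]
    return '.'.join(str(part) for part in oid)

readVars and readNextVars could then call get_value and next_oid instead of walking a chain of if/elif branches, and adding an OID becomes a one-line change to the table.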
I have a function where I am calculating the CPU usage of a test case. The function works, but I would like to append the result of the subtraction to a list for further use.
For example, first I subtract 10 and 15, which is -5. At this point the list looks like [-5]. Next I subtract 20 and 30, which is -10. Now I want the list to look like [-5, -10]. My current code is (Python 2.7):
import psutil

class CPU():
    def __init__(self):
        self.cpu_start()

    def cpu_start(self):
        global a
        a = psutil.cpu_percent(interval=1, percpu=False)
        print a

    def cpu_end(self):
        global b
        b = psutil.cpu_percent(interval=1, percpu=False)
        print b

    def diff(self):
        c = a - b
        list = []
        list.append(c)
        print list

def main():
    CPU()

if __name__ == '__main__':
    main()
Just make the diff function return a-b, and append that to an array:
import psutil

class CPU:
    def __init__(self):
        self.list = []
        self.a = 0
        self.b = 0
        self.c = 0
        self.cpu_start()

    def cpu_start(self):
        self.a = psutil.cpu_percent(interval=1, percpu=False)
        return self.a

    def cpu_end(self):
        self.b = psutil.cpu_percent(interval=1, percpu=False)
        return self.b

    def diff(self):
        # difference between the start and end measurements
        self.c = self.cpu_start() - self.cpu_end()
        return self.c

def main():
    cpu = CPU()
    results = []
    while True:
        results.append(cpu.diff())
        print results

if __name__ == '__main__':
    main()
Remember that when you're using a class method, you need to create an object of that class, such as cpu = CPU(): here I'm creating an object called cpu of class CPU, initialised with nothing. The __init__ method creates a and b (as self.a and self.b, so they are instance attributes) and stores them on that object. The diff() method takes no arguments, but returns the difference of a and b stored on the instance. I then create a list called results with no elements, run cpu.diff(), which takes the difference between cpu_start() and cpu_end(), and append the result to the results list. This runs in a loop, constantly appending to the list and printing it.
Hope this helps.
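As a usage note, here is a small sketch of how the class above could wrap an actual test case; run_test_case is a hypothetical placeholder for whatever work is being measured, and each start/end difference is collected in a list as the question asks:

def run_test_case():
    # hypothetical placeholder for the work being measured
    sum(range(10 ** 6))

cpu = CPU()
results = []
for _ in range(3):
    start = cpu.cpu_start()   # CPU percentage before the test case
    run_test_case()
    end = cpu.cpu_end()       # CPU percentage after the test case
    results.append(start - end)  # e.g. [-5, -10, ...] as in the question
print results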
I am creating a sub-process for reading a growing log file. I pass a counter (inside a list) into the log_file_reader function and append 1 to the counter list if the line is valid. I check the counter in the main process every 5 seconds. The counter increases as expected in the sub-process, but it is always 0 in the main process. I checked the id of the counter; it is identical in both the sub-process and the main process. Why isn't the counter increasing in the main process? If I change counter to counter = multiprocessing.Queue() and check the qsize() in log_file_reader(...) or the main thread, everything works fine.
import subprocess
import select
import multiprocessing
import time

def log_file_reader(filename, counter):
    f = subprocess.Popen(['tail', '-F', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = select.poll()
    p.register(f.stdout)
    while True:
        if p.poll(1):
            line = f.stdout.readline().strip()
            if line:
                # appends 1 to counter if line is valid
                counter.append(1)

def main():
    counter = list()  # initializes a counter of type list
    # starts up a process to keep tailing the file
    reader_process = multiprocessing.Process(target=log_file_reader, args=("/home/haifzhan/logfile.log", counter))
    reader_process.start()
    # main thread checks the counter every 5 seconds
    while True:
        time.sleep(5)
        print "periodically check---counter:{0},id:{1}".format(len(counter), id(counter))

if __name__ == "__main__":
    # everything starts here
    main()
Plain list objects are not shared between processes, so the counter in the child process is actually a completely distinct object from the counter in the parent. Changes you make to one will not affect the other. If you want to share the list between processes, you need to use a multiprocessing.Manager().list:
import subprocess
import select
import multiprocessing
import time

def log_file_reader(filename, counter):
    f = subprocess.Popen(['tail', '-F', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = select.poll()
    p.register(f.stdout)
    while True:
        if p.poll(1):
            line = f.stdout.readline().strip()
            if line:
                # appends 1 to counter if line is valid
                counter.append(1)

def main():
    m = multiprocessing.Manager()
    counter = m.list()  # initializes a shared counter of type list
    # starts up a process to keep tailing the file
    reader_process = multiprocessing.Process(target=log_file_reader, args=("/home/haifzhan/logfile.log", counter))
    reader_process.start()
    # main thread checks the counter every 5 seconds
    while True:
        time.sleep(5)
        print "periodically check---counter:{0},id:{1}".format(len(counter), id(counter))

if __name__ == "__main__":
    # everything starts here
    main()
If you're just using the list as a counter, though, you might as well use a multiprocessing.Value rather than a list; it really is meant for counting purposes and doesn't require starting a Manager process:
import subprocess
import select
import multiprocessing
import time

def log_file_reader(filename, counter):
    f = subprocess.Popen(['tail', '-F', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = select.poll()
    p.register(f.stdout)
    while True:
        if p.poll(1):
            line = f.stdout.readline().strip()
            if line:
                # increments the counter if line is valid
                with counter.get_lock():
                    counter.value += 1

def main():
    counter = multiprocessing.Value('i', 0)  # a process-safe int, initialized to 0
    # starts up a process to keep tailing the file
    reader_process = multiprocessing.Process(target=log_file_reader, args=("/home/haifzhan/logfile.log", counter))
    reader_process.start()
    # main thread checks the counter every 5 seconds
    while True:
        time.sleep(5)
        with counter.get_lock():
            print "periodically check---counter:{0},id:{1}".format(counter.value, id(counter))

if __name__ == "__main__":
    main()
I'm trying to write a Python GDB script that evaluates a vector of complex numbers so I can do things like plot it via matplotlib. The code is below, with a comment where it breaks:
import sys
import gdb
import matplotlib.pyplot as plt

class Plotter (gdb.Command):
    """ Plots vector"""

    # _iterator pulled directly from
    # /usr/share/gdb/python/libstdcxx/v6/printers.py
    class _iterator:
        def __init__(self, start, finish):
            self.item = start
            self.finish = finish
            self.count = 0

        def __iter__(self):
            return self

        def next(self):
            if self.item == self.finish:
                raise StopIteration
            count = self.count
            self.count = self.count + 1
            elt = self.item.dereference()
            self.item = self.item + 1
            return ('[%d]' % count, elt)

    def __init__(self):
        super(Plotter, self).__init__("plot_test", gdb.COMMAND_OBSCURE)

    def invoke(self, arg, from_tty):
        frame = gdb.selected_frame()
        try:
            val = gdb.Frame.read_var(frame, arg)
            if str(val.type).find("vector") != -1:
                print "Plot vector:", str(val.type)
                if str(val.type).find("complex") != -1:
                    self.plot_complex_vector(val)
                else:
                    self.plot_vector(val)
            else:
                print "Not a vector..."
        except:
            print "No such variable:", arg
            return

    def plot_complex_vector(self, val):
        try:
            it = self._iterator(val['_M_impl']['_M_start'],
                                val['_M_impl']['_M_finish'])
            vector = []
            while True:
                x = it.next()
                vector.append(complex(x[1]))  # doesn't work...
                return
        except StopIteration:
            pass
        except:
            print sys.exc_info()[0]
        print vector
        plt.plot(vector)
        plt.show()

    # works...
    def plot_vector(self, val):
        try:
            it = self._iterator(val['_M_impl']['_M_start'],
                                val['_M_impl']['_M_finish'])
            vector = []
            while True:
                x = it.next()
                vector.append(float(x[1]))
        except StopIteration:
            pass
        except:
            print sys.exc_info()[0]
        print vector
        plt.plot(vector)
        plt.show()

Plotter()
So the question is: how do I access the real/imaginary parts of a std::complex value?
It looks like doing a print x[1] will print values like: {_M_value = 0 + 1 * I}
Update: It looks like I can do a little string editing before doing a typecast:
while True:
    x = it.next()
    s = str(x[1]['_M_value'])
    # convert string to a complex format that Python understands
    c = complex(s.replace('I', 'j').replace(' ', '').replace('*', ''))
    vector.append(c)  # Works now...
But... is there a better way to do this?
try:
    it = self._iterator(val['_M_impl']['_M_start'],
                        val['_M_impl']['_M_finish'])
    vector = []
    while True:
        x = it.next()
        vector.append(complex(x[1]))  # doesn't work...
        return
except StopIteration:
    pass
except:
    print sys.exc_info()[0]
That is not how iterators are meant to be used in Python. Use:
try:
    it = self._iterator(val['_M_impl']['_M_start'],
                        val['_M_impl']['_M_finish'])
    vector = [complex(x[1]) for x in it]
except Exception as e:
    print e, sys.exc_info()[0]
That is the idiomatic form, if you really still want to wrap it in a try...except block.
Edit: Try complex(x.real() + x.imag()). What does print x.type.fields() show?
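For what it's worth, here is a minimal sketch combining the comprehension style above with the string-parsing workaround from the question's update, intended for the body of plot_complex_vector. It only relies on the _M_value field access and the {_M_value = a + b * I} text form that the question already shows; the helper name to_py_complex is purely illustrative:

def to_py_complex(elt):
    # elt is the gdb.Value for one std::complex element, as in the question.
    s = str(elt['_M_value'])            # e.g. "0 + 1 * I"
    s = s.replace('I', 'j').replace('*', '').replace(' ', '')
    return complex(s)                   # Python's built-in complex type

try:
    it = self._iterator(val['_M_impl']['_M_start'],
                        val['_M_impl']['_M_finish'])
    vector = [to_py_complex(x[1]) for x in it]
except Exception as e:
    print e, sys.exc_info()[0]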