Multiprocessing, how to run processes in parallel without creating zombies? - python-2.7

I'd like to run the processes in parallel, so I commented out the p.join() in the __main__ section.
What are the consequences of not having a .join(), or better yet, should I be using a different approach for parallel multiprocessing?
import multiprocessing

def worker(num):
    x = 0
    for i in range(10000):
        x += 1
    print x, num

if __name__ == '__main__':
    for i in range(4):
        p = multiprocessing.Process(target=worker, args=(i,))
        p.start()
        # p.join()

Join the processes after starting all of them. Calling join() right after start() inside the loop blocks until that process finishes, which serializes the workers; never joining at all leaves finished children as zombie processes (on Unix) until the parent reaps them or exits.
if __name__ == '__main__':
    procs = []
    for i in range(4):
        p = multiprocessing.Process(target=worker, args=(i,))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
If you run multiple similar tasks, you can use multiprocessing.Pool.
if __name__ == '__main__':
    pool = multiprocessing.Pool()
    pool.map(worker, range(4))
    pool.close()
    pool.join()
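pool.map also collects the workers' return values in order, which is often more useful than printing inside the workers. A minimal sketch, assuming the worker is changed to return its result instead of printing it:

import multiprocessing

def worker(num):
    x = 0
    for i in range(10000):
        x += 1
    return x, num  # return the result so the parent can collect it

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=4)
    results = pool.map(worker, range(4))  # blocks until every worker has finished
    pool.close()
    pool.join()
    print results  # [(10000, 0), (10000, 1), (10000, 2), (10000, 3)]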

Related

Run parallel op with different inputs and same placeholder

I need to calculate more than one accuracy at the same time, concurrently.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
The code is the same as the MNIST example in the TensorFlow tutorial, but instead of having:
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
I have two placeholders, because I have already calculated and stored those values:
W = tf.placeholder(tf.float32, [784, 10])
b = tf.placeholder(tf.float32, [10])
I want to feed the network with the values I already have and then calculate the accuracy, and this has to happen for each network I load.
So if I load 20 networks, I want to calculate the accuracy of each one in parallel. Is there a way, with session run, to execute the same operation with different inputs?
You have multiple options to make things happen in parallel:
Parallelize using multiple python threads / subprocesses. (See Python's "multiprocessing" library.)
Batch up the operations into single larger operations (e.g., similar to the image operations that operate on a batch of images simultaneously: https://www.tensorflow.org/api_docs/python/image/resizing#resize_bilinear).
Make a single graph that has the 20 network accuracy calculations.
I think the last one is the easiest, so I've included a bit of sample code below to get you started:
import tensorflow as tf

def construct_accuracy_calculation(i):
    W = tf.placeholder(tf.float32, [784, 10], name=("%d_W" % i))
    b = tf.placeholder(tf.float32, [10], name=("%d_b" % i))
    # ...
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return (W, b, accuracy)

def main():
    accuracy_computations = []
    feed_dict = {}
    for i in xrange(NUM_NETWORKS):
        (W, b) = load_network(i)
        (W_op, b_op, accuracy) = construct_accuracy_calculation(i)
        feed_dict[W_op] = W
        feed_dict[b_op] = b
        accuracy_computations.append(accuracy)
    # sess = ...
    accuracy_values = sess.run(accuracy_computations, feed_dict=feed_dict)

if __name__ == "__main__":
    main()
One approach to parallelizing TF computations is to execute run calls in parallel using threads (TF is incompatible with multiprocessing). It's a bit more complicated than other approaches because you have to handle parallelism yourself on the Python side.
Here's an example that runs the same matmul op in the same session in different Python threads with different fed inputs; it runs about 4x faster with 4 threads compared to 1 thread.
import os, sys, queue, threading, time
import tensorflow as tf
import numpy as np

def p(s):
    # helper function for printing from multiple threads
    # need to append \n or results get intermixed in notebook
    print(s+"\n", flush=True, end="")

num_threads = 4
data_size = 32  # number of data points to enqueue
work_per_thread = data_size/num_threads
timeout = 10    # grace period for dequeing
input_queue = queue.Queue(data_size)
output_queue = queue.Queue(data_size)
dtype = np.float32

# use matrix vector matmul since it's compute intensive and uses single core
# see issue #6752
n = 16*1024
with tf.device("/cpu:0"):
    x = tf.placeholder(dtype)
    matrix = tf.Variable(tf.ones((n, n)))
    vector = tf.Variable(tf.ones((n, 1)))
    y = tf.matmul(matrix, vector)[0, 0] + x

# turn off graph-rewriting optimizations
sess = tf.Session(config=tf.ConfigProto(graph_options=tf.GraphOptions(optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0))))
sess.run(tf.global_variables_initializer())

done = False

def runner(runner_id):
    p("Starting runner %s" % (runner_id,))
    count = 0
    while not done:
        try:
            x_val = input_queue.get(timeout=1)
        except queue.Empty:
            # retry on empty queue
            continue
        p("Start computing %d on %d" % (x_val, runner_id))
        out = sess.run(y, {x: x_val})
        count += 1
        output_queue.put(out)
        if count >= work_per_thread:
            break
    else:
        p("Stopping runner " + str(runner_id))

threads = []
print("Creating threads.")
for i in range(num_threads):
    t = threading.Thread(target=runner, args=(i,))
    threads.append(t)

for i in range(data_size):
    input_queue.put(i, timeout=timeout)

# start threads
p("Launching runners.")
start_time = time.time()
for t in threads:
    t.start()

p("Reading results.")
for i in range(data_size):
    try:
        p("Main thread: obtained %.2f" % (output_queue.get(timeout=timeout),))
    except queue.Empty:
        print("No results after %d, terminating computation." % (timeout,))
        break
else:
    p("Computed successfully.")
done = True

p("Waiting for threads to finish.")
for t in threads:
    t.join()

print("Done in %.2f seconds" % (time.time() - start_time))

How to handle the number of threads to be executed in Python?

I'd like your help with this easy/hard question.
I'm working with python threads. This is my code:
import threading
# import class1, class2, class3 . . .

def main():
    list = [class1(), class2(), class3() . . .]
    for obj in list:
        t = threading.Thread(target=obj.run, )
        t.start()

if __name__ == "__main__":
    main()
I want to execute only two of them at first, and when one of them finishes, start the third one, and so on.
Is there a way to do this?
Thanks in advance.
You should use the join method
def main():
    list = [class1(), class2()]  # only two
    waiting_list = []
    for obj in list:
        t = threading.Thread(target=obj.run, )
        t.start()
        waiting_list.append(t)
    for t in waiting_list:
        print("Waiting for %s" % t)
        t.join()
        print("%s done!" % t)
    list = [class3(), class4()]  # more stuff
    for obj in list:
        t = threading.Thread(target=obj.run, )
        t.start()
    # and so on
Also look at the concurrent.futures package for a more high-level thread pool that avoids the explicit for loops.
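For instance, a minimal ThreadPoolExecutor sketch (assuming the same class1 … class4 from the question) that never runs more than two obj.run calls at a time:

from concurrent.futures import ThreadPoolExecutor

def main():
    objects = [class1(), class2(), class3(), class4()]
    # at most two run() calls execute concurrently; the others wait in the pool's queue
    with ThreadPoolExecutor(max_workers=2) as executor:
        futures = [executor.submit(obj.run) for obj in objects]
        for future in futures:
            future.result()  # blocks until that run() finishes and re-raises any exception

if __name__ == "__main__":
    main()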

How to run independent transformations in parallel using PySpark?

I am trying to run 2 functions that do completely independent transformations on a single RDD in parallel using PySpark. What are some methods to do this?
def doXTransforms(sampleRDD):
    (X transforms)

def doYTransforms(sampleRDD):
    (Y Transforms)

if __name__ == "__main__":
    sc = SparkContext(appName="parallelTransforms")
    sqlContext = SQLContext(sc)
    hive_context = HiveContext(sc)
    rows_rdd = hive_context.sql("select * from tables.X_table")
    p1 = Process(target=doXTransforms, args=(rows_rdd,))
    p1.start()
    p2 = Process(target=doYTransforms, args=(rows_rdd,))
    p2.start()
    p1.join()
    p2.join()
    sc.stop()
This does not work, and I now understand why it will not work.
But is there an alternative way to make this work? Specifically, are there any PySpark-specific solutions?
Just use threads and make sure that the cluster has enough resources to process both tasks at the same time.
from threading import Thread
import time

def process(rdd, f):
    def delay(x):
        time.sleep(1)
        return f(x)
    return rdd.map(delay).sum()

rdd = sc.parallelize(range(100), int(sc.defaultParallelism / 2))

t1 = Thread(target=process, args=(rdd, lambda x: x * 2))
t2 = Thread(target=process, args=(rdd, lambda x: x + 1))
t1.start(); t2.start()
Arguably this is not that often useful in practice but otherwise should work just fine.
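Note that a plain Thread discards its target's return value; if you need the two sums back, a small wrapper (hypothetical names) that writes into a shared dict from each thread is enough:

results = {}

def run_job(name, rdd, f):
    # each thread submits its own Spark job and stores the result under its own key
    results[name] = process(rdd, f)

t1 = Thread(target=run_job, args=("doubled", rdd, lambda x: x * 2))
t2 = Thread(target=run_job, args=("incremented", rdd, lambda x: x + 1))
t1.start(); t2.start()
t1.join(); t2.join()
print(results)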
You can further use in-application scheduling with the FAIR scheduler and scheduler pools for better control over the execution strategy.
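A minimal sketch of how a scheduler pool is usually assigned, assuming spark.scheduler.mode has been set to FAIR in the Spark configuration (the pool names are placeholders):

def process_in_pool(pool_name, rdd, f):
    # the pool property is per-thread, so set it in the thread that submits the job
    sc.setLocalProperty("spark.scheduler.pool", pool_name)
    return process(rdd, f)

t1 = Thread(target=process_in_pool, args=("pool_x", rdd, lambda x: x * 2))
t2 = Thread(target=process_in_pool, args=("pool_y", rdd, lambda x: x + 1))
t1.start(); t2.start()
t1.join(); t2.join()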
You can also try pyspark-asyncactions (disclaimer: the author of this answer is also the author of the package), which provides a set of wrappers around the Spark API and concurrent.futures:
import asyncactions
import concurrent.futures
f1 = rdd.filter(lambda x: x % 3 == 0).countAsync()
f2 = rdd.filter(lambda x: x % 11 == 0).countAsync()
[x.result() for x in concurrent.futures.as_completed([f1, f2])]

multiprocessing - pyodbc IOError: bad message length

I am unexpectedly getting an IOError: bad message length error when trying to share a pyodbc connection across multiple processes, especially when N is more than 4 (the number of cores). Sometimes I also get cPickle.UnpicklingError: invalid load key, '#'. or pyodbc.ProgrammingError: ('24000', '[24000] [FreeTDS][SQL Server]Invalid cursor state (0) (SQLExecDirectW)') as errors.
# Import custom python packages
import multiprocessing
import multiprocessing.managers as mm
import pathos.multiprocessing as mp
import pyodbc, datetime, time

class MyConn(object):
    def __init__(self):
        self.conn = None
        self.cursor = None

    def connect_to_db(self):
        self.conn = pyodbc.connect("DSN=cpmeast;UID=dntcore;PWD=dntcorevs2")
        self.cursor = self.conn.cursor()

    def run_qry(self, data):
        print 'Running query', data
        self.cursor.execute("WAITFOR DELAY '00:00:01';select GETDATE(), '"+str(data)+"';")
        l = self.cursor.fetchall()
        _l = []
        for i in l:
            _l.append(list(i))
        print 'Result for query', data, _l
        return _l

class MyManagerClass(object):
    def __init__(self):
        self.result = multiprocessing.Manager().list()

    def read_data(self, *args):
        conn = args[0][0]
        data = args[0][1]
        l = conn.run_qry(data)
        self.result.append(l)

class MyManager(mm.BaseManager):
    pass  # Pass is really enough. Nothing needs to be done here.

def main():
    time_start = time.time()

    MyManager.register("MyConn", MyConn)
    manager = MyManager()
    manager.start()

    a = manager.MyConn()
    a.connect_to_db()

    dbm = MyManagerClass()

    pool = mp.ProcessingPool(4)
    jobs = []
    N = 5
    for i in range(N):
        jobs.append((a, str(i)))
    for i in pool.imap(dbm.read_data, jobs):
        print 'result'
    pool.close()
    pool.join()

    print 'Result', dbm.result
    print 'Closed'

    time_stop = time.time()
    msg = 'runtime: {0}'.format(str(datetime.timedelta(seconds=time_stop-time_start)))
    print msg

if __name__ == '__main__':
    main()
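For context, one common pattern here (my assumption, not something stated in the question) is to avoid sharing a single proxied connection at all and instead let each worker process open its own pyodbc connection, for example via a Pool initializer; a minimal sketch using the same DSN and query:

import multiprocessing
import pyodbc

_conn = None

def init_worker():
    # each worker process opens its own connection; nothing is shared or pickled
    global _conn
    _conn = pyodbc.connect("DSN=cpmeast;UID=dntcore;PWD=dntcorevs2")

def read_data(data):
    cursor = _conn.cursor()
    cursor.execute("WAITFOR DELAY '00:00:01';select GETDATE(), '" + str(data) + "';")
    return [list(row) for row in cursor.fetchall()]

if __name__ == '__main__':
    pool = multiprocessing.Pool(4, initializer=init_worker)
    print pool.map(read_data, [str(i) for i in range(5)])
    pool.close()
    pool.join()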

Why are changes to a list made in a sub-process not showing up in the parent process?

I am creating a sub-process for reading a growing log file. I pass a counter (inside a list) into the log_file_reader function and append 1 to the counter list if the line is valid. I check the counter in the main process every 5 seconds. The counter increases as expected in the sub-process, but it is always 0 in the main process. I checked the id of the counter; it is identical in both the sub-process and the main process. Why isn't the counter increasing in the main process? If I change the counter to counter = multiprocessing.Queue() and check qsize() in log_file_reader(...) or in the main thread, everything works fine.
import subprocess
import select
import multiprocessing
import time

def log_file_reader(filename, counter):
    f = subprocess.Popen(['tail', '-F', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = select.poll()
    p.register(f.stdout)
    while True:
        if p.poll(1):
            line = f.stdout.readline().strip()
            if line:
                '''appends 1 to counter if line is valid'''
                counter.append(1)

def main():
    counter = list()  # initializes a counter in type list
    # starts up a process keep tailing file
    reader_process = multiprocessing.Process(target=log_file_reader, args=("/home/haifzhan/logfile.log", counter))
    reader_process.start()
    # main thread check the counter every 5 seconds
    while True:
        time.sleep(5)
        print "periodically check---counter:{0},id:{1}".format(len(counter), id(counter))

if __name__ == "__main__":
    # everything starts here
    main()
Plain list objects are not shared between processes, so the counter in the child process is actually a completely distinct object from the counter in the parent; changes you make to one will not affect the other. (The id() values look identical only because the child starts with a copy of the parent's objects at the same virtual addresses after fork; the two lists still live in separate address spaces.) If you want to share the list between processes, you need to use a multiprocessing.Manager().list:
import subprocess
import select
import multiprocessing
import time

def log_file_reader(filename, counter):
    f = subprocess.Popen(['tail', '-F', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = select.poll()
    p.register(f.stdout)
    while True:
        if p.poll(1):
            line = f.stdout.readline().strip()
            if line:
                '''appends 1 to counter if line is valid'''
                counter.append(1)

def main():
    m = multiprocessing.Manager()
    counter = m.list()  # initializes a counter in type list
    # starts up a process keep tailing file
    reader_process = multiprocessing.Process(target=log_file_reader, args=("/home/haifzhan/logfile.log", counter))
    reader_process.start()
    # main thread check the counter every 5 seconds
    while True:
        time.sleep(5)
        print "periodically check---counter:{0},id:{1}".format(len(counter), id(counter))

if __name__ == "__main__":
    # everything starts here
    main()
If you're just using the list as a counter, though, you might as well use a multiprocessing.Value rather than a list. It is really meant for this kind of counting and doesn't require starting a Manager process:
import subprocess
import select
import multiprocessing
import time

def log_file_reader(filename, counter):
    f = subprocess.Popen(['tail', '-F', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = select.poll()
    p.register(f.stdout)
    while True:
        if p.poll(1):
            line = f.stdout.readline().strip()
            if line:
                '''increments counter if line is valid'''
                with counter.get_lock():
                    counter.value += 1

def main():
    counter = multiprocessing.Value('i', 0)  # A process-safe int, initialized to 0
    # starts up a process keep tailing file
    reader_process = multiprocessing.Process(target=log_file_reader, args=("/home/haifzhan/logfile.log", counter))
    reader_process.start()
    # main thread check the counter every 5 seconds
    while True:
        time.sleep(5)
        with counter.get_lock():
            print "periodically check---counter:{0},id:{1}".format(counter.value, id(counter))

if __name__ == "__main__":
    main()