I have a dataset like this:
DataSet image
DataSet can be found here: https://ucr.fbi.gov/crime-in-the-u.s/2013/crime-in-the-u.s.-2013/tables/1tabledatadecoverviewpdf/table_1_crime_in_the_united_states_by_volume_and_rate_per_100000_inhabitants_1994-2013.xls
I want to plot a line chart containing a line for every crime rate by year.
Something like this:
Crime Rate graph
But the graph shows continuous years on the x-axis, like 2005.5 and 2007.5.
Can anyone help, or suggest a better approach? Thanks.
Here is the code:
%matplotlib inline
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import plotly.plotly as py
import seaborn as sns
cd = pd.read_clipboard()  # after copying the dataset from the given url above
yearRate = cd[['Year','ViolentCrimeRate','MurderRate','RapeRate','RobberyRate','AggravatedAssaultRate','PropertyCrimeRate','BurglaryRate','LarcenyTheftRate','MotorVehicleTheftRate']]
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)
plt.figure(figsize=(20,15))
ax = plt.subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.ylim(0,5000)
plt.xlim(1994, 2013)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
for y in range(0, 5000, 1000):
    plt.plot(range(1994, 2013), [y] * len(range(1994, 2013)), "--", lw=0.5, color="black", alpha=0)
rates=['ViolentCrimeRate','MurderRate','RapeRate','RobberyRate','AggravatedAssaultRate','PropertyCrimeRate','BurglaryRate','LarcenyTheftRate','MotorVehicleTheftRate']
for rank, column in enumerate(rates):
    # Plot each line separately with its own color, using the Tableau 20
    # color set in order.
    plt.plot(yearRate.Year.values, yearRate[column.replace("\n", " ")].values, lw=2.5, color=tableau20[rank])
    # Add a text label to the right end of every line. Most of the code below
    # is adding specific offsets to the y position because some labels overlapped.
    y_pos = yearRate[column.replace("\n", " ")].values[-1] - 0.5
    if column == "MotorVehicleTheftRate":
        y_pos -= 50
    elif column == "MurderRate":
        y_pos -= 50
    plt.text(2013, y_pos, column, fontsize=14, color=tableau20[rank])
Adding:
plt.xticks(cd['Year'])
solved the issue.
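For reference, a minimal, self-contained sketch of that fix (the dummy data below stands in for the real dataset; only the 'Year' column matters): setting the ticks to the actual year values stops matplotlib from placing ticks at half-year positions such as 2005.5.
import matplotlib.pyplot as plt
import pandas as pd
# Dummy data standing in for the FBI table.
cd = pd.DataFrame({'Year': range(1994, 2014),
                   'ViolentCrimeRate': range(700, 300, -20)})
fig, ax = plt.subplots()
ax.plot(cd['Year'], cd['ViolentCrimeRate'], lw=2.5)
plt.xticks(cd['Year'], rotation=45)  # one tick per year, no fractional years
plt.show()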
I'm trying to group four bars around the xticks in a bar chart. Here's some sample data (mind you, I'm running this in Python 2.7) and my code.
import matplotlib.pyplot as plt
import numpy as np
xps_s1 = range(2008, 2019)
xps_s2 = range(2012, 2019)
xps_s3 = range(2013, 2019)
xps_s4 = range(2014, 2019)
yps_s1 = [94.6, 93.9, 93, 94.7, 94.6, 95.4, 95, 93.6, 93, 93.6, 92.2]
yps_s2 = [81.5, 90.2, 91.5, 94, 95, 94.3, 95.3]
yps_s3 = [83.9, 92.7, 93.3, 94.4, 94.4, 94.6]
yps_s4 = [90.6, 95, 94.8, 94, 93.9]
y_means = [94.6, 93.9, 93, 94.7, np.mean([81.5, 94.6]),
np.mean([83.9, 90.2, 95.4]), np.mean([92.7, 91.5, 95, 90.6]),
np.mean([93.3, 94, 93.6, 95]), np.mean([94.4, 95, 93, 94.8]),
np.mean([94.4, 94.3, 93.6, 94]), np.mean([91.4, 94.6, 95.3, 92.2, 93.9])]
fig = plt.subplots()
ax = plt.axes(xlim=(2007,2019), ylim=(75, 100))
w = 0.2
plt.xticks(np.arange(2008, 2019, step = 1))
rects1 = ax.bar([x-w for x in xps_s1], yps_s1, width=w, align="center",
color='goldenrod', label='Sample1')
rects2 = ax.bar([x-w*2 for x in xps_s2], yps_s2, width=w, align="center",
color='grey', label='Sample2')
rects3 = ax.bar([x+w for x in xps_s3], yps_s3, width=w, align="center",
color='silver', label='Sample3')
rects4 = ax.bar([x+w*2 for x in xps_s4], yps_s4, width=w, align="center",
color='thistle', label='Sample4')
mean_line =ax.plot(xps_s1,y_means, label='Overall',
linestyle='-', color = "indianred")
legend = ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
When I had three bars I set w = 0.3 and the bars grouped nicely around the ticks (I had rects1 sit snugly atop the tick, the other two right up against its flanks, and the remaining .09 of width set the years apart).
Now, with the above code, they don't seem to be related to any tick and they don't group properly.
What am I doing wrong?
Thanks a lot in advance!
I think you want to use align='edge' to simplify the calculations. Is this what you are trying to obtain?
import matplotlib.pyplot as plt
import numpy as np
xps_s1 = range(2008, 2019)
xps_s2 = range(2012, 2019)
xps_s3 = range(2013, 2019)
xps_s4 = range(2014, 2019)
yps_s1 = [94.6, 93.9, 93, 94.7, 94.6, 95.4, 95, 93.6, 93, 93.6, 92.2]
yps_s2 = [81.5, 90.2, 91.5, 94, 95, 94.3, 95.3]
yps_s3 = [83.9, 92.7, 93.3, 94.4, 94.4, 94.6]
yps_s4 = [90.6, 95, 94.8, 94, 93.9]
y_means = [94.6, 93.9, 93, 94.7, np.mean([81.5, 94.6]),
np.mean([83.9, 90.2, 95.4]), np.mean([92.7, 91.5, 95, 90.6]),
np.mean([93.3, 94, 93.6, 95]), np.mean([94.4, 95, 93, 94.8]),
np.mean([94.4, 94.3, 93.6, 94]), np.mean([91.4, 94.6, 95.3, 92.2, 93.9])]
fig = plt.subplots()
ax = plt.axes(xlim=(2007,2019), ylim=(75, 100))
w = 0.2
plt.xticks(np.arange(2008, 2019, step = 1))
rects1 = ax.bar([x-w for x in xps_s1], yps_s1, width=w, align="edge",
color='goldenrod', label='Sample1')
rects2 = ax.bar([x-w*2 for x in xps_s2], yps_s2, width=w, align="edge",
color='grey', label='Sample2')
rects3 = ax.bar([x for x in xps_s3], yps_s3, width=w, align="edge",
color='silver', label='Sample3')
rects4 = ax.bar([x+w for x in xps_s4], yps_s4, width=w, align="edge",
color='thistle', label='Sample4')
mean_line =ax.plot(xps_s1,y_means, label='Overall',
linestyle='-', color = "indianred")
legend = ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
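For what it's worth, the offsets generalize: with align="edge" the left edge of each bar sits at the given x, so placing n bars of width w at x - n*w/2, x - n*w/2 + w, and so on spans the group symmetrically around the tick. A minimal sketch of that idea (the data below is made up):
import matplotlib.pyplot as plt
import numpy as np
years = np.arange(2008, 2012)
samples = [('Sample1', [94, 93, 95, 94]),
           ('Sample2', [82, 90, 92, 94]),
           ('Sample3', [84, 93, 93, 94]),
           ('Sample4', [91, 95, 95, 94])]
n = len(samples)
w = 0.2
fig, ax = plt.subplots()
for i, (label, values) in enumerate(samples):
    # left edge of bar i sits i*w to the right of the group's left edge
    ax.bar(years - n * w / 2 + i * w, values, width=w, align="edge", label=label)
ax.set_xticks(years)
ax.legend()
plt.show()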
My code outputs the dictionary:
{u'person': [(95, 11, 474, 466)],
u'chair': [(135, 410, 276, 587)],
u'book': [(127, 380, 161, 396)]}
I need only u'person': [(95, 11, 474, 466)].
How can I filter this?
This is the part of my code that builds the dictionary:
detected_objects = {}
# analyze all worthy detections
for x in range(worthy_detections):
    # capture the class of the detected object
    class_name = self._categories[int(classes[0][x])]
    # get the detection box around the object
    box_objects = boxes[0][x]
    # positions of the box are between 0 and 1, relative to the size of the image;
    # we multiply them by the size of the image to get the box location in pixels
    ymin = int(box_objects[0] * height)
    xmin = int(box_objects[1] * width)
    ymax = int(box_objects[2] * height)
    xmax = int(box_objects[3] * width)
    if class_name not in detected_objects:
        detected_objects[class_name] = []
    detected_objects[class_name].append((ymin, xmin, ymax, xmax))
detected_objects = detected_objects
print detected_objects
Please help me. Thank you in advance.
You can simply copy the keys you are interested in over into a new dict:
detected_objects = {u'person': [(95, 11, 474, 466)],
u'chair': [(135, 410, 276, 587)],
u'book': [(127, 380, 161, 396)]}
keys_to_keep = {u'person'}
# dictionary comprehension
filtered_results = {k: v for k, v in detected_objects.items() if k in keys_to_keep}
print filtered_results
Output:
{u'person': [(95, 11, 474, 466)]}
See Python Dictionary Comprehension
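If only the u'person' key is ever needed, a simpler alternative (a sketch, assuming the same detected_objects dictionary) is to pull out that one entry directly; .get keeps it safe when no person was detected:
person_boxes = detected_objects.get(u'person', [])  # [] if no person was detected
print {u'person': person_boxes}
# {u'person': [(95, 11, 474, 466)]}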
When plotting a series of subplots with matplotlib, I can't set the ylim range properly.
Here's part of the code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
(...) # loading npy data
titles = ["basestr1", "basestr2", "basestr3", "basestr4", "basestr5"]
labels = ["baselab1", "baselab2", "baselab3", "baselab4", "baselab5"]
linew = 2.24
ms = 10
mw = 2
fc = (1,1,1)
bc = (1,1,1)
mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=[(1,0.4,0.4), (0.1,0.6,0.1), (0.04,0.2,0.04)])
mpl.rcParams.update({'font.size': 12})
fig2 = plt.subplots(2, 2, figsize=(12,9), facecolor=fc)
plt.rc('font', family='serif')
ax0 = plt.subplot(221)
ax1 = plt.subplot(222)
ax2 = plt.subplot(223)
ax3 = plt.subplot(224)
axl = [ax0, ax1, ax2, ax3]
em = []
fp = []
fn = []
gm = []
for c, element in enumerate(elements):
    em.append([i[0] for i in element])
    fp.append([i[1][1] if 1 in i[1] else 0 for i in element])  # red
    fn.append([i[1][2] if 2 in i[1] else 0 for i in element])  # light green
    gm.append([i[1][3] if 3 in i[1] else 0 for i in element])  # dark green
    axl[c].semilogy(em[c], fp[c], "-x", lw=linew, markersize=ms, mew=mw)  # red
    axl[c].semilogy(em[c], fn[c], "-x", lw=linew, markersize=ms, mew=mw)  # light green
    axl[c].semilogy(em[c], gm[c], "-o", lw=linew, markersize=ms, mew=mw, mfc='None')  # dark green
    axl[c].set_ylim([-10, 200])  # <-- Here's the issue; it seems not to work properly.
    axl[c].grid(True, which="both")
    axl[c].set_title(titles[c])
    axl[c].set_xlabel(labels[c])
    axl[c].set_ylabel(r'Count')
plt.legend(['False', 'True', 'Others'], loc=3, bbox_to_anchor=(.62, 0.4), borderaxespad=0.)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.savefig('/home/username/Desktop/figure.png',
facecolor=fig2.get_facecolor(),edgecolor='w',orientation='landscape',papertype=None,
format=None, transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
plt.show() # block=False
Here, elements is a list containing 4 arrays.
Each of these arrays looks like:
elements[0]
Out[16]:
array([[1, {0.0: 1252, 1.0: 11, 2.0: 170, 3.0: 11}],
[2, {0.0: 1251, 1.0: 12, 2.0: 163, 3.0: 18}],
[3, {0.0: 1229, 1.0: 34, 2.0: 148, 3.0: 33}],
...,
[6, {0.0: 1164, 1.0: 99, 2.0: 125, 3.0: 56}],
[7, {0.0: 1111, 1.0: 152, 2.0: 105, 3.0: 76}],
[8, {0.0: 1056, 1.0: 207, 2.0: 81, 3.0: 100}]], dtype=object)
Where am I wrong?
I can set any values I want in axl[c].set_ylim([-10, 200]); it doesn't change anything on the output graph.
Update:
OK, it seems it is not possible to set a starting y-axis value other than 1 here.
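A minimal sketch of what is probably going on (an assumption based on the semilogy calls above, not a confirmed diagnosis): the y-axis is logarithmic, so matplotlib ignores a non-positive limit such as -10 and keeps a positive lower bound; only positive limits take effect.
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.semilogy([1, 2, 3], [10, 100, 50], "-x")
ax.set_ylim([1, 200])  # both limits must be > 0 on a log-scaled axis
plt.show()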
Hi, I am trying to run a convolutional neural network adapted from the MNIST tutorial in TensorFlow.
I am getting the following error, but I am not sure what is going on:
W tensorflow/core/framework/op_kernel.cc:909] Invalid argument: Shape mismatch in tuple component 0. Expected [784], got [6272]
W tensorflow/core/framework/op_kernel.cc:909] Invalid argument: Shape mismatch in tuple component 0. Expected [784], got [6272]
Traceback (most recent call last):
File "4_Treino_Rede_Neural.py", line 161, in <module>
train_accuracy = accuracy.eval(feed_dict={keep_prob: 1.0})
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 555, in eval
return _eval_using_default_session(self, feed_dict, self.graph, session)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 3498, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 372, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 636, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 708, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 728, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors.OutOfRangeError: RandomShuffleQueue '_0_input/shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 100, current size 0)
[[Node: input/shuffle_batch = QueueDequeueMany[_class=["loc:#input/shuffle_batch/random_shuffle_queue"], component_types=[DT_FLOAT, DT_INT32], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](input/shuffle_batch/random_shuffle_queue, input/shuffle_batch/n)]]
Caused by op u'input/shuffle_batch', defined at:
File "4_Treino_Rede_Neural.py", line 113, in <module>
x, y_ = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
File "4_Treino_Rede_Neural.py", line 93, in inputs
min_after_dequeue=1000)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/input.py", line 779, in shuffle_batch
dequeued = queue.dequeue_many(batch_size, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/data_flow_ops.py", line 400, in dequeue_many
self._queue_ref, n=n, component_types=self._dtypes, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 465, in _queue_dequeue_many
timeout_ms=timeout_ms, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 704, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2260, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1230, in __init__
self._traceback = _extract_stack()
My program is:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy as np
import tensorflow as tf
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_string('train_dir', '/root/data', 'Directory with the training data.')
#flags.DEFINE_string('train_dir', '/root/data2', 'Directory with the training data.')
# Constants used for dealing with the files, matches convert_to_records.
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
# Package setup
sess = tf.InteractiveSession()
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    # Convert from a scalar string tensor (whose single string has
    # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
    # [mnist.IMAGE_PIXELS].
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([784])
    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)
    return image, label
def inputs(train, batch_size, num_epochs):
    """Reads input data num_epochs times.

    Args:
      train: Selects between the training (True) and validation (False) data.
      batch_size: Number of examples per returned batch.
      num_epochs: Number of times to read the input data, or 0/None to
        train forever.

    Returns:
      A tuple (images, labels), where:
      * images is a float tensor with shape [batch_size, 30,26,1]
        in the range [-0.5, 0.5].
      * labels is an int32 tensor with shape [batch_size] with the true label,
        a number in the range [0, char letras).

      Note that an tf.train.QueueRunner is added to the graph, which
      must be run using e.g. tf.train.start_queue_runners().
    """
    if not num_epochs: num_epochs = None
    filename = os.path.join(FLAGS.train_dir,
                            TRAIN_FILE if train else VALIDATION_FILE)
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs)
        # Even when reading in multiple threads, share the filename
        # queue.
        image, label = read_and_decode(filename_queue)
        # Shuffle the examples and collect them into batch_size batches.
        # (Internally uses a RandomShuffleQueue.)
        # We run this in two threads to avoid being a bottleneck.
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label], batch_size=batch_size, num_threads=2,
            capacity=1000 + 3 * batch_size,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=1000)
        return images, sparse_labels
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# Variables
x, y_ = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
#onehot_y_ = tf.one_hot(y_, 36, dtype=tf.float32)
#y_ = tf.string_to_number(y_, out_type=tf.int32)
#Layer 1
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
#Layer 2
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
#Densely Connected Layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - reduces overfitting
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#Readout layer
W_fc2 = weight_variable([1024, 36])
b_fc2 = bias_variable([36])
#y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
#Train and evaluate
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(onehot_y_ * tf.log(y_conv), reduction_indices=[1]))
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y_conv, y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(20000):
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={keep_prob: 0.5})
x, y_ = inputs(train=True, batch_size=2000)
#y_ = tf.string_to_number(y_, out_type=tf.int32)
print("test accuracy %g"%accuracy.eval(feed_dict={keep_prob: 1.0}))
coord.join(threads)
sess.close()
Can anyone explain to me what's going on, and how to fix it?
Thanks!
Marcelo V
I had similar problems in the past, and it was because I was storing and reading the data with mismatched data types. For example, I had cast the data as type float when converting the original PNG data to tfrecords. Then, when I read the data back out of the tfrecords, I cast it to float once again (assuming the data coming out was uint8), hence a mismatch of 3136 (784*4) when 784 was expected. I'm guessing that may also be the case for you here.
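A minimal sketch of the dtype-matching idea (the file name, image size, and label below are illustrative assumptions, not taken from the original code): the dtype passed to tf.decode_raw when reading must match the dtype the raw bytes were written with, otherwise the element count is off by the ratio of the dtype sizes (e.g. float32 bytes decoded as uint8 give 784*4 = 3136 values instead of 784).
import numpy as np
import tensorflow as tf
# Writing: store the raw bytes of a uint8 image (784 bytes for 28x28).
image = np.zeros((28, 28), dtype=np.uint8)  # dummy image
example = tf.train.Example(features=tf.train.Features(feature={
    'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image.tobytes()])),
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
}))
writer = tf.python_io.TFRecordWriter('example.tfrecords')
writer.write(example.SerializeToString())
writer.close()
# Reading: decode with the SAME dtype used for writing.
# image = tf.decode_raw(features['image_raw'], tf.uint8)  # 784 values, as expected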
In the line:
filename_queue = tf.train.string_input_producer([filename], num_epochs=num_epochs)
You specify the number of epochs the queue will run through the filenames. The documentation explains it well:
num_epochs: An integer (optional). If specified, string_input_producer produces each string from string_tensor num_epochs times before generating an OutOfRange error. If not specified, string_input_producer can cycle through the strings in string_tensor an unlimited number of times.
In flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.'), you specify a default of 2 epochs. You should either increase that, or remove the num_epochs argument from string_input_producer.
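For example, a sketch of the second option (assuming the rest of the inputs function is unchanged): omitting num_epochs lets the queue cycle through the file indefinitely, so the training loop's range(20000) bounds the run instead of the queue running dry.
filename_queue = tf.train.string_input_producer([filename])  # no num_epochs: cycle forever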
Task:
Plot a donut chart with two legends outside of the axes (the first legend on the right side of the figure, the second on the bottom).
Problem:
When saving the figure, part of the first legend is cut off (especially when it contains long text; see the example below).
Desired result:
Make a tight layout of the figure that takes the dimensions of both legends into account.
Code:
import matplotlib.pyplot as plt
from pylab import *
ioff() # don't show figures
colors = [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138,195),
(166, 216, 84), (255, 217, 47), (171, 197, 233), (252, 205, 229)]
for icol in range(len(colors)):
    red, green, blue = colors[icol]
    colors[icol] = (red / 255., green / 255., blue / 255.)
fig = plt.figure(1, figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
sizes_component_1 = [12, 23, 100, 46]
sizes_component_2 = [15, 30, 45, 10, 44, 45, 50, 70]
component_1 = 'exampleofalongtextthatiscutoff', '2', '3', '4'
component_2 = 'Unix', 'Mac', 'Windows7', 'Windows10', 'WindowsXP', 'Linux', 'FreeBSD', 'Android'
patches1, texts1, autotexts1 = ax.pie(sizes_component_1, radius=1, pctdistance=0.9, colors=colors, autopct='%1.1f%%', shadow=False, startangle=90)
patches2, texts2, autotexts2 = ax.pie(sizes_component_2, radius=0.8, pctdistance=0.6, colors=colors, autopct='%1.1f%%', shadow=False, startangle=90)
# To draw circular donuts
ax.axis('equal')
# Draw white circle
centre_circle = plt.Circle((0,0),0.6,color='black', fc='white')
ax.add_artist(centre_circle)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
lgd1=ax.legend(patches1,component_1, frameon=False, loc='center left', bbox_to_anchor=(1.0, 0.8), borderaxespad=0.1)
lgd2=ax.legend(patches2,component_2, frameon=False, loc='center left', ncol=len(patches2)/2, bbox_to_anchor=(0.0, -0.005), borderaxespad=0)
ax_elem = ax.add_artist(lgd1)
fig.suptitle('Title', fontsize=16)
fig.savefig('donut.png',bbox_extra_artists=(lgd1,lgd2,), bbox_inches='tight')
plt.gcf().clear() # clears buffer
This is a known issue with pie charts: https://github.com/matplotlib/matplotlib/issues/4251
It has not been fixed.