Normalizing data and applying colormap results in rotated image using matplotlib? - python-2.7

So I wanted to see if I could make fractal flames using matplotlib, and figured the Sierpinski triangle would be a good test. I modified a working version that simply performed the chaos game, normalizing the x range [-2, 2] to [0, 400] and the y range [0, 2] to [0, 200]. I also truncated the x and y coordinates to 2 decimal places and multiplied by 100 so that the coordinates could be put into a matrix that I could apply a colormap to. Here's the code I'm working on right now (please forgive the messiness):
import numpy as np
import matplotlib.pyplot as plt
import math
import random

def f(x, y, n):
    N = np.array([[x, y]])
    M = np.array([[1/2.0, 0], [0, 1/2.0]])
    b = np.array([[.5], [0]])
    b2 = np.array([[0], [.5]])
    if n == 0:
        return np.dot(M, N.T)
    elif n == 1:
        return np.dot(M, N.T) + 2*b
    elif n == 2:
        return np.dot(M, N.T) + 2*b2
    elif n == 3:
        return np.dot(M, N.T) - 2*b

def norm_x(n, minX_1, maxX_1, minX_2, maxX_2):
    rng = maxX_1 - minX_1
    n = (n - minX_1) / rng
    rng_2 = maxX_2 - minX_2
    n = (n * rng_2) + minX_2
    return n

def norm_y(n, minY_1, maxY_1, minY_2, maxY_2):
    rng = maxY_1 - minY_1
    n = (n - minY_1) / rng
    rng_2 = maxY_2 - minY_2
    n = (n * rng_2) + minY_2
    return n

# Plot ranges
x_min, x_max = -2.0, 2.0
y_min, y_max = 0, 2.0

# Even intervals for points to compute orbits of
x_range = np.arange(x_min, x_max, (x_max - x_min) / 400.0)
y_range = np.arange(y_min, y_max, (y_max - y_min) / 200.0)

mat = np.zeros((len(x_range) + 1, len(y_range) + 1))

random.seed()
x = 1
y = 1
for i in range(0, 100000):
    n = random.randint(0, 3)
    V = f(x, y, n)
    x = V.item(0)
    y = V.item(1)
    mat[norm_x(x, -2, 2, 0, 400), norm_y(y, 0, 2, 0, 200)] += 50

plt.xlabel('x0')
plt.ylabel('y')
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2, 2, 0, 2])
plt.show()
The mathematics seem solid here, so I suspect something weird is going on with how I'm mapping coordinates into the 'mat' matrix and with how the values in it correspond to the colormap.

If I understood your problem correctly, you need to transpose your matrix using the method .T. So just replace
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2, 2, 0, 2])
plt.show()
with
fig = plt.figure(figsize=(10,10))
ax = plt.gca()
ax.imshow(mat.T, cmap="spectral", extent=[-2, 2, 0, 2], origin="lower")
plt.show()
The argument origin="lower" tells imshow to place the origin of your matrix at the bottom of the figure.
Hope it helps.
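A minimal sketch of why the transpose is needed (using a tiny hypothetical matrix, not the Sierpinski data): the loop indexes mat[ix, iy], so x varies along the rows, but imshow draws rows as the vertical axis; transposing plus origin="lower" restores the intended orientation.
import numpy as np
import matplotlib.pyplot as plt

# Tiny test matrix indexed as mat[ix, iy]: x runs 0..3, y runs 0..1.
mat = np.zeros((4, 2))
mat[3, 1] = 1  # mark the point (x=3, y=1)

fig, (left, right) = plt.subplots(1, 2)
left.imshow(mat)                     # rows drawn vertically: the mark appears at the bottom-right
left.set_title("raw")
right.imshow(mat.T, origin="lower")  # transposed and flipped: the mark appears at the top-right
right.set_title("mat.T, origin='lower'")
plt.show()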

Related

How to iterate over a 2x2 matrix in Python

I'm trying to recreate Conway's Game of Life, where the average color of the surrounding cells becomes the color of a newly created cell. However, I'm having trouble counting the neighbors of a given cell in order to determine whether that cell should be dead or alive.
def count_neighbors(self, i, j):
    neighbors = []
    r_sum = 0
    g_sum = 0
    b_sum = 0
    ''' The range(-1, 2) in the for loop allows the loop to check the cells in the positions
    relative to the current cell, which are the eight cells surrounding it.'''
    for x in range(-1, 2):
        for y in range(-1, 2):
            if (x, y) == (0, 0):
                continue
            if 0 <= int(i) + x < len(self._board) and 0 <= int(j) + y < len(self._board):
                if self._board[i + x][j + y] != (0, 0, 0):
                    neighbors.append(self._board[i + x][j + y])
                    r_sum += self._board[i + x][j + y][0]
                    g_sum += self._board[i + x][j + y][1]
                    b_sum += self._board[i + x][j + y][2]
    num_neighbors = len(neighbors)
    if num_neighbors == 0:
        return 0, (0, 0, 0)
    return num_neighbors, (r_sum // num_neighbors, g_sum // num_neighbors, b_sum // num_neighbors)
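For reference, a minimal harness to exercise this method (a sketch: the Board class is hypothetical, and it assumes count_neighbors is defined at module level as above, with self._board a square list of lists of RGB tuples where (0, 0, 0) means a dead cell):
class Board(object):
    def __init__(self, cells):
        self._board = cells

Board.count_neighbors = count_neighbors  # attach the method defined above

board = Board([
    [(0, 0, 0),   (255, 0, 0), (0, 0, 0)],
    [(0, 255, 0), (0, 0, 0),   (0, 0, 0)],
    [(0, 0, 0),   (0, 0, 255), (0, 0, 0)],
])

# The center cell has three live neighbors; their average color is returned.
print(board.count_neighbors(1, 1))  # -> (3, (85, 85, 85))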

How to fill the two sides of a line with seaborn?

I need to fill the two sides of a graph with different colors (maybe green and red). I'm using the following code:
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
range_x = [-1, 0, 1, 2]
range_y = [-5, -3, -1, 1]
ax = sns.lineplot(x = range_x, y = range_y, markers = True)
sns.lineplot(ax = ax, x = [range_x[0], range_x[-1]], y = [0, 0], color = 'black')
sns.lineplot(ax = ax, x = [0, 0], y = [range_y[0], range_y[-1]], color = 'black')
ax.fill_between(range_x, range_y, facecolor = 'red', alpha = 0.5)
plt.savefig('test_fig', bbox_inches = 'tight')
plt.close()
With that code I get the following figure:
But obviously this fails, because I want the red color everywhere above the blue line. I also want my x and y axes drawn in a prominent way; it works for the x axis, but I don't know why I can't get the same for the y axis.
Thank you very much in advance!
Something like this?:
ax = sns.lineplot(x = range_x, y = range_y, markers = True)
sns.lineplot(ax = ax, x = [range_x[0], range_x[-1]], y = [0, 0], color = 'black')
sns.lineplot(ax = ax, x = [0, 0], y = [range_y[0], range_y[-1]], color = 'black')
ax.fill_between(range_x, range_y, [ax.get_ylim()[1]]*len(range_x), facecolor = 'red', alpha = 0.5)
ax.fill_between(range_x, range_y, [ax.get_ylim()[0]]*len(range_x), facecolor = 'green', alpha = 0.5)
From the documentation of fill_between:
y2 : array (length N) or scalar, optional, default: 0
The y coordinates of the nodes defining the second curve.
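Put together as a self-contained script (a sketch: axhline/axvline are used here as an alternative way to draw the prominent axes the question asks about, replacing the extra lineplot calls):
import seaborn as sns
import matplotlib.pyplot as plt

range_x = [-1, 0, 1, 2]
range_y = [-5, -3, -1, 1]

ax = sns.lineplot(x=range_x, y=range_y, markers=True)
ax.axhline(0, color='black')  # prominent x axis
ax.axvline(0, color='black')  # prominent y axis

bottom, top = ax.get_ylim()
ax.fill_between(range_x, range_y, [top] * len(range_x), facecolor='red', alpha=0.5)      # above the line
ax.fill_between(range_x, range_y, [bottom] * len(range_x), facecolor='green', alpha=0.5) # below the line
plt.show()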

Perceptron implementation, decision boundary will not plot

I am attempting to implement a perceptron. I have loaded a 100x2 array of values between 0 and 100. Each item in the array has a label of either -1 or 1.
I believe the perceptron is working; however, I cannot plot the decision boundary as shown here: plot decision boundary matplotlib
When I run my code I only see a single-color background. I would expect to see two colors, one for each label in my data set (-1 and 1).
My current output; I expect to see two colors for the background (-1 or 1):
An example of what I hope to see, from the sklearn documentation:
import numpy as np
from matplotlib import pyplot as plt

def generate_data():
    # generate a dataset that is linearly separable
    group_1 = np.random.randint(50, 100, size=(50, 2))
    group_1_labels = np.full((50, 1), 1)
    group_2 = np.random.randint(0, 49, size=(50, 2))
    group_2_labels = np.full((50, 1), -1)
    # add a bias value of -1
    bias = np.full((50, 1), -1)
    # add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    # merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data

data = generate_data()

# load data, strip labels, add a -1 bias value
X = data[:, :3]
# create labels matrix
l = np.ravel(data[:, 3:])

def perceptron_sgd(X, l, c, epochs):
    # initialize weights
    w = np.zeros(3)
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print "epoch " + str(epoch) + ": " + str(w)
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w)
    print z
    z[z <= 0] = -1
    z[z > 0] = 1
    # return a matrix of predicted labels
    return z

w, errors = perceptron_sgd(X, l, .001, 36)

# X - some data in 2-dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .2), np.arange(y_min, y_max, .2))

# here "model" is your model's prediction (classification) function
Z = classify(np.c_[xx.ravel(), yy.ravel()], l, w[:-1])  # strip the bias from weights

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
I got it to work.
Standardized your X:
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))
Used a better initialization than zero:
# initialize weights
r = np.sqrt(2)
w = np.random.uniform(-r, r, (3,))
Added the learned bias during prediction:
z = np.dot(X, w[:-1]) - w[-1]  # the bias input was -1 during training, so subtract
Standardized during prediction as well (using the standardization learned from the input):
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]), l, w)  # pass the full weight vector; classify handles the bias
Generally, it is always a good idea to standardize the inputs.
Entire code:
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline

def generate_data():
    # generate a dataset that is linearly separable
    group_1 = np.random.randint(50, 100, size=(50, 2))
    group_1_labels = np.full((50, 1), 1)
    group_2 = np.random.randint(0, 49, size=(50, 2))
    group_2_labels = np.full((50, 1), -1)
    # add a bias value of -1
    bias = np.full((50, 1), -1)
    # add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    # merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data

data = generate_data()

# load data, strip labels, add a -1 bias value
X = data[:, :3]
# create labels matrix
l = np.ravel(data[:, 3:])

from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))

def perceptron_sgd(X, l, c, epochs):
    # initialize weights
    r = np.sqrt(2)
    w = np.random.uniform(-r, r, (3,))
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print("epoch " + str(epoch) + ": " + str(w))
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w[:-1]) - w[-1]  # the bias input was -1 during training, so subtract
    print(z)
    z[z <= 0] = -1
    z[z > 0] = 1
    # return a matrix of predicted labels
    return z

w, errors = perceptron_sgd(X_trans, l, .01, 25)

# X - some data in 2-dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))

# classify every grid point, standardized with the same scaler as the training data
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]), l, w)

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
#plt.axis('off')

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
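The errors list returned by perceptron_sgd is otherwise discarded above; a small optional addition (plain matplotlib, nothing beyond what is already imported) to check convergence:
# Plot total misclassification error per epoch
# (errors comes from the perceptron_sgd call above).
plt.figure()
plt.plot(errors)
plt.xlabel('epoch')
plt.ylabel('total error')
plt.show()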

Python 3D array. Calculate R squared

I have 2 ndarrays with 3 dimensions. I need to calculate the R-squared between these ndarrays. To clarify:
Array1.shape = Array2.shape = (100, 100, 10)
So...
resultArray = np.ones(100*100).reshape(100,100)
for i in range(Array1.shape[0]):
    for j in range(Array1.shape[1]):
        slope, intercept, r_value, p_value, std_err = scipy.stats.stats.linregress(Array1[i:i+1,j:j+1,:], Array1[i:i+1,j:j+1,:])
        R2 = r_value**2
        result[ i , j ] = R2
If passed two arrays, stats.linregress expects both to be 1-dimensional.
Array1[i:i+1, j:j+1, :] has shape (1, 1, 10), so it is 3-dimensional. Use Array1[i, j, :] instead:
import numpy as np
import scipy.stats as stats

Array1 = np.random.random((100, 100, 10))
Array2 = np.random.random((100, 100, 10))

resultArray = np.ones(100*100).reshape(100,100)
for i in range(Array1.shape[0]):
    for j in range(Array1.shape[1]):
        slope, intercept, r_value, p_value, std_err = stats.linregress(
            Array1[i, j, :],
            Array2[i, j, :])  # note Array2 here; regressing Array1 on itself would give R2 == 1 everywhere
        R2 = r_value**2
        resultArray[ i , j ] = R2
print(resultArray)
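If the double loop is too slow (it calls linregress 10,000 times), here is a vectorized sketch in plain numpy: Pearson's r computed along the last axis, then squared. This assumes you want r² between Array1 and Array2 per (i, j) cell; it is a different route than linregress, but r_value**2 agrees with it.
import numpy as np

Array1 = np.random.random((100, 100, 10))
Array2 = np.random.random((100, 100, 10))

# Center both arrays along the last axis, then compute Pearson r per (i, j) cell:
# r = sum(a*b) / sqrt(sum(a^2) * sum(b^2)), with a and b the centered series.
a = Array1 - Array1.mean(axis=2, keepdims=True)
b = Array2 - Array2.mean(axis=2, keepdims=True)
r = (a * b).sum(axis=2) / np.sqrt((a ** 2).sum(axis=2) * (b ** 2).sum(axis=2))
resultArray = r ** 2  # shape (100, 100)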

Different results between python map and numpy vectorize

My understanding is that (one use of) numpy's vectorize allows me to send an array to a function that normally only takes scalars, instead of using the built-in map function (in combination with a lambda or the like). However, in the following scenario I get different results from map and numpy.vectorize, and I can't figure out why.
import numpy as np

def basis2(dim, k, x):
    y = np.array([-0.2, -0.13, -0.06, 0, 0.02, 0.06, 0.15, 0.3, 0.8,
                  1.6, 3.1, 6.1, 10.1, 15.1, 23.1, 30.1, 35.0, 40.0, 45.0, 50.0, 55.0])
    if x < y[k] or x > y[k + dim + 1]:
        return 0
    elif dim != 0:
        ret = ((x - y[k]) / (y[k + dim] - y[k])) * basis2(dim - 1, k, x) + (
            (y[k + dim + 1] - x) / (y[k + dim + 1] - y[k + 1])) * basis2(dim - 1, k + 1, x)
        return ret
    else:
        return 1.0

w = np.array([20.0, 23.1, 30.0])
func = lambda x: basis2(3, 14, x)
vec = map(func, w)

func2 = np.vectorize(basis2)
vec2 = func2(3, 14, w)

print vec   # = [0, 0.0, 0.23335417007039491]
print vec2  # = [0 0 0]
As the docstring says:
The data type of the output of vectorized is determined by calling
the function with the first element of the input. This can be avoided
by specifying the otypes argument.
you need to add an otypes argument:
func2 = np.vectorize(basis2, otypes="d")
or change return 0 to return 0.0 in basis2().
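To see the pitfall in isolation, a minimal sketch (the function f here is hypothetical, not from the question):
import numpy as np

def f(x):
    if x < 0:
        return 0    # int: if the first element takes this branch,
    return x * 0.5  # the whole output array gets an integer dtype

v = np.vectorize(f)
print(v(np.array([-1.0, 3.0])))                             # [0 1] -- the 1.5 was truncated
print(np.vectorize(f, otypes="d")(np.array([-1.0, 3.0])))   # [ 0.   1.5]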