How to iterate over a 2D matrix in Python - list

I'm trying to recreate Conway's Game of Life, where the average color of the surrounding cells becomes the color of the newly created cell. I'm having issues counting the surrounding neighbours of a given cell in order to determine whether that cell should be dead or alive.
def count_neighbors(self, i, j):
    neighbors = []
    r_sum = 0
    g_sum = 0
    b_sum = 0
    ''' The range(-1, 2) in the for loop allows the loop to check the cells in the positions
    relative to the current cell, which are the eight cells surrounding it.'''
    for x in range(-1, 2):
        for y in range(-1, 2):
            if (x, y) == (0, 0):
                continue
            if 0 <= int(i) + x < len(self._board) and 0 <= int(j) + y < len(self._board):
                if self._board[i + x][j + y] != (0, 0, 0):
                    neighbors.append(self._board[i + x][j + y])
                    r_sum += self._board[i + x][j + y][0]
                    g_sum += self._board[i + x][j + y][1]
                    b_sum += self._board[i + x][j + y][2]
    num_neighbors = len(neighbors)
    if num_neighbors == 0:
        return 0, (0, 0, 0)
    return num_neighbors, (r_sum // num_neighbors, g_sum // num_neighbors, b_sum // num_neighbors)
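For reference, the same offset-based scan can be exercised in isolation on a plain list-of-lists board. This is a minimal sketch (the function name and the tiny board are made up for illustration); note that it bounds rows and columns separately, whereas the method above uses len(self._board) for both and therefore assumes a square board.

def count_nonblack_neighbors(board, i, j):
    """Count neighbours of board[i][j] that are not the 'dead' colour (0, 0, 0)."""
    rows, cols = len(board), len(board[0])
    count = 0
    for dx in range(-1, 2):
        for dy in range(-1, 2):
            if (dx, dy) == (0, 0):
                continue
            if 0 <= i + dx < rows and 0 <= j + dy < cols:
                if board[i + dx][j + dy] != (0, 0, 0):
                    count += 1
    return count

board = [[(255, 0, 0), (0, 0, 0)],
         [(0, 0, 0), (0, 255, 0)]]
print(count_nonblack_neighbors(board, 0, 0))  # 1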

Related

Perceptron implementation, decision boundary will not plot

I am attempting to implement a perceptron. I have loaded a 100x2 array of values between 0 and 100. Each item in the array has a label of either -1 or 1.
I believe the perceptron is working; however, I cannot plot the decision boundary as shown here: plot decision boundary matplotlib
When I run my code I only see a single color background. I would expect to see two colors, one for each label in my data set (-1 and 1).
My current output; I expect to see two colors for the background (-1 or 1).
An example of what I hope to see, from the sklearn documentation
import numpy as np
from matplotlib import pyplot as plt

def generate_data():
    #generate a dataset that is linearly seperable
    group_1 = np.random.randint(50, 100, size=(50,2))
    group_1_labels = np.full((50,1), 1)
    group_2 = np.random.randint(0, 49, size =(50,2))
    group_2_labels = np.full((50,1), -1)
    #add a bias value of -1
    bias = np.full((50,1), -1)
    #add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    #merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data

data = generate_data()

#load data, strip labels, add a -1 bias value
X = data[:, :3]
#create labels matrix
l = np.ravel(data[:, 3:])

def perceptron_sgd(X, l, c, epochs):
    #initialize weights
    w = np.zeros(3)
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print "epoch " + str(epoch) + ": " + str(w)
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w)
    print z
    z[z <= 0] = -1
    z[z > 0] = 1
    #return a matrix of predicted labels
    return z

w, errors = perceptron_sgd(X, l, .001, 36)

# X - some data in 2dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .2), np.arange(y_min, y_max, .2))

# here "model" is your model's prediction (classification) function
Z = classify(np.c_[xx.ravel(), yy.ravel()], l, w[:-1]) #strip the bias from weights

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')

#Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
I got it to work. Here is what changed:

Standardize your X:
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))
Use a better initialization than zeros:
#initialize weights
r = np.sqrt(2)
w = np.random.uniform(-r, r, (3,))
Add the learned bias during prediction:
z = np.dot(X, w[:-1]) + w[-1]
Standardize during prediction as well (using the scaler fitted on the training input):
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]),
             l, w)  # pass the full weight vector; the bias is no longer stripped
Generally, it is always a good idea to standardize the inputs.
Entire code:
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline

def generate_data():
    #generate a dataset that is linearly seperable
    group_1 = np.random.randint(50, 100, size=(50,2))
    group_1_labels = np.full((50,1), 1)
    group_2 = np.random.randint(0, 49, size =(50,2))
    group_2_labels = np.full((50,1), -1)
    #add a bias value of -1
    bias = np.full((50,1), -1)
    #add labels, upper right quadrant are 1, lower left are -1
    group_1_with_bias = np.hstack((group_1, bias))
    group_2_with_bias = np.hstack((group_2, bias))
    group_1_labeled = np.hstack((group_1_with_bias, group_1_labels))
    group_2_labeled = np.hstack((group_2_with_bias, group_2_labels))
    #merge our labeled data and shuffle!
    merged_data = np.vstack((group_1_labeled, group_2_labeled))
    np.random.shuffle(merged_data)
    return merged_data

data = generate_data()

#load data, strip labels, add a -1 bias value
X = data[:, :3]
#create labels matrix
l = np.ravel(data[:, 3:])

from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X[:, :-1])
X_trans = np.column_stack((scaler.transform(X[:, :-1]), X[:, -1]))

def perceptron_sgd(X, l, c, epochs):
    #initialize weights
    r = np.sqrt(2)
    w = np.random.uniform(-r, r, (3,))
    errors = []
    for epoch in range(epochs):
        total_error = 0
        for i, x in enumerate(X):
            if (np.dot(x, w) * l[i]) <= 0:
                total_error += (np.dot(x, w) * l[i])
                w = w + c * (x * l[i])
        errors.append(total_error * -1)
        print("epoch " + str(epoch) + ": " + str(w))
    return w, errors

def classify(X, l, w):
    z = np.dot(X, w[:-1]) + w[-1]
    print(z)
    z[z <= 0] = -1
    z[z > 0] = 1
    #return a matrix of predicted labels
    return z

w, errors = perceptron_sgd(X_trans, l, .01, 25)

# X - some data in 2dimensional np.array
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))

# here "model" is your model's prediction (classification) function
Z = classify(scaler.transform(np.c_[xx.ravel(), yy.ravel()]), l, w)  # pass the full weight vector; the bias is no longer stripped

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
#plt.axis('off')

#Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=l, cmap=plt.cm.Paired)
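As a quick sanity check (a small sketch, not part of the original answer, reusing the X, l, w, scaler and classify defined above), you can compare the predictions on the training points with the true labels:

preds = classify(scaler.transform(X[:, :-1]), l, w)  # same call pattern as for the contour plot
print("training accuracy:", np.mean(preds == l))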

Largest Area Axis-Aligned Rectangle Inside Convex Polygon [duplicate]

Given an NxN binary matrix (containing only 0's or 1's), how can we go about finding the largest rectangle containing all 0's?
Example:
0 0 0 0 1 0
0 0 1 0 0 1
0 0 0 0 0 0
1 0 0 0 0 0
0 0 0 0 0 1
0 0 1 0 0 0
For the above example, it is a 6×6 binary matrix. The return value in this case will be Cell 1:(2, 1) and Cell 2:(4, 4). The resulting sub-matrix can be square or rectangular. The return value can also be the size of the largest sub-matrix of all 0's, in this example 3 × 4.
Here's a solution based on the "Largest Rectangle in a Histogram" problem suggested by @j_random_hacker in the comments:
[Algorithm] works by iterating through rows from top to bottom, for each row solving this problem, where the "bars" in the "histogram" consist of all unbroken upward trails of zeros that start at the current row (a column has height 0 if it has a 1 in the current row).
The input matrix mat may be an arbitrary iterable e.g., a file or a network stream. Only one row is required to be available at a time.
#!/usr/bin/env python
from collections import namedtuple
from functools import reduce  # reduce is not a builtin in Python 3
from operator import mul

Info = namedtuple('Info', 'start height')

def max_size(mat, value=0):
    """Find height, width of the largest rectangle containing all `value`'s."""
    it = iter(mat)
    hist = [(el==value) for el in next(it, [])]
    max_size = max_rectangle_size(hist)
    for row in it:
        hist = [(1+h) if el == value else 0 for h, el in zip(hist, row)]
        max_size = max(max_size, max_rectangle_size(hist), key=area)
    return max_size

def max_rectangle_size(histogram):
    """Find height, width of the largest rectangle that fits entirely under
    the histogram.
    """
    stack = []
    top = lambda: stack[-1]
    max_size = (0, 0)  # height, width of the largest rectangle
    pos = 0  # current position in the histogram
    for pos, height in enumerate(histogram):
        start = pos  # position where rectangle starts
        while True:
            if not stack or height > top().height:
                stack.append(Info(start, height))  # push
            elif stack and height < top().height:
                max_size = max(max_size, (top().height, (pos - top().start)),
                               key=area)
                start, _ = stack.pop()
                continue
            break  # height == top().height goes here
    pos += 1
    for start, height in stack:
        max_size = max(max_size, (height, (pos - start)), key=area)
    return max_size

def area(size):
    return reduce(mul, size)
The solution is O(N), where N is the number of elements in a matrix. It requires O(ncols) additional memory, where ncols is the number of columns in a matrix.
Latest version with tests is at https://gist.github.com/776423
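For instance, calling max_size on the 6x6 matrix from the question (a small usage sketch, not part of the original answer) returns the 3-by-4 size mentioned there:

matrix = [[0, 0, 0, 0, 1, 0],
          [0, 0, 1, 0, 0, 1],
          [0, 0, 0, 0, 0, 0],
          [1, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 1],
          [0, 0, 1, 0, 0, 0]]
print(max_size(matrix))  # (3, 4): a 3-row by 4-column block of zeros, area 12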
Please take a look at Maximize the rectangular area under Histogram and then continue reading the solution below.
Traverse the matrix once and store the following:

For each row x = 1 to N and each column y = 1 to N:
    F[x][y] = 1 + F[x-1][y] if A[x][y] is 0, else 0

Then, for each row x = N down to 1:
    F[x] is the array of histogram heights (one per column) whose base is row x.
    Use the O(N) algorithm to find the largest rectangle area in this histogram; call it H[x].

From all the H[x] computed, report the largest.
Time complexity is O(N*N) = O(N²) (for an NxN binary matrix)
Example:

Initial array A:
0 0 0 0 1 0
0 0 1 0 0 1
0 0 0 0 0 0
1 0 0 0 0 0
0 0 0 0 0 1
0 0 1 0 0 0

F[x][y] array:
1 1 1 1 0 1
2 2 0 2 1 0
3 3 1 3 2 1
0 4 2 4 3 2
1 5 3 5 4 0
2 6 0 6 5 1

For x = N down to 1:
H[6] = 2 6 0 6 5 1 -> 10 (5*2)
H[5] = 1 5 3 5 4 0 -> 12 (3*4)
H[4] = 0 4 2 4 3 2 -> 10 (2*5)
H[3] = 3 3 1 3 2 1 -> 6 (3*2)
H[2] = 2 2 0 2 1 0 -> 4 (2*2)
H[1] = 1 1 1 1 0 1 -> 4 (1*4)

The largest area is thus H[5] = 12
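For what it's worth, the F array above can be reproduced in a few lines of Python (a small sketch of the construction just described, not part of the original answer):

import numpy as np

A = np.array([[0, 0, 0, 0, 1, 0],
              [0, 0, 1, 0, 0, 1],
              [0, 0, 0, 0, 0, 0],
              [1, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 1],
              [0, 0, 1, 0, 0, 0]])

F = np.zeros_like(A)
for r in range(A.shape[0]):
    for c in range(A.shape[1]):
        if A[r, c] == 0:
            F[r, c] = 1 + (F[r - 1, c] if r > 0 else 0)  # extend the column of zeros
        else:
            F[r, c] = 0                                   # a 1 resets the height
print(F)  # matches the F[x][y] table above, row by row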
Here is a Python3 solution, which returns the position in addition to the area of the largest rectangle:
#!/usr/bin/env python3

import numpy

s = '''0 0 0 0 1 0
0 0 1 0 0 1
0 0 0 0 0 0
1 0 0 0 0 0
0 0 0 0 0 1
0 0 1 0 0 0'''

nrows = 6
ncols = 6
skip = 1
area_max = (0, [])

a = numpy.fromstring(s, dtype=int, sep=' ').reshape(nrows, ncols)
w = numpy.zeros(dtype=int, shape=a.shape)
h = numpy.zeros(dtype=int, shape=a.shape)

for r in range(nrows):
    for c in range(ncols):
        if a[r][c] == skip:
            continue
        if r == 0:
            h[r][c] = 1
        else:
            h[r][c] = h[r-1][c]+1
        if c == 0:
            w[r][c] = 1
        else:
            w[r][c] = w[r][c-1]+1
        minw = w[r][c]
        for dh in range(h[r][c]):
            minw = min(minw, w[r-dh][c])
            area = (dh+1)*minw
            if area > area_max[0]:
                area_max = (area, [(r-dh, c-minw+1, r, c)])

print('area', area_max[0])
for t in area_max[1]:
    print('Cell 1:({}, {}) and Cell 2:({}, {})'.format(*t))
Output:
area 12
Cell 1:(2, 1) and Cell 2:(4, 4)
Here is J.F. Sebastian's method translated into C#:
private Vector2 MaxRectSize(int[] histogram) {
    Vector2 maxSize = Vector2.zero;
    int maxArea = 0;
    Stack<Vector2> stack = new Stack<Vector2>();
    int x = 0;
    for (x = 0; x < histogram.Length; x++) {
        int start = x;
        int height = histogram[x];
        while (true) {
            if (stack.Count == 0 || height > stack.Peek().y) {
                stack.Push(new Vector2(start, height));
            } else if (height < stack.Peek().y) {
                int tempArea = (int)(stack.Peek().y * (x - stack.Peek().x));
                if (tempArea > maxArea) {
                    maxSize = new Vector2(stack.Peek().y, (x - stack.Peek().x));
                    maxArea = tempArea;
                }
                Vector2 popped = stack.Pop();
                start = (int)popped.x;
                continue;
            }
            break;
        }
    }
    foreach (Vector2 data in stack) {
        int tempArea = (int)(data.y * (x - data.x));
        if (tempArea > maxArea) {
            maxSize = new Vector2(data.y, (x - data.x));
            maxArea = tempArea;
        }
    }
    return maxSize;
}
public Vector2 GetMaximumFreeSpace() {
    // STEP 1:
    // build a seed histogram using the first row of grid points
    // example: [true, true, false, true] = [1,1,0,1]
    int[] hist = new int[gridSizeY];
    for (int y = 0; y < gridSizeY; y++) {
        if (!invalidPoints[0, y]) {
            hist[y] = 1;
        }
    }

    // STEP 2:
    // get a starting max area from the seed histogram we created above.
    // using the example from above, this value would be [1, 1], as the only valid area is a single point.
    // another example for [0,0,0,1,0,0] would be [1, 3], because the largest area of contiguous free space is 3.
    // Note that at this step, the height of the found rectangle will always be 1 because we are operating on
    // a single row of data.
    Vector2 maxSize = MaxRectSize(hist);
    int maxArea = (int)(maxSize.x * maxSize.y);

    // STEP 3:
    // build histograms for each additional row, re-testing for new possible max rectangular areas
    for (int x = 1; x < gridSizeX; x++) {
        // build a new histogram for this row. the values of this row are
        // 0 if the current grid point is occupied; otherwise, it is 1 + the value
        // of the previously found histogram value for the previous position.
        // What this does is effectively keep track of the height of continuous available spaces.
        // EXAMPLE:
        // Given the following grid data (where 1 means occupied, and 0 means free; for clarity):
        //     INPUT:       OUTPUT:
        // 1.) [0,0,1,0] = [1,1,0,1]
        // 2.) [0,0,1,0] = [2,2,0,2]
        // 3.) [1,1,0,1] = [0,0,1,0]
        //
        // As such, you'll notice position 1,0 (row 1, column 0) is 2, because this is the height of contiguous
        // free space.
        for (int y = 0; y < gridSizeY; y++) {
            if (!invalidPoints[x, y]) {
                hist[y] = 1 + hist[y];
            } else {
                hist[y] = 0;
            }
        }

        // find the maximum size of the current histogram. If it happens to be larger
        // than the currently recorded max size, then it is the new max size.
        Vector2 maxSizeTemp = MaxRectSize(hist);
        int tempArea = (int)(maxSizeTemp.x * maxSizeTemp.y);
        if (tempArea > maxArea) {
            maxSize = maxSizeTemp;
            maxArea = tempArea;
        }
    }

    // at this point, we know the max size
    return maxSize;
}
A few things to note about this:
This version is meant for use with the Unity API. You can easily make this more generic by replacing instances of Vector2 with KeyValuePair. Vector2 is only used for a convenient way to store two values.
invalidPoints[] is an array of bool, where true means the grid point is "in use", and false means it is not.
Solution with space complexity O(columns) [Can be modified to O(rows) also] and time complexity O(rows*columns)
public int maximalRectangle(char[][] matrix) {
    int m = matrix.length;
    if (m == 0)
        return 0;
    int n = matrix[0].length;
    int maxArea = 0;
    int[] aux = new int[n];
    for (int i = 0; i < n; i++) {
        aux[i] = 0;
    }
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            aux[j] = matrix[i][j] - '0' + aux[j];
            maxArea = Math.max(maxArea, maxAreaHist(aux));
        }
    }
    return maxArea;
}

public int maxAreaHist(int[] heights) {
    int n = heights.length;
    Stack<Integer> stack = new Stack<Integer>();
    stack.push(0);
    int maxRect = heights[0];
    int top = 0;
    int leftSideArea = 0;
    int rightSideArea = heights[0];
    for (int i = 1; i < n; i++) {
        if (stack.isEmpty() || heights[i] >= heights[stack.peek()]) {
            stack.push(i);
        } else {
            while (!stack.isEmpty() && heights[stack.peek()] > heights[i]) {
                top = stack.pop();
                rightSideArea = heights[top] * (i - top);
                leftSideArea = 0;
                if (!stack.isEmpty()) {
                    leftSideArea = heights[top] * (top - stack.peek() - 1);
                } else {
                    leftSideArea = heights[top] * top;
                }
                maxRect = Math.max(maxRect, leftSideArea + rightSideArea);
            }
            stack.push(i);
        }
    }
    while (!stack.isEmpty()) {
        top = stack.pop();
        rightSideArea = heights[top] * (n - top);
        leftSideArea = 0;
        if (!stack.isEmpty()) {
            leftSideArea = heights[top] * (top - stack.peek() - 1);
        } else {
            leftSideArea = heights[top] * top;
        }
        maxRect = Math.max(maxRect, leftSideArea + rightSideArea);
    }
    return maxRect;
}
But I get a Time Limit Exceeded exception when I try this on LeetCode. Is there a less complex solution?
I propose an O(n×n) method.
First, you can list all the maximal empty rectangles. Empty means that it covers only 0s. A maximal empty rectangle is one that cannot be extended in any direction without covering (at least) one 1.
A paper presenting an O(n×n) algorithm to create such a list can be found at www.ulg.ac.be/telecom/rectangles, as well as source code (not optimized). There is no need to store the list; it is sufficient to call a callback function each time a rectangle is found by the algorithm, and to store only the largest one (or choose another criterion if you want).
Note that a proof exists (see the paper) that the number of maximal empty rectangles is bounded by the number of pixels of the image (n×n in this case).
Therefore, selecting the optimal rectangle can be done in O(n×n), and the overall method is also O(n×n).
In practice, this method is very fast, and is used for real-time video stream analysis.
Here is a version of jfs' solution, which also delivers the position of the largest rectangle:
from collections import namedtuple
from operator import mul

Info = namedtuple('Info', 'start height')

def max_rect(mat, value=0):
    """returns (height, width, left_column, bottom_row) of the largest rectangle
    containing all `value`'s.

    Example:
    [[0, 0, 0, 0, 0, 0, 0, 0, 3, 2],
     [0, 4, 0, 2, 4, 0, 0, 1, 0, 0],
     [1, 0, 1, 0, 0, 0, 3, 0, 0, 4],
     [0, 0, 0, 0, 4, 2, 0, 0, 0, 0],
     [0, 0, 0, 2, 0, 0, 0, 0, 0, 0],
     [4, 3, 0, 0, 1, 2, 0, 0, 0, 0],
     [3, 0, 0, 0, 2, 0, 0, 0, 0, 4],
     [0, 0, 0, 1, 0, 3, 2, 4, 3, 2],
     [0, 3, 0, 0, 0, 2, 0, 1, 0, 0]]
    gives: (3, 4, 6, 5)
    """
    it = iter(mat)
    hist = [(el==value) for el in next(it, [])]
    max_rect = max_rectangle_size(hist) + (0,)
    for irow, row in enumerate(it):
        hist = [(1+h) if el == value else 0 for h, el in zip(hist, row)]
        max_rect = max(max_rect, max_rectangle_size(hist) + (irow+1,), key=area)
        # irow+1, because we already used one row for initializing max_rect
    return max_rect

def max_rectangle_size(histogram):
    stack = []
    top = lambda: stack[-1]
    max_size = (0, 0, 0)  # height, width and start position of the largest rectangle
    pos = 0  # current position in the histogram
    for pos, height in enumerate(histogram):
        start = pos  # position where rectangle starts
        while True:
            if not stack or height > top().height:
                stack.append(Info(start, height))  # push
            elif stack and height < top().height:
                max_size = max(max_size, (top().height, (pos - top().start), top().start), key=area)
                start, _ = stack.pop()
                continue
            break  # height == top().height goes here
    pos += 1
    for start, height in stack:
        max_size = max(max_size, (height, (pos - start), start), key=area)
    return max_size

def area(size):
    return size[0] * size[1]
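As a quick usage sketch (not part of the original answer), running max_rect on the matrix from the question recovers the corner cells stated there:

matrix = [[0, 0, 0, 0, 1, 0],
          [0, 0, 1, 0, 0, 1],
          [0, 0, 0, 0, 0, 0],
          [1, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 1],
          [0, 0, 1, 0, 0, 0]]
height, width, left, bottom = max_rect(matrix)
top_row = bottom - height + 1
right = left + width - 1
print(height * width)                    # 12
print((top_row, left), (bottom, right))  # (2, 1) (4, 4), i.e. Cell 1 and Cell 2 from the question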
To be complete, here's the C# version which outputs the rectangle coordinates.
It's based on dmarra's answer but without any other dependencies.
The only requirement is a function bool GetPixel(int x, int y), which returns true when a pixel is set at the coordinates x, y.
public struct INTRECT
{
    public int Left, Right, Top, Bottom;

    public INTRECT(int aLeft, int aTop, int aRight, int aBottom)
    {
        Left = aLeft;
        Top = aTop;
        Right = aRight;
        Bottom = aBottom;
    }

    public int Width { get { return (Right - Left + 1); } }

    public int Height { get { return (Bottom - Top + 1); } }

    public bool IsEmpty { get { return Left == 0 && Right == 0 && Top == 0 && Bottom == 0; } }

    public static bool operator ==(INTRECT lhs, INTRECT rhs)
    {
        return lhs.Left == rhs.Left && lhs.Top == rhs.Top && lhs.Right == rhs.Right && lhs.Bottom == rhs.Bottom;
    }

    public static bool operator !=(INTRECT lhs, INTRECT rhs)
    {
        return !(lhs == rhs);
    }

    public override bool Equals(Object obj)
    {
        return obj is INTRECT && this == (INTRECT)obj;
    }

    public bool Equals(INTRECT obj)
    {
        return this == obj;
    }

    public override int GetHashCode()
    {
        return Left.GetHashCode() ^ Right.GetHashCode() ^ Top.GetHashCode() ^ Bottom.GetHashCode();
    }
}
public INTRECT GetMaximumFreeRectangle()
{
    int XEnd = 0;
    int YStart = 0;
    int MaxRectTop = 0;
    INTRECT MaxRect = new INTRECT();

    // STEP 1:
    // build a seed histogram using the first row of grid points
    // example: [true, true, false, true] = [1,1,0,1]
    int[] hist = new int[Height];
    for (int y = 0; y < Height; y++)
    {
        if (!GetPixel(0, y))
        {
            hist[y] = 1;
        }
    }

    // STEP 2:
    // get a starting max area from the seed histogram we created above.
    // using the example from above, this value would be [1, 1], as the only valid area is a single point.
    // another example for [0,0,0,1,0,0] would be [1, 3], because the largest area of contiguous free space is 3.
    // Note that at this step, the height of the found rectangle will always be 1 because we are operating on
    // a single row of data.
    Tuple<int, int> maxSize = MaxRectSize(hist, out YStart);
    int maxArea = (int)(maxSize.Item1 * maxSize.Item2);
    MaxRectTop = YStart;

    // STEP 3:
    // build histograms for each additional row, re-testing for new possible max rectangular areas
    for (int x = 1; x < Width; x++)
    {
        // build a new histogram for this row. the values of this row are
        // 0 if the current grid point is occupied; otherwise, it is 1 + the value
        // of the previously found histogram value for the previous position.
        // What this does is effectively keep track of the height of continuous available spaces.
        // EXAMPLE:
        // Given the following grid data (where 1 means occupied, and 0 means free; for clarity):
        //     INPUT:       OUTPUT:
        // 1.) [0,0,1,0] = [1,1,0,1]
        // 2.) [0,0,1,0] = [2,2,0,2]
        // 3.) [1,1,0,1] = [0,0,1,0]
        //
        // As such, you'll notice position 1,0 (row 1, column 0) is 2, because this is the height of contiguous
        // free space.
        for (int y = 0; y < Height; y++)
        {
            if (!GetPixel(x, y))
            {
                hist[y]++;
            }
            else
            {
                hist[y] = 0;
            }
        }

        // find the maximum size of the current histogram. If it happens to be larger
        // than the currently recorded max size, then it is the new max size.
        Tuple<int, int> maxSizeTemp = MaxRectSize(hist, out YStart);
        int tempArea = (int)(maxSizeTemp.Item1 * maxSizeTemp.Item2);
        if (tempArea > maxArea)
        {
            maxSize = maxSizeTemp;
            maxArea = tempArea;
            MaxRectTop = YStart;
            XEnd = x;
        }
    }

    MaxRect.Left = XEnd - maxSize.Item1 + 1;
    MaxRect.Top = MaxRectTop;
    MaxRect.Right = XEnd;
    MaxRect.Bottom = MaxRectTop + maxSize.Item2 - 1;

    // at this point, we know the max size
    return MaxRect;
}
private Tuple<int, int> MaxRectSize(int[] histogram, out int YStart)
{
    Tuple<int, int> maxSize = new Tuple<int, int>(0, 0);
    int maxArea = 0;
    Stack<Tuple<int, int>> stack = new Stack<Tuple<int, int>>();
    int x = 0;
    YStart = 0;

    for (x = 0; x < histogram.Length; x++)
    {
        int start = x;
        int height = histogram[x];
        while (true)
        {
            if (stack.Count == 0 || height > stack.Peek().Item2)
            {
                stack.Push(new Tuple<int, int>(start, height));
            }
            else if (height < stack.Peek().Item2)
            {
                int tempArea = (int)(stack.Peek().Item2 * (x - stack.Peek().Item1));
                if (tempArea > maxArea)
                {
                    YStart = stack.Peek().Item1;
                    maxSize = new Tuple<int, int>(stack.Peek().Item2, (x - stack.Peek().Item1));
                    maxArea = tempArea;
                }
                Tuple<int, int> popped = stack.Pop();
                start = (int)popped.Item1;
                continue;
            }
            break;
        }
    }

    foreach (Tuple<int, int> data in stack)
    {
        int tempArea = (int)(data.Item2 * (x - data.Item1));
        if (tempArea > maxArea)
        {
            YStart = data.Item1;
            maxSize = new Tuple<int, int>(data.Item2, (x - data.Item1));
            maxArea = tempArea;
        }
    }

    return maxSize;
}
An appropriate algorithm is described in Algorithm for finding the largest inscribed rectangle in polygon (2019).
I implemented it in Python:
import largestinteriorrectangle as lir
import numpy as np

grid = np.array([[0, 0, 0, 0, 1, 0],
                 [0, 0, 1, 0, 0, 1],
                 [0, 0, 0, 0, 0, 0],
                 [1, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 1, 0, 0, 0]],
                "bool")
grid = ~grid
lir.lir(grid)  # [1, 2, 4, 3]
The result comes as x, y, width, height.
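For reference, a small follow-up sketch (not part of the original answer) converting that result back to the corner cells used in the question:

x, y, w, h = map(int, lir.lir(grid))
cell_1 = (y, x)                  # top-left corner as (row, column)
cell_2 = (y + h - 1, x + w - 1)  # bottom-right corner as (row, column)
print(cell_1, cell_2)            # (2, 1) (4, 4)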

Normalizing data and applying colormap results in rotated image using matplotlib?

So I wanted to see if I could make fractal flames using matplotlib and figured a good test would be the Sierpinski triangle. I modified a working version I had that simply performed the chaos game by normalizing the x range from -2, 2 to 0, 400 and the y range from 0, 2 to 0, 200. I also truncated the x and y coordinates to 2 decimal places and multiplied by 100 so that the coordinates could be put into a matrix that I could apply a color map to. Here's the code I'm working on right now (please forgive the messiness):
import numpy as np
import matplotlib.pyplot as plt
import math
import random

def f(x, y, n):
    N = np.array([[x, y]])
    M = np.array([[1/2.0, 0], [0, 1/2.0]])
    b = np.array([[.5], [0]])
    b2 = np.array([[0], [.5]])
    if n == 0:
        return np.dot(M, N.T)
    elif n == 1:
        return np.dot(M, N.T) + 2*b
    elif n == 2:
        return np.dot(M, N.T) + 2*b2
    elif n == 3:
        return np.dot(M, N.T) - 2*b

def norm_x(n, minX_1, maxX_1, minX_2, maxX_2):
    rng = maxX_1 - minX_1
    n = (n - minX_1) / rng
    rng_2 = maxX_2 - minX_2
    n = (n * rng_2) + minX_2
    return n

def norm_y(n, minY_1, maxY_1, minY_2, maxY_2):
    rng = maxY_1 - minY_1
    n = (n - minY_1) / rng
    rng_2 = maxY_2 - minY_2
    n = (n * rng_2) + minY_2
    return n

# Plot ranges
x_min, x_max = -2.0, 2.0
y_min, y_max = 0, 2.0

# Even intervals for points to compute orbits of
x_range = np.arange(x_min, x_max, (x_max - x_min) / 400.0)
y_range = np.arange(y_min, y_max, (y_max - y_min) / 200.0)
mat = np.zeros((len(x_range) + 1, len(y_range) + 1))

random.seed()
x = 1
y = 1
for i in range(0, 100000):
    n = random.randint(0, 3)
    V = f(x, y, n)
    x = V.item(0)
    y = V.item(1)
    mat[norm_x(x, -2, 2, 0, 400), norm_y(y, 0, 2, 0, 200)] += 50

plt.xlabel('x0')
plt.ylabel('y')
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2,2, 0, 2])
plt.show()
The mathematics seem solid here so I suspect something weird is going on with how I'm handling where things should go into the 'mat' matrix and how the values in there correspond to the colormap.
If I understood your problem correctly, you need to transpose your matrix using the method .T. So just replace
fig = plt.figure(figsize=(10,10))
plt.imshow(mat, cmap="spectral", extent=[-2,2, 0, 2])
plt.show()
by
fig = plt.figure(figsize=(10,10))
ax = plt.gca()
ax.imshow(mat.T, cmap="spectral", extent=[-2,2, 0, 2], origin="lower")
plt.show()
The argument origin="lower" tells imshow to place the origin of your matrix at the bottom of the figure.
Hope it helps.
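To see why the transpose is needed, here is a minimal illustration (the demo array and the marked point are made up): numpy's first index selects the row, which imshow draws vertically, so an array indexed as mat[x_bin, y_bin] appears rotated unless it is transposed.

import numpy as np
import matplotlib.pyplot as plt

demo = np.zeros((400, 200))  # indexed as demo[x_bin, y_bin], like `mat` in the question
demo[300, 50] = 1            # a single bright point at x-bin 300, y-bin 50

# Without .T this point would show up at row 300, column 50 (i.e. x and y swapped).
plt.imshow(demo.T, origin="lower", extent=[0, 400, 0, 200])
plt.show()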

C++ Points of Vertices in Cuboid (Bitwise AND)

I'm trying to calculate the points in a cuboid given its centre (which is a Vector3) and the lengths of the sides along the x, y and z axis. I found the following on math.stackexchange.com: https://math.stackexchange.com/questions/107778/simplest-equation-for-drawing-a-cube-based-on-its-center-and-or-other-vertices which says I can use the following formulae:
The constructor for the World class is:
World::World(Vector3 o, float d1, float d2, float d3) : origin(o)
{
    // If we consider an edge length to be d, we need to find r such that
    // 2r = d in order to calculate the positions of each vertex in the world.
    float r1 = d1 / 2,
          r2 = d2 / 2,
          r3 = d3 / 2;

    for (int i = 0; i < 8; i++)
    {
        /* Sets up the vertices of the cube.
         *
         * #see http://bit.ly/1cc2RPG
         */
        float x = o.getX() + (std::pow(-1, i&1) * r1),
              y = o.getY() + (std::pow(-1, i&2) * r2),
              z = o.getZ() + (std::pow(-1, i&4) * r3);

        points[i] = Vector3(x, y, z);
        std::cout << points[i] << "\n";
    }
}
And I am passing the following parameters to the constructor:
Vector3 o(0, 0, 0);
World w(o, 100.f, 100.f, 100.f);
The coordinates being output for all 8 vertices are:
(50, 50, 50)
(-50, 50, 50)
(50, 50, 50)
(-50, 50, 50)
(50, 50, 50)
(-50, 50, 50)
(50, 50, 50)
(-50, 50, 50)
Which cannot be correct. Any guidance would be very much appreciated!
The problem lies in the bitwise & inside your pow calls:
For the y and z components, i & 2 always evaluates to 0 or 2, and i & 4 to 0 or 4. Since (-1)^0 = (-1)^2 = (-1)^4 = 1, the sign of these components is always positive. You could use (i & 2) != 0 or (i & 2) >> 1 for the y component instead. The same goes for the z component.
Change this:
float x = o.getX() + (std::pow(-1, i&1) * r1),
      y = o.getY() + (std::pow(-1, i&2) * r2),
      z = o.getZ() + (std::pow(-1, i&4) * r3);
To this:
float x = o.getX() + (std::pow(-1, (i     ) & 1) * r1), // pow(-1, 0) == 1, pow(-1, 1) == -1
      y = o.getY() + (std::pow(-1, (i >> 1) & 1) * r2), // pow(-1, 0) == 1, pow(-1, 1) == -1
      z = o.getZ() + (std::pow(-1, (i >> 2) & 1) * r3); // pow(-1, 0) == 1, pow(-1, 1) == -1
Or even to this:
float x = o.getX() + (std::pow(-1, (i     )) * r1), // pow(-1, {0, 2, 4, 6}) == 1, pow(-1, {1, 3, 5, 7}) == -1
      y = o.getY() + (std::pow(-1, (i >> 1)) * r2), // pow(-1, {0, 2}) == 1, pow(-1, {1, 3}) == -1
      z = o.getZ() + (std::pow(-1, (i >> 2)) * r3); // pow(-1, 0) == 1, pow(-1, 1) == -1
The problem is that, as written, the values you mask out do identify whether or not each length needs to be negated, but they are not in the correct place value to get the desired behaviour out of the exponentiation of -1.
Rewriting the code as above will solve this issue; however, it would be more readable, and arguably more maintainable, to simply unroll the loop and write each vertex out explicitly as additions and subtractions, without using the pow function.
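To illustrate the bit trick, here is the same vertex enumeration sketched in Python purely for brevity (the centre and half-lengths are placeholder values standing in for o, r1, r2 and r3):

# Enumerate the 8 cuboid vertices: bit k of i selects the sign of axis k.
cx, cy, cz = 0.0, 0.0, 0.0      # centre (stand-in for o)
r1, r2, r3 = 50.0, 50.0, 50.0   # half side lengths

for i in range(8):
    sx = -1 if (i >> 0) & 1 else 1
    sy = -1 if (i >> 1) & 1 else 1
    sz = -1 if (i >> 2) & 1 else 1
    print(cx + sx * r1, cy + sy * r2, cz + sz * r3)  # all 8 distinct corners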

Finding Neighbourhood in Matrices [duplicate]

This question already has an answer here:
Conway's Game of Life, counting neighbors [closed]
(1 answer)
Closed 9 years ago.
I am working on a project involving cellular automaton methods. What I am trying to figure out is how to write a function that helps find all the neighbours in a 2D array.
For example, I've got a size x size 2D array [size = 4 here]:
[x][n][ ][n]
[n][n][ ][n]
[ ][ ][ ][ ]
[n][n][ ][n]
The field marked as x [index 0,0] has the neighbours marked as [n] -> 8 neighbours. What I'm trying to do is write a function which can find the neighbours without writing thousands of if statements.
Does anybody have an idea how to do it?
Thanks
For the neighbours of element (i,j) in an NxM matrix:
// Add N (or M) before taking the remainder so the result stays non-negative:
// in C++, (-1) % N is -1, not N-1.
int above = (i - 1 + N) % N;
int below = (i + 1) % N;
int left  = (j - 1 + M) % M;
int right = (j + 1) % M;

// Pointers to the eight neighbouring elements.
// decltype(&matrix[0][0]) gives a plain pointer type (decltype(matrix[0][0]) would be a
// reference, and arrays of pointers to references are not allowed).
decltype(&matrix[0][0]) indices[8];
indices[0] = & matrix[above][left];
indices[1] = & matrix[above][j];
indices[2] = & matrix[above][right];
indices[3] = & matrix[i][left];
// Skip matrix[i][j]
indices[4] = & matrix[i][right];
indices[5] = & matrix[below][left];
indices[6] = & matrix[below][j];
indices[7] = & matrix[below][right];
Suppose you are in cell (i, j). Then, on an infinite grid, your neighbors should be [(i-1, j-1), (i-1,j), (i-1, j+1), (i, j-1), (i, j+1), (i+1, j-1), (i+1, j), (i+1, j+1)].
However, since the grid is finite, some of the above values will fall outside the bounds. But we know modular arithmetic: 4 % 3 = 1 and -1 % 3 = 2 (using the mathematical modulo, which always returns a non-negative result; in C++ you would write ((x % n) + n) % n). So, if the grid is of size n, m you only need to apply %n, %m to the above list to get the proper list of neighbors: [((i-1) % n, (j-1) % m), ((i-1) % n, j), ((i-1) % n, (j+1) % m), (i, (j-1) % m), (i, (j+1) % m), ((i+1) % n, (j-1) % m), ((i+1) % n, j), ((i+1) % n, (j+1) % m)]
That works if your coordinates are between 0 and n-1 and between 0 and m-1. If you start at 1 then you need to tweak the above by adding or subtracting 1 in the right places.
For your case n=m=4 and (i, j) = (0, 0). The first list is [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]. Applying the modulus operations you get to [(3, 3), (3, 0), (3, 1), (0, 3), (0, 1), (1, 3), (1, 0), (1, 1)] which are exactly the squares marked [n] in your picture.
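The same idea fits in a few lines of Python (a small sketch, not part of the original answer; Python's % already returns a non-negative result, so no extra adjustment is needed):

def neighbours(i, j, n, m):
    """All 8 wrap-around neighbours of (i, j) on an n x m grid."""
    return [((i + di) % n, (j + dj) % m)
            for di in (-1, 0, 1)
            for dj in (-1, 0, 1)
            if (di, dj) != (0, 0)]

print(neighbours(0, 0, 4, 4))
# [(3, 3), (3, 0), (3, 1), (0, 3), (0, 1), (1, 3), (1, 0), (1, 1)]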
Add and subtract one from the coordinates, in all possible permutations. Results outside the boundaries wrap around (e.g. -1 becomes 3 and 4 becomes 0). Just a couple of simple loops needed basically.
Something like
// Find the closest neighbours (one step) from the coordinates [x,y]
// The max coordinates is max_x,max_y
// Note: Does not contain any error checking (for valid coordinates)
std::vector<std::pair<int, int>> getNeighbours(int x, int y, int max_x, int max_y)
{
    std::vector<std::pair<int, int>> neighbours;

    for (int dx = -1; dx <= 1; ++dx)
    {
        for (int dy = -1; dy <= 1; ++dy)
        {
            // Skip the coordinates [x,y]
            if (dx == 0 && dy == 0)
                continue;

            int nx = x + dx;
            int ny = y + dy;

            // If the new coordinates goes out of bounds, wrap them around
            if (nx < 0)
                nx = max_x;
            else if (nx > max_x)
                nx = 0;

            if (ny < 0)
                ny = max_y;
            else if (ny > max_y)
                ny = 0;

            // Add neighbouring coordinates to result
            neighbours.push_back(std::make_pair(nx, ny));
        }
    }

    return neighbours;
}
Example use for you:
auto n = getNeighbours(0, 0, 3, 3);
for (const auto& p : n)
    std::cout << '[' << p.first << ',' << p.second << "]\n";
Prints out
[3,3]
[3,0]
[3,1]
[0,3]
[0,1]
[1,3]
[1,0]
[1,1]
which is the correct answer.