Import 2D matrix in 3D matrix using OpenCV - c++

This function:
filtro.kernel(n, mat)
returns a 2D matrix of size 15x15. Is there a way to collect all 12 matrices computed by the for loop into a single 3D matrix of size 12x15x15?
#include <opencv2/core/core.hpp>
#include <opencv2/core/core.hpp>
#include "filter.h"
#include <iostream>
int main(){
    using namespace cv;
    using namespace std;
    filter filtro;
    // Reused 15x15 destination buffer for each kernel.
    cv::Mat mat = cv::Mat::zeros(15, 15, CV_32S);
    // Compute and print each of the 12 angle kernels.
    for (int angle = 0; angle < 12; ++angle) {
        filtro.kernel(angle, mat);
        cout << "Angle Matrix" << endl
             << angle * 15 << endl
             << mat << endl;
    }
    return 0;
}

You can use cv::merge to create a multi-channel matrix. Note, however, that the channel dimension comes last: merging 12 matrices of size (15,15) gives a (15,15) matrix with 12 channels, i.e. (15,15,12) rather than (12,15,15).
Try this:
#include <opencv2/core/core.hpp>
#include <opencv2/core/core.hpp>
#include "filter.h"
#include <iostream>
int main(){
using namespace cv;
using namespace std;
filter filtro;
// Create a vector of Mat
vector<Mat> mats;
for (int n = 0; n < 12; n++){
cv::Mat mat = cv::Mat::zeros(15, 15, CV_32S);
filtro.kernel(n, mat);
cout<<"Angle Matrix"<<endl;
cout<< n*15 <<endl;
cout<< mat <<endl;
mats.push_back(mat);
}
// Merge the Mats
Mat dst;
merge(mats, dst);
return 0;
}

Related

Extract subimage in a image with borders wrapped around

Say I have an image, and I'd like to extract a subimage assuming the original image is wrapped like a toroid.
My guess was doing something like
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdio>
#include <vector>
#include <Windows.h>
using namespace cv;
using namespace std;
int main(int argc, char **argv) {
// Load the source image as single-channel grayscale.
Mat image = imread("image.jpg", CV_LOAD_IMAGE_GRAYSCALE);
const int & rows = image.rows;
const int & cols = image.cols;
// BUG: Rect takes (x, y, width, height), where x is the column offset and
// y the row offset, so rows/cols are swapped here. Moreover, a 51x51 ROI
// anchored at the bottom-right pixel extends past the image bounds, and
// cv::Mat ROIs cannot wrap around toroidally, so this extraction fails.
Rect roi = Rect(rows - 1, cols - 1, 51, 51);
Mat subImage = image(roi);
namedWindow("Window", CV_WINDOW_AUTOSIZE);
imshow("Window", subImage);
waitKey(0);
return 0;
}
But that didn't work. Is there anything you can suggest? maybe there's a function I'm missing that I can use?
If not what's the easiest way?
Negative values did not work as apparently the library in question does not support toroid-like image handling...
I now see two options for you:
either extract (up to) four sub images and recombine them
or do the extraction by hand
For the latter:
// Copy a wrapped (toroidal) sub-image by hand: each source coordinate is
// taken modulo the image size, shifted by width/height first so that '%'
// never operates on a negative value.
int cx = 0, cy = 0;
for(int y = lowerBoundY; y < upperBoundY; ++y)
{
for(int x = lowerBoundX; x < upperBoundX; ++x)
{
// (y + height) % height maps y in [-height, 2*height) into [0, height);
// likewise for x. cx advances across the destination row.
subImage[cy][cx++] = image[(y + height) % height][(x + width) % width];
}
++cy;
cx = 0; // restart at the left edge of the destination for the next row
}
(Silently assuming you do not exceed the intervals [-width;2*width) and [-height;2*height)...)
Try openCV copyMakeBorder; it extracts subimages, offering several options for border treatment.

cv::Exception at memory location 0x0000000296AFDAD0 in visual studio

I'm trying to get eigenvalues from an image matrix. I tried with a streaming video and did not get an error. When I tried to find eigenvalues of a stored image I get cv::Exception at memory location 0x0000000296AFDAD0 error.
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
// Compute and print the eigenvalues and eigenvectors of covarm.
// NOTE(review): cv::eigen expects a symmetric single-channel matrix —
// covarm is assumed to satisfy that; confirm at the call site.
void calceigen(Mat covarm){
    Mat vals;
    Mat vecs;
    eigen(covarm, vals, vecs);
    cout << vals;
    cout << vecs;
}
// Build the covariance matrix of g (one sample per column, CV_64F output)
// and pass it on to calceigen for eigen-decomposition.
void covard(Mat g)
{
    Mat covarianceMatrix;
    Mat meanOut;
    calcCovarMatrix(g, covarianceMatrix, meanOut, CV_COVAR_COLS, CV_64F);
    calceigen(covarianceMatrix);
}
int main(int argc, const char** argv)
{
cv::Mat image = cv::imread("C:/Users/DellPc/Desktop/images1.jpg");
//setup image files used in the capture process
Mat grayscaleFrame;
Mat graysc;
//convert captured image to gray scale and equalize
//cvtColor(image, grayscaleFrame, CV_BGR2GRAY);
//resize(grayscaleFrame, graysc, Size(16, 16), 0, 0, INTER_LANCZOS4);
int rows = image.rows;
int cols = image.cols;
cv::Size s = image.size();
rows = s.height;
cols = s.width;
cout << rows << endl;
cout << cols << endl;
covard(image);
//find faces and store them in the vector array
imshow("small", image);
waitKey(33);
return 0;
}

thrust::max_element() returns incorrect values for a 2D matrix

I am trying to use thrust to find the max element of a 2D matrix. However, I always get incorrect results. The code works fine for a 1D matrix but behaves unpredictably with a 2D matrix.
I use opencv GpuMat for 2D matrix. Here are my codes. I wonder if someone met the same problem?
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <iostream>
#include <opencv2\opencv.hpp>
#include <opencv2\cuda.hpp>
using namespace std;
using namespace cv;
using namespace cv::cuda;
// Find the maximum element of a CV_16UC1 GpuMat and report its position.
// Returns the max value; *p_r_/*p_c_ receive its row and column.
//
// The matrix is scanned as a flat array of rows*cols elements, which is
// only valid when the GpuMat has no row padding. GpuMat rows are
// pitch-aligned by default, so a non-continuous matrix would silently
// include garbage padding bytes in the scan — assert instead of
// returning wrong results.
ushort thrust_find_max_idx(const GpuMat& in_, int* p_r_, int* p_c_){
    CV_Assert(in_.isContinuous());
    thrust::device_ptr<ushort> ptr((ushort*)in_.data);
    unsigned int N = in_.cols * in_.rows;
    // Locate the maximum over the flattened matrix.
    thrust::device_vector<ushort>::iterator iter = thrust::max_element(ptr, ptr + N);
    // Convert the iterator back to a linear offset, then to (row, col).
    int pos = thrust::device_pointer_cast(&(iter[0])) - ptr;
    *p_r_ = pos / in_.cols;
    *p_c_ = pos % in_.cols;
    return *iter;
}
int main(void)
{
    // Build a 10x10 random 16-bit test matrix on the host...
    Mat cpu_matrix;
    cpu_matrix.create(10, 10, CV_16UC1);
    randu(cpu_matrix, 1, 256); // values in [1, 256)
    // ...and copy it to the device.
    GpuMat matrix;
    matrix.upload(cpu_matrix);
    int r, c;
    ushort max = thrust_find_max_idx(matrix, &r, &c);
    // Bring the data back so the tested sequence can be printed.
    matrix.download(cpu_matrix);
    cout << cpu_matrix << endl;
    cout << max << " r " << r << " c " << c << endl;
    return 0;
}
Thanks to Robert's reply, I realized that a GpuMat is not continuously allocated by default (unlike a Mat). Luckily, the function cuda::minMaxLoc() can be used to quickly identify the max element in a GpuMat:
double max; cv::Point loc;
cv::cuda::minMaxLoc(matrix, 0, &max, 0, &loc);

Creating bigger image out of small image Mat [opencv, background subtraction]

I have an image i1. I am supposed to create another Mat m1 of size (image.rows*3, image.cols*3).
In m1, I'm supposed to fill the pixel value in the following way. (Please do see the image):
Here is my code-
#include <highgui.h>
#include "opencv2/opencv.hpp"
#include <fstream>
using namespace cv;
static Mat NeurMap1, NeurMap2, NeurMap3, frame, hsv_Frame;
std::ofstream myfile;
// Expand each pixel of the global `frame` into an n x n block of the
// global NeurMap1. NOTE(review): `n` is not declared in this snippet —
// presumably a global scale factor of 3 given the t1±1/t2±1 loops below;
// confirm where it is defined.
void InitializeNeurMap(cv::Mat Channel[3])
{
int i=0,j=0,m_i=0,m_j=0, t1=0, t2=0;
for(i=0; i < frame.rows; i++)
{
for(j=0;j < frame.cols;j++)
{
// (t1, t2) is the centre of the destination block for source pixel (i, j).
t1= i*n+1; t2 = j*n+1;
for(m_i=t1-1; m_i <= t1+1;m_i++)
{
for(m_j=t2-1; m_j <= t2+1; m_j++)
{
// BUG for colour input: at<uchar> treats the 3-channel frame as if it
// were single-channel, so only about one third of the output is filled;
// per-channel Vec3b access is required for a BGR frame.
NeurMap1.at<uchar>(m_i, m_j)= frame.at<uchar>(i,j);
}
}
}
}
std::cout<<m_j;
myfile<<frame;
}
int main()
{
// Log file that receives the raw frame contents.
myfile.open("NeurMaptext.txt");
String filename="BootStrap/b%05d.bmp";// sequence of frames are read
VideoCapture cap(filename);
if(!cap.isOpened()) // check if we succeeded
return -1;
namedWindow("edges",1);
//namedWindow("frames",1);
Mat Channel[3];
cap>>frame;
// NOTE(review): `n` is not declared anywhere in this snippet; it must be
// a global scale factor for this to compile — confirm its definition.
NeurMap1 = Mat::zeros(frame.rows*n, frame.cols*n, frame.type());
InitializeNeurMap(Channel);
imshow("edges",NeurMap1);waitKey(33);
// Drain the remaining frames; nothing further is done with them here.
for(;;)
{
cap>>frame;
if(frame.empty())
break;
}
system("pause");
return 0;
}
The input image is RGB, 160x120. Why am I not getting all the columns in the output image shown in the link above?
You can simply call resize() by passing the INTER_NEAREST parameter, i.e. using the nearest-neighbor interpolation.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    // A 2x3 single-channel test image with known values.
    unsigned char data[] = { 1, 2, 3, 4, 5, 6 };
    Mat img(2, 3, CV_8UC1, data);
    cout << img << endl;
    // Scale up 3x in each direction. INTER_NEAREST replicates every
    // source pixel into a 3x3 block instead of interpolating.
    Mat res(6, 9, CV_8UC1);
    resize(img, res, res.size(), 0, 0, INTER_NEAREST);
    cout << res << endl;
    return 0;
}
You will get:
You are getting only one-third of the image filled because you are probably passing a 3-channel (colour) image to the function and treating it as a single-channel image. So change the above code to:
// Replicate every pixel of the global colour `frame` into an n x n block
// of the global NeurMap1, copying all three BGR channels, then dump the
// frame to the log file.
void InitializeNeurMap(cv::Mat Channel[3])
{
    for(int row = 0; row < frame.rows; row++){
        for(int col = 0; col < frame.cols; col++){
            const Vec3b &src = frame.at<Vec3b>(row, col);
            for(int dy = 0; dy < n; dy++){
                for(int dx = 0; dx < n; dx++){
                    Vec3b &dst = NeurMap1.at<Vec3b>(row*n+dy, col*n+dx);
                    dst[0] = src[0]; // blue channel
                    dst[1] = src[1]; // green channel
                    dst[2] = src[2]; // red channel
                }
            }
        }
    }
    myfile<<frame;
}
See the result:

Colour reduction in images not working

Please have a look at the following code
#include <iostream>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\core\core.hpp>
using namespace std;
using namespace cv;
void reduceColor(Mat&,int=64);
int main()
{
Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Koala.jpg");
namedWindow("Image");
imshow("Image",image);
//reduceColor(image,64);
waitKey(0);
}
// Posterise the image in place: quantise every channel byte to the centre
// of its `division`-wide bucket (e.g. division=64 leaves 4 levels per
// channel), then display the reduced image in a second window.
void reduceColor(Mat &image,int division)
{
    int numberOfRows = image.rows;
    // Treat each row as a flat array of channel bytes (cols * channels).
    int numberOfColumns = image.cols * image.channels();
    for(int i=0;i<numberOfRows;i++)
    {
        uchar *data = image.ptr<uchar>(i);
        for(int pixel=0;pixel<numberOfColumns;pixel++)
        {
            // BUG FIX: the original indexed with the row counter `i`
            // instead of `pixel`, so at most one byte per row was ever
            // modified and the image appeared unchanged.
            data[pixel] = data[pixel]/division*division + division/2;
        }
    }
    namedWindow("Image2");
    imshow("Image2",image);
}
This is computer vision. I am trying to read an image and reduce its colours by iterating over all the pixels and channels. But the colour is not reduced! It simply displays the original image. Please help!
The inner loop never uses its counter `pixel`: you read and write `data[i]`, where `i` is the outer row counter and stays constant for the whole row. So at most one byte per row changes after the function call, and nothing else does. Use `data[pixel]` instead.