Create a single image from an array of images - C++

Hi, I'm trying to create a single image from multiple images in OpenCV.
The images I use are all the same size.
What I do is reshape each of them to a single row and then try to merge them together into my new image.
I create a new image with the size of two images and pass the array, but I receive the error EXC_BAD_ACCESS(code=1, address = ..)
Note: the sizes of the images are correct.
Size of a single image: [170569 x 1]
Size of new_image: [170569 x 2]
My code is below.
Thank you
#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    Mat image[2];
    image[0] = imread("image1.jpg", 0);
    image[1] = imread("image2.jpg", 0);

    image[0] = image[0].reshape(0, 1); // single row
    image[1] = image[1].reshape(0, 1); // single row

    int size = sizeof(image) / sizeof(Mat);

    // crashes here: `image` is an array of Mat headers, not raw pixel data,
    // and the loaded images are CV_8U, not CV_32FC1
    Mat new_image(image[0].cols, size, CV_32FC1, image);
}

Since each reshaped image is a single row, vconcat stacks them into the [170569 x 2] Mat you are after:
Mat new_image;
vconcat(image[0], image[1], new_image);

If I understand well, you need to concatenate two images of the same size into one Mat. I wrote this very quick code to perform that task.
You can change the function's arguments to pointers and add other handlers to take care of images of varying size.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>

cv::Mat cvConcatenateMat(const cv::Mat &image1, const cv::Mat &image2, bool isCol = true) {
    if (isCol) {
        // place image2 to the right of image1
        cv::Mat mergeMat = cv::Mat(image1.rows, image1.cols + image2.cols, image1.type());
        for (int j = 0; j < image1.rows; j++) {
            for (int i = 0; i < image1.cols; i++) {
                mergeMat.at<cv::Vec3b>(j, i) = image1.at<cv::Vec3b>(j, i);
            }
            for (int i = image1.cols; i < mergeMat.cols; i++) {
                // offset the column index so we read image2 from its own origin
                mergeMat.at<cv::Vec3b>(j, i) = image2.at<cv::Vec3b>(j, i - image1.cols);
            }
        }
        return mergeMat;
    } else {
        // place image2 below image1
        cv::Mat mergeMat = cv::Mat(image1.rows + image2.rows, image1.cols, image1.type());
        for (int j = 0; j < image1.cols; j++) {
            for (int i = 0; i < image1.rows; i++) {
                mergeMat.at<cv::Vec3b>(i, j) = image1.at<cv::Vec3b>(i, j);
            }
            for (int i = image1.rows; i < mergeMat.rows; i++) {
                mergeMat.at<cv::Vec3b>(i, j) = image2.at<cv::Vec3b>(i - image1.rows, j);
            }
        }
        return mergeMat;
    }
}

int main(int argc, const char * argv[]) {
    cv::Mat image1 = cv::imread("img1.jpg");
    cv::Mat image2 = cv::imread("img2.jpg");
    cv::resize(image2, image2, image1.size());
    cv::Mat outImage = cvConcatenateMat(image1, image2, false);
    cv::imshow("out image", outImage);
    cv::waitKey(0);
    return 0;
}
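For reference, OpenCV's built-in cv::hconcat and cv::vconcat do the same job as the loops above; a minimal sketch, assuming the same img1.jpg and img2.jpg inputs:
cv::Mat a = cv::imread("img1.jpg");
cv::Mat b = cv::imread("img2.jpg");
cv::resize(b, b, a.size());    // both functions require matching sizes and types
cv::Mat sideBySide, stacked;
cv::hconcat(a, b, sideBySide); // equivalent to isCol == true above
cv::vconcat(a, b, stacked);    // equivalent to isCol == false above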

Related

Coding a dedicated average filter function for color images in C++ OpenCV

So basically, I have to code my own function in C++ with OpenCV that applies an average filter to both gray and color images.
The function takes a Mat object and the size of the average filter (3 for a 3x3 filtering matrix, for example), and returns a Mat object.
This is what I have for the moment; it doesn't work, and I don't know how to extend it to color.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

Mat filtrageMoyen(Mat image, int tailleZonage) {
    Mat imageRetour = image.clone();
    int cadrillage = tailleZonage / 2;
    int valeurMoyenne = 0;
    for (size_t x = 0; x < imageRetour.rows; x++)
    {
        for (size_t y = 0; y < imageRetour.cols; y++)
        {
            // note: this window is neither centered on (x, y) nor clipped at the
            // borders, and the sum is never divided by the window area
            for (size_t xZonage = 0; xZonage < cadrillage; xZonage++)
            {
                for (size_t yZonage = 0; yZonage < cadrillage; yZonage++)
                {
                    valeurMoyenne += (image.at<unsigned char>(x + xZonage, y + yZonage));
                }
            }
            imageRetour.at<unsigned char>(x, y) = valeurMoyenne;
            valeurMoyenne = 0;
        }
    }
    return imageRetour;
}

int main() {
    string filename = "imageRickRoll.png";
    Mat img = imread(filename, cv::IMREAD_GRAYSCALE);
    imshow("Image filtree", filtrageMoyen(img, 5));
    waitKey(0);
    return 0;
}
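For reference, a minimal sketch of what a working version could look like: the window is centered on (x, y) and clamped at the borders, the sum is divided by the window area, and the gray/color split keys off image.channels(). This assumes a CV_8UC1 or CV_8UC3 input, and the function name filtrageMoyenCorrige is hypothetical; OpenCV's built-in cv::blur does the same job.
#include <algorithm>
#include <opencv2/opencv.hpp>
using namespace cv;

Mat filtrageMoyenCorrige(const Mat& image, int tailleZonage) {
    Mat out = image.clone();
    int r = tailleZonage / 2;            // half window size
    int n = tailleZonage * tailleZonage; // window area
    for (int x = 0; x < image.rows; x++) {
        for (int y = 0; y < image.cols; y++) {
            int somme[3] = {0, 0, 0};
            for (int dx = -r; dx <= r; dx++) {
                for (int dy = -r; dy <= r; dy++) {
                    // clamp coordinates so the window stays inside the image
                    int xx = std::min(std::max(x + dx, 0), image.rows - 1);
                    int yy = std::min(std::max(y + dy, 0), image.cols - 1);
                    if (image.channels() == 1) {
                        somme[0] += image.at<uchar>(xx, yy);
                    } else {
                        Vec3b p = image.at<Vec3b>(xx, yy);
                        somme[0] += p[0]; somme[1] += p[1]; somme[2] += p[2];
                    }
                }
            }
            // divide by the window area to get the actual average
            if (image.channels() == 1)
                out.at<uchar>(x, y) = (uchar)(somme[0] / n);
            else
                out.at<Vec3b>(x, y) = Vec3b((uchar)(somme[0] / n),
                                            (uchar)(somme[1] / n),
                                            (uchar)(somme[2] / n));
        }
    }
    return out;
}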

Change RGB to grayscale in C++

I am trying to convert an RGB image to grayscale using the average method, but the output I get is different from the desired output. I take the image and read its RGB values, average them, and store the result in another array of the same size as the image. Finally, I convert the array to a Mat and display the image.
Input image:
Desired output:
My output:
int main()
{
    Mat image = imread("<image_path>");
    int rows = image.rows;
    int cols = image.cols;

    int myArray[rows][cols]; // note: a variable-length array is a compiler extension, not standard C++
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            myArray[i][j] = 0;
        }
    }

    uint8_t* pixelPtr = (uint8_t*)image.data;
    int cn = image.channels();
    Scalar_<uint8_t> bgrPixel;
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            bgrPixel.val[0] = pixelPtr[i*image.cols*cn + j*cn + 0]; // B
            bgrPixel.val[1] = pixelPtr[i*image.cols*cn + j*cn + 1]; // G
            bgrPixel.val[2] = pixelPtr[i*image.cols*cn + j*cn + 2]; // R
            int average = (bgrPixel.val[0] + bgrPixel.val[1] + bgrPixel.val[2]) / 3;
            myArray[i][j] = average;
        }
    }

    Mat averaged_image(Size(rows, cols), CV_8UC3, myArray, Mat::AUTO_STEP);
    imwrite("<path to save the image>", averaged_image);
    imshow("averaged_image", averaged_image);
    waitKey(0);
    return 0;
}
When creating Mat averaged_image,
Mat averaged_image(Size(rows, cols), CV_8UC3, myArray, Mat::AUTO_STEP);
you need to use CV_32S, not CV_8UC3, because each of your array elements is not three chars but one 32-bit int. Note also that Size takes (width, height), so it should be Size(cols, rows).
You can also use the function cvtColor:
cv::Mat gray;
cv::cvtColor(image, gray, CV_BGR2GRAY);
Bonus: this function weights the channels correctly, because simple averaging may not be the right thing to do.
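If the goal is the plain average specifically, a minimal sketch that writes straight into an 8-bit single-channel Mat, sidestepping the int array and the type mismatch entirely (the paths are placeholders, as in the question):
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat image = imread("<image_path>");
    Mat gray(image.rows, image.cols, CV_8UC1); // one byte per pixel
    for (int i = 0; i < image.rows; i++) {
        for (int j = 0; j < image.cols; j++) {
            Vec3b bgr = image.at<Vec3b>(i, j);
            gray.at<uchar>(i, j) = (uchar)((bgr[0] + bgr[1] + bgr[2]) / 3);
        }
    }
    imshow("averaged_image", gray);
    waitKey(0);
    return 0;
}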

OpenCV only processes part of the image

I want to apply a negative transformation to an image, which is a very simple program.
But when I run the program, only 1/3 of the pixels are processed, although I want to transform all of them. I am not sure where it goes wrong; I followed the book for all of the code, but the result is different.
I think there is something wrong with the columns, but when I replace I.cols in the negativeImage function with the image's actual width, the output stays the same: only 1/3 of the image is processed. If I multiply I.cols by 3, all of the pixels in the image get processed.
vector<uchar> getNegativeLUT() {
    vector<uchar> LUT(256, 0);
    for (int i = 0; i < 256; ++i)
        LUT[i] = (uchar)(255 - i);
    return LUT;
}

void negativeImage(Mat& I) {
    vector<uchar> LUT = getNegativeLUT();
    for (int i = 0; i < I.rows; ++i) {
        for (int j = 0; j < I.cols; ++j) {
            I.at<uchar>(i, j) = LUT[I.at<uchar>(i, j)];
        }
    }
}

int main() {
    Mat image = imread("1.png");
    Mat processed_image2 = image.clone();
    negativeImage(processed_image2);
    printf("%d", image.cols);
    imshow("Input Image", image);
    imshow("Negative Image", processed_image2);
    waitKey(0);
    return 0;
}
Output Image
You need to use the correct type with the at<> operator. I suppose your image has 3 channels, so when you read it as uchar you only iterate over 1/3 of the image; your PNG has to be converted to 8UC1 first, and then you can use the uchar type to access each pixel. Also, I suggest you use the ptr<> operator in the row loop and then access each pixel as an array element.
Mat M;
cvtColor(I, M, CV_BGR2GRAY);
// M is CV_8UC1 type
for (int i = 0; i < M.rows; i++)
{
    uchar* p = M.ptr<uchar>(i);
    for (int j = 0; j < M.cols; j++) // M.cols, not I.cols: iterate over the gray image
    {
        p[j] = LUT[p[j]];
    }
}
EDIT: you should use cv::LUT instead of doing it yourself.
cv::Mat lut(1, 256, CV_8UC1);
for (int i = 0; i < 256; ++i)
{
    lut.at<uchar>(0, i) = uchar(255 - i);
}
cv::Mat result;
cv::LUT(M, lut, result);
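For a plain negative on an 8-bit image there is an even shorter route; a sketch with the same M as input:
cv::Mat result;
cv::bitwise_not(M, result); // computes 255 - pixel for every element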

From float array to Mat, concatenate blocks of an image

I have an 800x800 image which is broken down into 16 blocks of 200x200.
(you can see the previous post here)
These blocks are: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now, I want to be able to get the same images/blocks back, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        imshow( "Display window", mf );
        waitKey(0);
    }
}
So, the problem is that I am receiving an
OpenCV Error: Assertion failed
in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800, 800, CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        Rect roi( j, i, 200, 200 );
        mf.copyTo( big(roi) );
    }
}
imwrite( "testing" , big );
This gives me:
OpenCV Error: Assertion failed (!fixedSize()) in release
at the line mf.copyTo( big(roi) );
First, you need to know where your subimages are inside the big image. To do this, you can save the rect of each subimage into a vector<Rect> smallImageRois;
Then you can either use pointers (keep in mind that the subimages are not continuous), or simply copyTo the correct place:
Have a look:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;

int main()
{
    Mat3b img = imread("path_to_image");
    resize(img, img, Size(800, 800));

    Mat grayImg;
    cvtColor(img, grayImg, COLOR_BGR2GRAY);
    grayImg.convertTo(grayImg, CV_32F);

    int N = 4;
    if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
    {
        // Error
        return -1;
    }

    Size graySize = grayImg.size();
    Size smallSize(grayImg.cols / N, grayImg.rows / N);

    vector<Mat> smallImages;
    vector<Rect> smallImageRois;
    for (int i = 0; i < graySize.height; i += smallSize.height)
    {
        for (int j = 0; j < graySize.width; j += smallSize.width)
        {
            Rect rect = Rect(j, i, smallSize.width, smallSize.height);
            smallImages.push_back(grayImg(rect));
            smallImageRois.push_back(rect);
        }
    }

    // Option 1. Using a pointer to the subimage data.
    Mat big1(800, 800, CV_32F);
    int big1step = big1.step1();
    float* pbig1 = big1.ptr<float>(0);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        float* pdata = (float*)smallImages[idx].data;
        int step = smallImages[idx].step1();
        Rect roi = smallImageRois[idx];
        for (int i = 0; i < smallSize.height; ++i)
        {
            for (int j = 0; j < smallSize.width; ++j)
            {
                pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
            }
        }
    }

    // Option 2. Using copyTo
    Mat big2(800, 800, CV_32F);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        smallImages[idx].copyTo(big2(smallImageRois[idx]));
    }

    return 0;
}
For concatenating the sub-images into a single squared image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
    assert(patches.size() > 0);

    // make it square
    const int patch_width  = patches[0].cols;
    const int patch_height = patches[0].rows;
    const int patch_stride = ceil(sqrt(patches.size()));

    Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());
    for (size_t i = 0, iend = patches.size(); i < iend; i++) {
        Mat &patch = patches[i];
        const int offset_x = (i % patch_stride) * patch_width;
        const int offset_y = (i / patch_stride) * patch_height;
        // copy the patch to the output image
        patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
    }
    return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a squared image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = { Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255) };
// fill the vector with circles of different colours
for (int i = 0; i < 16; i++) {
    Mat patch = Mat::zeros(100, 100, CV_8UC3); // 8-bit, so imshow displays the colours directly
    circle(patch, Point(50, 50), 40, colours[i % 3], -1);
    patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce the following image:
Print the values of i and j before creating Mat mf and I believe you will soon be able to find the error.
Hint 1: i and j will be 0 the first time.
Hint 2: Use copyTo() with a ROI, like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi));
Edit:
Hint 3: Try not to do such pointer fiddling, you will get into trouble. Especially if you ignore the step (as you seem to do).
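Putting the hints together, here is a sketch of what the display loop in part 1 could look like instead, assuming subImages holds the 16 non-continuous 200x200 CV_32F blocks:
int Idx = 0;
namedWindow( "Display window", WINDOW_AUTOSIZE );
for ( ; Idx < (int)subImages.size(); ++Idx )
{
    float *pdata = (float*)( subImages[ Idx ].data );
    // rows and cols come first and are fixed at 200; pass the source step,
    // because each block is a view into the big image and is not continuous
    Mat mf( 200, 200, CV_32F, pdata, subImages[ Idx ].step );
    imshow( "Display window", mf );
    waitKey(0);
}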

OpenCV counting number of pixels in a horizontal line

I wish to find the number of white pixels in every row of a binary image, and if that count is greater than 90, I wish to delete the entire row by changing each pixel value in that row to 0. The code that I wrote is not working; apparently, I am getting the same binary image at the output.
Please help me fix the problem. BTW, I am using OpenCV 2.0.
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;

double a = 15;
double b = 255;
Mat I1;

int main(int argc, char **argv)
{
    cv::Mat I = imread("abc.bmp");
    if (I.empty())
    {
        std::cout << "!!! Failed imread(): image not found" << std::endl;
    }
    threshold(I, I1, a, b, THRESH_BINARY);

    int r = I.rows;
    int c = I.cols;
    for (int j = 0; j < r; j++)
    {
        int count = 0;
        for (int i = 0; i < c; i++)
        {
            if (I1.at<uchar>(j, i) == 255)
                count = count + 1;
        }
        if (count > 90)
        {
            for (int i = 0; i < c; i++)
                I1.at<uchar>(j, i) = 0;
        }
    }

    namedWindow("Display window", 0); // create a window for display
    imshow("Display window", I1);
    waitKey(0);
    return 0;
}
By default, imread returns a 3-channel BGR image. If you want to load a grayscale/binary image, use the cv::IMREAD_GRAYSCALE parameter:
cv::Mat I = cv::imread("abc.bmp", cv::IMREAD_GRAYSCALE);
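Once the image is loaded as a single channel, the row loop can also be written with cv::countNonZero and Mat::setTo; a minimal sketch under the same threshold values:
cv::Mat I = cv::imread("abc.bmp", cv::IMREAD_GRAYSCALE);
cv::Mat I1;
cv::threshold(I, I1, 15, 255, cv::THRESH_BINARY);
for (int j = 0; j < I1.rows; j++)
{
    if (cv::countNonZero(I1.row(j)) > 90) // white pixels in row j
        I1.row(j).setTo(0);               // blank the whole row
}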