Say I have an image, and I'd like to extract a subimage assuming the original image is wrapped like a toroid.
My guess was doing something like
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdio>
#include <vector>
#include <Windows.h>
using namespace cv;
using namespace std;
int main(int argc, char **argv) {
Mat image = imread("image.jpg", CV_LOAD_IMAGE_GRAYSCALE);
const int & rows = image.rows;
const int & cols = image.cols;
Rect roi = Rect(rows - 1, cols - 1, 51, 51);
Mat subImage = image(roi);
namedWindow("Window", CV_WINDOW_AUTOSIZE);
imshow("Window", subImage);
waitKey(0);
return 0;
}
But that didn't work. Is there anything you can suggest? maybe there's a function I'm missing that I can use?
If not what's the easiest way?
Negative values did not work as apparently the library in question does not support toroid-like image handling...
I now see two options for you:
either extract (up to) four sub images and recombine them
or do the extraction by hand
For the latter:
int cx = 0, cy = 0;
for(int y = lowerBoundY; y < upperBoundY; ++y)
{
for(int x = lowerBoundX; x < upperBoundX; ++x)
{
subImage[cy][cx++] = image[(y + height) % height][(x + width) % width];
}
++cy;
cx = 0;
}
(Silently assuming you do not exceed the intervals [-width;2*width) and [-height;2*height)...)
Try openCV copyMakeBorder; it extracts subimages, offering several options for border treatment.
Related
code is working properly on Debug mode. However, it does not work on Release mode.
I don't know why... I can't find reason..
I've been having some troubles with using LUT function
I believe the issue is related to applying a LUT to a single channel image,
Unhandled exception at 0x00007FFA61523FE9 in HW2.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000EFCABFE750.
Here is my code. Thank you in advance.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cmath>
#include <ctime>
#define COUNT 1000000
using namespace std;
using namespace cv;
void GammaCorrection(Mat& src, Mat& dst, double gamma) {
Size dims{ 1,256 };
int channels{ 3 };
Mat bgr_planes[3];
// lookup table
Mat lut(dims ,CV_8UC1);
for (int i = 0; i < dims.height; i++) {
double v = pow((i / 255.0) ,gamma);
lut.at<uint8_t>(i) = cvRound( v * 255);
}
// B,G,R color plane
split(src, bgr_planes);
// ERROR
LUT(bgr_planes[0], lut, bgr_planes[0]);
LUT(bgr_planes[1], lut, bgr_planes[1]);
LUT(bgr_planes[2], lut, bgr_planes[2]);
//B,G,R color plane merge
merge(bgr_planes, channels, dst);
}
int main() {
clock_t start, end;
Mat origin = imread("..\\origin.png", IMREAD_COLOR);
Mat result = Mat(origin.size(), CV_8UC3);
start = clock();
//----------
//for (int i = 0; i < COUNT; i++) {
GammaCorrection(origin, result, 0.4);
//}
//-----------
end = clock();
cout << (double)(end - start) / COUNT << endl;
}
I'm doing this project in OpenCV C++ where i make the reflection of a given image, just like the flip function but with the coordinates of each pixel. the problem is that the image output that i get is all blue with a line horizontally, i believe that my code is only affecting the first channel.
I tried to do imageReflectionFinal.at<Vec3b>(r,c) = image.at<Vec3b>(r,c); in order to solve it, but nothing changed. I'll leave the code below, thanks in advance.
Mat image = imread("image_dir/image.jpg");
Mat imageReflectionFinal = Mat::zeros(image.size(), image.type());
for(unsigned int r=0; r<image.rows; r++) {
for(unsigned int c=0; c<image.cols; c++) {
imageReflectionFinal.at<Vec3b>(r,c) = image.at<Vec3b>(r,c);
Vec3b sourcePixel = image.at<Vec3b>(r,c);
imageReflectionFinal.at<Vec3b>(r, c) = (uchar)(c, -r + (220)/2);
}
}
If you don't want to use flip function, you can change the x-coordinates(cols) of each rows mirrorly. Here is the code:
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
int main() {
//You can change as "Mat3b" for the 3-channel images
Mat1b image = imread("/ur/image/directory/image.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat1b imageReflectionFinal = Mat::zeros(image.size(), image.type());
for(unsigned int r=0; r<image.rows; r++) {
for(unsigned int c=0; c<image.cols; c++) {
imageReflectionFinal(r, c) = image(r, image.cols - 1 - c);
//y-axis(r) doesnt change only x-axis(cols) mirroring
}
}
imshow("Result",imageReflectionFinal);
waitKey(0);
return 0;
}
This answer is also my reference.
I am new to OpenCV. I am trying to create a random color image. Firstly I tried to create a random grayscale image. The code I have attached below
void random_colour(Mat input_image) {
for (int i = 0; i < image.rows; i++)
for (int j = 0; j < image.cols; j++)
image.at<uchar>(i,j)= rand()%255;
imwrite("output.tif",image);
}
int main( int argc, char** argv )
{
Mat img=Mat::zeros(100,100,CV_8UC1);
random_colour(img);
waitKey(0);
return 0;
}
The output obtained is
Now I changed my above code to create a random colour image as.
void random_colour(Mat input_image) {
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
image.at<Vec3b>(i,j)[0] = rand()%255;
image.at<Vec3b>(i,j)[1] = rand()%255;
image.at<Vec3b>(i,j)[2] = rand()%255;
}
}
imwrite("output.tif",image);
}
The main function remains same. While doing so, I get a runtime error. Please help me on what should I do. What I understood is that each pixel in colour space has three component RGB. So therefore I am changing all the three component of each pixel. I am not getting the output what I wanted.
no need to reinvent the wheel, please use cv::randu()
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
int main(int argc, char** argv)
{
Mat img(100, 100, CV_8UC3);
randu(img, Scalar(0, 0, 0), Scalar(255, 255, 255));
imshow("random colors image", img);
waitKey();
return 0;
}
This line creates a greyscale image, it doesn't have three channels.
Mat img=Mat::zeros(100,100,CV_8UC1);
That means when you use this line, its going to crash:
image.at<Vec3b>(i,j)[0] = rand()%255;
You need to not use CV_8UC1 because that creates 1 channel (C1) try it with CV_8UC3 instead
Mat img=Mat::zeros(100,100,CV_8UC3);
FYI 8u means 8 bit so values 0 to 255, CX is the number of channels in the image. If you want BGR you need C3.
I'm trying to get eigenvalues from an image matrix. I tried with a streaming video and did not get an error. When I tried to find eigenvalues of a stored image I get cv::Exception at memory location 0x0000000296AFDAD0 error.
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
void calceigen(Mat covarm){
Mat eigenvalues;
Mat eigenvectors;
eigen(covarm, eigenvalues, eigenvectors);
cout << eigenvalues;
cout << eigenvectors;
}
void covard(Mat g)
{
Mat covarm;
Mat b;
calcCovarMatrix(g, covarm, b, CV_COVAR_COLS, CV_64F);
calceigen(covarm);
}
int main(int argc, const char** argv)
{
cv::Mat image = cv::imread("C:/Users/DellPc/Desktop/images1.jpg");
//setup image files used in the capture process
Mat grayscaleFrame;
Mat graysc;
//convert captured image to gray scale and equalize
//cvtColor(image, grayscaleFrame, CV_BGR2GRAY);
//resize(grayscaleFrame, graysc, Size(16, 16), 0, 0, INTER_LANCZOS4);
int rows = image.rows;
int cols = image.cols;
cv::Size s = image.size();
rows = s.height;
cols = s.width;
cout << rows << endl;
cout << cols << endl;
covard(image);
//find faces and store them in the vector array
imshow("small", image);
waitKey(33);
return 0;
}
Please have a look at the following code
#include <iostream>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\core\core.hpp>
using namespace std;
using namespace cv;
void reduceColor(Mat&,int=64);
int main()
{
Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Koala.jpg");
namedWindow("Image");
imshow("Image",image);
//reduceColor(image,64);
waitKey(0);
}
void reduceColor(Mat &image,int division)
{
int numberOfRows = image.rows;
int numberOfColumns = image.cols * image.channels();
for(int i=0;i<numberOfRows;i++)
{
uchar *data = image.ptr<uchar>(i);
for(int pixel=0;pixel<numberOfColumns;pixel++)
{
data[i] = data[i]/division*division + division/2;
}
}
namedWindow("Image2");
imshow("Image2",image);
}
This is Computer Vision. I am trying to read an image and reduce it's color by navigating through all the pixels and channels. But, the colour is not reduced! It simply displays the original image! Pleas help!
Variable i is never incremented in your nested for loop, but you're setting data[i]. So in all likelihood, a few pixels in the first column are changing after the function call, but nothing else is.