Contour segmentation using OpenCV - C++

I am currently working on separating overlapped objects using C++ and OpenCV 3.0. So far I have been able to find contours and corner points. Now I need to separate these objects based on the corner points and put them into separate vectors.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include<algorithm>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int thresh = 100;
int max_thresh = 255;
int maxCorners =14;
int maxTrackbar =14;
RNG rng(12345);
char* source_window = "Image";
/// Function header
void goodFeaturesToTrack_Demo(int, void*);
/// Function header
void thresh_callback(int, void*);
void printvec(vector<int>& contours){
for (int i = 0; i < contours.size(); i++){
cout << contours[i] << " ";
}
cout << endl;
}
/** @function main */
int main(int argc, char** argv)
{
    /// Load source image, convert it to gray and blur it
    src = imread("demo.png", 1);
    cvtColor(src, src_gray, COLOR_BGR2GRAY);
    blur(src_gray, src_gray, Size(3, 3));

    /// Create window
    namedWindow(source_window, WINDOW_AUTOSIZE);

    /// Create trackbar to set the number of corners
    createTrackbar("Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);

    imshow(source_window, src);
    goodFeaturesToTrack_Demo(0, 0);

    waitKey(0);
    return 0;
}
void goodFeaturesToTrack_Demo(int, void*)
{
    if (maxCorners < 1) { maxCorners = 1; }

    /// Parameters for the Shi-Tomasi corner detector
    vector<Point2f> corners;
    double qualityLevel = 0.01;
    double minDistance = 10;
    int blockSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;

    /// Copy the source image
    Mat copy = src.clone();

    /// Apply corner detection
    goodFeaturesToTrack(src_gray,
                        corners,
                        maxCorners,
                        qualityLevel,
                        minDistance,
                        Mat(),
                        blockSize,
                        useHarrisDetector,
                        k);

    /// Draw corners detected
    cout << "** Number of corners detected: " << corners.size() << endl;
    int r = 4;
    for (size_t i = 0; i < corners.size(); i++)
    {
        circle(copy, corners[i], r,
               Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)),
               -1, 8, 0);
    }

    /// Show what you got
    namedWindow(source_window, WINDOW_AUTOSIZE);
    imshow(source_window, copy);

    /// Set the needed parameters to find the refined corners
    Size winSize = Size(5, 5);
    Size zeroZone = Size(-1, -1);
    TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 40, 0.001);

    /// Calculate the refined corner locations
    cornerSubPix(src_gray, corners, winSize, zeroZone, criteria);

    Mat canny_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Detect edges using Canny
    Canny(src_gray, canny_output, thresh, thresh * 2, 3);

    /// Find contours
    findContours(canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));

    /// Write them down
    for (size_t i = 0; i < corners.size(); i++)
    {
        cout << " -- Refined Corner [" << i << "] (" << corners[i].x << "," << corners[i].y << ")" << endl;
        int xp = cvRound(corners[i].x);
        int yp = cvRound(corners[i].y);
        cout << " -- Refined Corner [" << i << "] (" << xp << "," << yp << ")" << endl;
    }
    for (size_t i = 0; i < contours.size(); i++)
    {
        for (size_t j = 0; j < contours[i].size(); j++)
        {
            cout << "Point(x,y)=" << contours[i][j].x << "," << contours[i][j].y << endl;
        }
    }
}
This is the result of the code: [result image]
and this is what I want to get: [desired result image]
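One possible approach for the separation step, sketched under the assumption that the refined corners lie on (or very near) the contour points: walk each contour and cut it wherever a point falls within a small snap radius of a detected corner, collecting each piece into its own vector. The helper splitContoursAtCorners and the 5-pixel radius are hypothetical, not part of the code above, and the radius would need tuning per image:

// Hypothetical helper: split contours into segments at detected corners.
// snapRadius is an assumed tolerance for matching a contour point to a
// refined corner; tune it for your images.
vector<vector<Point> > splitContoursAtCorners(const vector<vector<Point> >& contours,
                                              const vector<Point2f>& corners,
                                              double snapRadius = 5.0)
{
    vector<vector<Point> > segments;
    for (size_t i = 0; i < contours.size(); i++)
    {
        vector<Point> current;
        for (size_t j = 0; j < contours[i].size(); j++)
        {
            current.push_back(contours[i][j]);
            // If this contour point lies near any corner, close the
            // current segment and start a new one at the corner.
            for (size_t c = 0; c < corners.size(); c++)
            {
                Point2f d = corners[c] - Point2f(contours[i][j]);
                if (d.x * d.x + d.y * d.y < snapRadius * snapRadius)
                {
                    if (current.size() > 1)
                        segments.push_back(current);
                    current.clear();
                    current.push_back(contours[i][j]); // adjacent segments share the corner point
                    break;
                }
            }
        }
        if (current.size() > 1)
            segments.push_back(current);
    }
    return segments;
}

Calling it after findContours and cornerSubPix in the code above, e.g. vector<vector<Point> > segments = splitContoursAtCorners(contours, corners);, yields one vector per boundary piece, which can then be grouped into separate objects.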

Related

Add a button with functionality in C++ using Visual Studio

I am a newbie to C++ and the IDE I am using is Visual Studio 2022. I have written code to detect a face (eyes and mouth too) and save the ROI to a folder on the PC. What it does now can be thought of as an auto-capture of the ROI as soon as the face is detected.
I now want to create the function for "force capture", for which I will need a button, and add pretty much the same code I wrote for auto-capture to give it functionality.
How do I add the button and make it perform its task?
I found related answers, but they use Qt and I am not sure how to apply that here.
Thanks a ton! I really need help.
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
//(1) include face header
#include "opencv2/face.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
//(2) include object detection header
#include "opencv2/objdetect.hpp"
#include <iostream>
#include <opencv2/imgproc/types_c.h>
// file handling
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;
using namespace ml;
using namespace cv::face;

//(3) Global variables
Ptr<Facemark> facemark;                           // landmark detection
CascadeClassifier faceDetector, mouth, eye, eye1; // face detection
string name, filename;
void process(Mat img, Mat imgcol)
{
    vector<Rect> faces;
    faceDetector.detectMultiScale(img, faces);
    Mat imFace;
    if (faces.size() != 0) {
        for (size_t i = 0; i < faces.size(); i++)
        {
            cv::rectangle(imgcol, faces[i], Scalar(255, 0, 0));
            imFace = imgcol(faces[i]);
            resize(imFace, imFace, Size(imFace.cols * 5, imFace.rows * 5));
            faces[i] = Rect(faces[i].x = 0, faces[i].y = 0, faces[i].width * 5,
                            (faces[i].height) * 5);
        }
        vector<vector<Point2f> > shapes;
        // fit the predefined landmark shapes to the faces; imFace is scaled to the face size
        if (facemark->fit(imFace, faces, shapes))
        {
            for (unsigned long i = 0; i < faces.size(); i++) {
                for (unsigned long k = 0; k < shapes[i].size(); k++) {
                    cv::circle(imFace, shapes[i][k], 5, cv::Scalar(0, 0, 255), FILLED);
                }
            }
        }
        namedWindow("Detected_shape");
        imshow("Detected_shape", imFace);
        waitKey(5);
    }
    else {
        cout << "Faces not detected." << endl;
    }
}
int main()
{
    facemark = FacemarkLBF::create();
    facemark->loadModel("C:/Dev/HeadPose/HeadPose/lbfmodel.yml");
    faceDetector.load("D:/opencv/build/install/etc/haarcascades/haarcascade_frontalface_alt2.xml");
    mouth.load("D:/opencv/build/install/etc/haarcascades/haarcascade_smile.xml");
    eye.load("D:/opencv/build/install/etc/haarcascades/haarcascade_eye.xml");
    cout << "Loaded model" << endl;

    Mat frame, grayframe, testframe, faceROI;
    int x_axis, y_axis;
    namedWindow("Detecting");
    VideoCapture cap(0); // 1 for a different camera

    while (1)
    {
        if (!cap.read(frame))
        {
            cout << "an error occurred while taking the frame from cap" << endl;
        }

        // face
        vector<Rect> faces;
        Mat crop;
        Mat res;
        Mat gray;
        string text;
        stringstream sstm;
        cvtColor(frame, grayframe, CV_BGR2GRAY);
        equalizeHist(grayframe, testframe);
        faceDetector.detectMultiScale(testframe, faces, 1.1, 3, CASCADE_SCALE_IMAGE, Size(30, 30));

        Rect roi_b;
        for (size_t ic = 0; ic < faces.size(); ic++)
        {
            roi_b = faces[ic]; // note: the original indexed faces[ib] with ib fixed at 0
            crop = frame(roi_b);
            resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR);
            cvtColor(crop, gray, COLOR_BGR2GRAY);
            stringstream ssfn;
            filename = "C:\\Users\\Hp\\Desktop\\Faces\\";
            ssfn << filename.c_str() << name << "_" << roi_b.width << "_" << roi_b.height << ".jpg";
            filename = ssfn.str();
            imwrite(filename, res);
            rectangle(frame, faces[ic], Scalar(255, 0, 255), 2, 8, 0);
            Mat face = frame(faces[ic]);
            cvtColor(face, face, CV_BGR2GRAY);

            // mouth
            vector<Rect> mouthi;
            mouth.detectMultiScale(face, mouthi);
            for (size_t k = 0; k < mouthi.size(); k++)
            {
                // note: the original always drew mouthi[0]
                Point pt1(mouthi[k].x + faces[ic].x, mouthi[k].y + faces[ic].y);
                Point pt2(pt1.x + mouthi[k].width, pt1.y + mouthi[k].height);
                rectangle(frame, pt1, pt2, Scalar(255, 0, 0), 1, 8, 0);
            }

            // eyes
            faceROI = frame(faces[ic]); // take the face area as the region of interest for the eyes
            vector<Rect> eyes;
            eye.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5)); // detect eyes in every face
            /*eye1.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5));*/
            for (size_t j = 0; j < eyes.size(); j++)
            {
                // locate and circle the eyes
                Point center(faces[ic].x + eyes[j].x + eyes[j].width * 0.5, faces[ic].y + eyes[j].y + eyes[j].height * 0.5);
                int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
                circle(frame, center, radius, Scalar(255, 0, 0), 1, 8, 0);
                x_axis = eyes[j].x; // store the eye's x coordinate
                y_axis = eyes[j].y; // store the eye's y coordinate
                cout << "Position of the eyes is: (" << x_axis << "," << y_axis << ")" << endl;
            }
        }

        sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
        text = sstm.str();
        if (!crop.empty()) {
            imshow("detected", crop);
        }
        else destroyWindow("detected");

        cout << "Name\n";
        cin >> name;

        Mat img; // image containers
        Mat imgbw;
        cap >> img; // image from webcam
        resize(img, img, Size(460, 460), 0, 0, INTER_LINEAR_EXACT);
        cvtColor(img, imgbw, COLOR_BGR2GRAY);
        process(imgbw, img);

        imshow("Detecting", frame);
        if (waitKey(30) == 27) {
            break;
        }
    }
    return 0;
}
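Since plain highgui has no native buttons (cv::createButton only works with the Qt backend), a common workaround that needs no extra toolkit is to draw a button-shaped rectangle on the frame and watch for clicks inside it with setMouseCallback. The following is a minimal, self-contained sketch of that idea, not a drop-in patch: the window name, button rectangle, forceCapture flag and output filename are illustrative assumptions, and the capture branch is where the existing auto-capture save code would go.

#include <opencv2/opencv.hpp>
#include <string>

// Assumed button geometry and state flag, for illustration only.
const cv::Rect kButtonRect(10, 10, 120, 40);
bool forceCapture = false;

void onMouse(int event, int x, int y, int, void*)
{
    // A click inside the drawn rectangle acts as the button press.
    if (event == cv::EVENT_LBUTTONDOWN && kButtonRect.contains(cv::Point(x, y)))
        forceCapture = true; // consumed once per frame in the main loop
}

int main()
{
    cv::VideoCapture cap(0);
    cv::namedWindow("Detecting");
    cv::setMouseCallback("Detecting", onMouse);
    cv::Mat frame;
    int shot = 0;
    while (cap.read(frame))
    {
        // Draw the "button" on every frame.
        cv::rectangle(frame, kButtonRect, cv::Scalar(0, 200, 0), cv::FILLED);
        cv::putText(frame, "Capture", kButtonRect.tl() + cv::Point(10, 27),
                    cv::FONT_HERSHEY_SIMPLEX, 0.6, cv::Scalar(255, 255, 255), 2);
        if (forceCapture)
        {
            forceCapture = false;
            // Put the same ROI-saving code used for auto-capture here.
            cv::imwrite("force_capture_" + std::to_string(shot++) + ".jpg", frame);
        }
        cv::imshow("Detecting", frame);
        if (cv::waitKey(30) == 27) break;
    }
    return 0;
}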

Filtering For Only Red Contours Pixel By Pixel With an HSV Range

I'm trying to calculate the mean and standard deviation for red-only contours. I suspect that for red, the Hue values of an HSV Vec3b lie in the ranges 0-10 and 165-179.
Here is my code:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include <cmath>

using namespace cv;
using namespace std;
int main(int argc, char** argv) {
    // Mat declarations
    // Mat img = imread("white.jpg");
    // Mat src = imread("Rainbro.png");
    Mat src = imread("multi.jpg");
    // Mat src = imread("DarkRed.png");
    Mat Hist;
    Mat HSV;
    Mat Edges;
    Mat Grey;
    vector<vector<Vec3b>> hueMEAN;
    vector<vector<Point>> contours;

    // Variables
    int edgeThreshold = 1;
    int const max_lowThreshold = 100;
    int ratio = 3;
    int kernel_size = 3;
    int lowThreshold = 0;

    // Windows
    namedWindow("img", WINDOW_NORMAL);
    namedWindow("HSV", WINDOW_AUTOSIZE);
    namedWindow("Edges", WINDOW_AUTOSIZE);
    namedWindow("contours", WINDOW_AUTOSIZE);

    // Color transforms
    cvtColor(src, HSV, CV_BGR2HSV);
    cvtColor(src, Grey, CV_BGR2GRAY);
    // Perform histogram equalization to help the red hues stand out for
    // better edge detection
    equalizeHist(Grey, Grey);

    // Image transforms
    blur(Grey, Edges, Size(3, 3));
    Canny(Edges, Edges, max_lowThreshold, lowThreshold * ratio, kernel_size);
    findContours(Edges, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

    // Rainbro Mat
    // Mat drawing = Mat::zeros(432, 700, CV_8UC1);
    // Multi Mat
    Mat drawing = Mat::zeros(630, 1200, CV_8UC1);
    // Red variation Mat
    // Mat drawing = Mat::zeros(600, 900, CV_8UC1);

    vector<vector<Point>> ContourPoints;

    /* This loop goes through all contours and uses the y coordinate of each
       contour point as the row index into the HSV Mat. The Vec3b pixel at
       that position is read, and it is kept only if its Hue value lies
       between 0-10 or 165-179, i.e. only for red contours. */
    for (size_t i = 0; i < contours.size(); i++) {
        vector<Vec3b> vf;
        vector<Point> points;
        bool isContourRed = false;
        for (size_t j = 0; j < contours[i].size(); j++) {
            // row (y) and column (x) of the Mat from the contour point
            int MatRow = contours[i][j].y;
            int MatCol = contours[i][j].x;
            Vec3b* HsvRow = HSV.ptr<Vec3b>(MatRow);
            int h = int(HsvRow[MatCol][0]);
            int s = int(HsvRow[MatCol][1]);
            int v = int(HsvRow[MatCol][2]);
            cout << "Coordinate: " << contours[i][j].x << "," << contours[i][j].y << endl;
            cout << "Hue: " << h << endl;
            // Keep contour points that are only in the red spectrum, Hue 0-10 or 165-179
            if ((h <= 10 || (h >= 165 && h <= 180)) && (s > 0) && (v > 0)) {
                cout << "Coordinate: " << contours[i][j].x << "," << contours[i][j].y << endl;
                cout << "Hue: " << h << endl;
                vf.push_back(Vec3b(h, s, v));
                points.push_back(contours[i][j]);
                isContourRed = true;
            }
        }
        if (isContourRed) {
            hueMEAN.push_back(vf);
            ContourPoints.push_back(points);
        }
    }

    drawContours(drawing, ContourPoints, -1, Scalar(255, 255, 255), 2, 8);

    // Calculate mean and standard deviation for each contour
    cout << "contour Means & STD of Vec3b:" << endl;
    for (size_t i = 0; i < hueMEAN.size(); i++) {
        Scalar meanTemp = mean(hueMEAN.at(i));
        Scalar sdTemp;
        cout << i << ": " << endl;
        cout << meanTemp << endl;
        cout << " " << endl;
        meanStdDev(hueMEAN.at(i), meanTemp, sdTemp);
        cout << sdTemp << endl;
        cout << " " << endl;
    }
    cout << "Actual Contours: " << contours.size() << endl;
    cout << "# Contours: " << hueMEAN.size() << endl;

    imshow("img", src);
    imshow("HSV", HSV);
    imshow("Edges", Edges);
    imshow("contours", drawing);
    waitKey(0);
    return 0;
}
I've come across an issue in this particular case:
On the right is the original image; the left displays the HSV Mat, the edge detection, and an arrow pointing to the contours Mat that I drew after the filtering.
Here is the source image: [image]
After the filtering is complete I just calculate the mean and standard deviation.
I have a feeling that my ranges of 0-10 and 165-179 are incorrect. Any suggestions or further improvements would help a lot.
Thanks.
A quick test shows me that the range is correct. Without all the contour extraction stuff, if I just filter the colors using 0-10 and 165-179 ranges, I get the two red boxes in the lower-middle range of your input image.
The contour artifact that you see might actually be coming from both a JPEG artifact (if you zoom in at the limit between the white and red box, you can see that it is gradual and not sharp, due to JPEG compression), and the fact that you are only thresholding in the Hue channel. At low saturation, many grey-ish colors which you don't want will actually be fitting within your hue threshold. The solution for that is to filter pixel values in the S and V channel as well.
In your code, that means changing the line if ((h <= 10 || (h >= 165 && h <= 180)) && (s > 0) && (v > 0)) { to if ((h <= 10 || (h >= 165 && h <= 180)) && (s > 50) && (v > 50)) {
The value 50 works on that specific sample image, but of course the correct value will depend on your input image.
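As a side note, a quick way to sanity-check the ranges without the contour pipeline is to build the red mask directly with inRange over the two hue intervals and OR the results together. A minimal sketch, assuming the same test image and the 50/50 saturation and value floors suggested above:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main() {
    Mat src = imread("multi.jpg"); // same test image as above
    Mat HSV, maskLow, maskHigh, redMask;
    cvtColor(src, HSV, COLOR_BGR2HSV);
    // Two red intervals at both ends of the OpenCV hue circle (0-179),
    // with S and V floors of 50 to reject grey-ish compression artifacts.
    inRange(HSV, Scalar(0, 50, 50), Scalar(10, 255, 255), maskLow);
    inRange(HSV, Scalar(165, 50, 50), Scalar(179, 255, 255), maskHigh);
    bitwise_or(maskLow, maskHigh, redMask);
    // Mean and standard deviation of the HSV pixels under the mask.
    Scalar meanHSV, stdHSV;
    meanStdDev(HSV, meanHSV, stdHSV, redMask);
    std::cout << "mean: " << meanHSV << " std: " << stdHSV << std::endl;
    imshow("red mask", redMask);
    waitKey(0);
    return 0;
}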

My OpenCV program does not work

I am building number recognition for a PTZ camera using OpenCV, but my program has a big problem:
Error in `./main.out': double free or corruption (!prev)
I think it is leaking memory, so I tried clear() to deallocate the memory, but it does not work.
How can I fix it?
#include "opencv2/opencv.hpp"
#include <iostream>
#include <unistd.h>
#include <time.h>
using namespace cv;
using namespace std;
Mat3b canvas;
string buttonText("Click me!");
Mat frame1;
Mat frame2;
Mat frame3;
Rect rect, temp_rect;
double ratio, delta_x, delta_y, gradient;
int count, friend_count = 0, refinery_count = 0;
void testfunc()
{
vector<Vec4i> hierarchy;
vector<vector<Point> > contours;
findContours(frame3, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point());
vector<vector<Point> > contours_poly(contours.size());
vector<Rect> boundRect(contours.size());
vector<Rect> boundRect2(contours.size());
for (int i = 0; i < contours.size(); i++) {
approxPolyDP(Mat(contours[i]), contours_poly[i], 1, true);
boundRect[i] = boundingRect(Mat(contours_poly[i]));
}
Mat drawing = Mat::zeros(frame3.size(), CV_8UC3);
for (int i = 0; i < contours.size(); i++)
{
ratio = (double)boundRect[i].height / boundRect[i].width;
if ((ratio <= 2.5) && (ratio >= 0.5) && (boundRect[i].area() <= 700) && (boundRect[i].area() >= 100))
{
drawContours(drawing, contours, i, Scalar(0, 255, 255), 1, 8, hierarchy, 0, Point());
rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), Scalar(255, 0, 0), 1, 8, 0);
boundRect2[refinery_count] = boundRect[i];
refinery_count++;
}
}
boundRect2.resize(refinery_count);
imshow("camera4", drawing);
contours_poly.clear();
boundRect.clear();
boundRect2.clear();
contours.clear();
hierarchy.clear();
return;
}
int main(int argc, char** argv)
{
VideoCapture cap1(0);
if (!cap1.isOpened())
{
printf("ERROR. \n");
return -1;
}
// ************* This is notebook camera **********
// ************* This is PTZ camera **********
/*VideoCapture cap2;
string vStreamArs = "rtsp://root:pass#192.168.10.235/ufirststream";
Mat video;
if (!cap2.open(vStreamArs))
{
cout << "[-] ERROR CODE 0 : Not connect camera!!" << endl;
return -1;
}
else
{
cout << "[+] Camera is connected!!" << endl;
}*/
// ************* This is PTZ camera **********
// ************* This is notebook camera **********
int select, plate_width;
while (1)
{
clock_t start = clock();
while (1)
{
if (!cap1.read(frame1))
{
cout << "[-] ERROR CODE 2 : No camera" << endl;
break;
}
imshow("output", frame1);
if ((clock() - start) / CLOCKS_PER_SEC > 10)
{
cout << "[+]Find" << endl;
break;
}
waitKey(1);
}
cvtColor(frame1, frame2, CV_BGR2GRAY);
Canny(frame2, frame3, 100, 300, 3);
imshow("camera3", frame3);
testfunc();
}
getchar();
return 0;
}
Try using the release() method on the Mat images after you are done with them. This will force the memory to be released. Please try it and tell me.
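For what it's worth, judging from the snippet alone, a likely culprit is that refinery_count is a global that is never reset: on the second call to testfunc() the write boundRect2[refinery_count] = boundRect[i]; indexes past the end of the vector and corrupts the heap, which glibc later reports as double free or corruption. A sketch of testfunc() with that fixed, resetting the counter and letting push_back size the vector:

void testfunc()
{
    refinery_count = 0; // reset the global counter on every call
    vector<Vec4i> hierarchy;
    vector<vector<Point> > contours;
    findContours(frame3, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point());
    vector<vector<Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    vector<Rect> boundRect2; // grown with push_back, so no out-of-bounds write
    for (int i = 0; i < (int)contours.size(); i++) {
        approxPolyDP(Mat(contours[i]), contours_poly[i], 1, true);
        boundRect[i] = boundingRect(Mat(contours_poly[i]));
    }
    Mat drawing = Mat::zeros(frame3.size(), CV_8UC3);
    for (int i = 0; i < (int)contours.size(); i++)
    {
        double r = (double)boundRect[i].height / boundRect[i].width;
        if (r <= 2.5 && r >= 0.5 && boundRect[i].area() <= 700 && boundRect[i].area() >= 100)
        {
            drawContours(drawing, contours, i, Scalar(0, 255, 255), 1, 8, hierarchy, 0, Point());
            rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), Scalar(255, 0, 0), 1, 8, 0);
            boundRect2.push_back(boundRect[i]);
            refinery_count++;
        }
    }
    imshow("camera4", drawing);
    // The locals release their memory automatically when they go out of
    // scope, so the manual clear() calls are not needed.
}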

Wrong mass center point (OpenCV and moments function)

I'm trying to calculate the mass center of images using OpenCV, and I get errors, as you can see in the images (the mass center should not be that close to one side in these cases). I also get mass centers that depend on the rotation, which is incorrect.
Below you can see the code, the input image and the output image.
I tried different example codes, and the results are the same.
Output image: [mass center calculated by the program]
Input image: [input image]
Example code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat srcGray;
RNG rng(12345);
int main(int argc, char **argv)
{
// Load source image and convert it to gray
src = imread(argv[1], 1);
// Convert image to gray and blur it
cvtColor(src, srcGray, CV_BGR2GRAY);
blur(srcGray, srcGray, Size(3, 3));
Mat srcThresh;
double otsu;
otsu = threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
Mat cannyOut;
Canny(srcGray, cannyOut, otsu, otsu * 1 / 2, 3, 1);
// Find contours
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(cannyOut, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
// Get the moments
vector<Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i], false);
}
// Get the mass centers:
vector<Point2f> mc(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
// Draw contours
Mat drawing = Mat::zeros(cannyOut.size(), CV_8UC3);
string sObjectNumber; // string which will contain the result
ostringstream sContourNumber; // stream used for the conversion
for (int i = 0; i< contours.size(); i++)
{
// drawing.setTo(Scalar(0.0,0.0,0.0));
sContourNumber << i;
sObjectNumber = sContourNumber.str(); // Convert int to string
Point pCoordinates(mc[i].x + 3, mc[i].y - 3); // Text's coordinates (A little bit off from mass center)
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
circle(drawing, mc[i], 4, color, -1, 8, 0); // Draw mass center
putText(drawing, sObjectNumber, pCoordinates, CV_FONT_HERSHEY_COMPLEX, 1, color, 2, 8); // Write object number
sContourNumber.str(""); // Clear string
sContourNumber.clear(); // Clear any error flags
// imshow("Contours", drawing);
// waitKey();
}
double hu[7];
for (int i = 0; i < contours.size(); i++)
{
cout << "Contour: " << i << " Area: " << contourArea(contours[i]) << " Length: " << arcLength(contours[i], true) << "\n";
for (int j = 0; j < 7; j++)
{
HuMoments(mu[i], hu);
cout << "Contour: " << i << " Hu: " << j << " Result: " << hu[j] << "\n";
}
cout << "\n";
}
imshow("Contours", drawing);
waitKey(0);
return(0);
}
Thanks very much, everyone!
Diego
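A hedged suggestion that may explain both symptoms: Canny produces thin, often broken edge chains, and moments() over such an open chain of points weights wherever the edge pixels happen to cluster, which shifts with rotation. Computing the moments on the filled shape instead, for example the Otsu threshold image the code already produces, gives a centroid that is stable under rotation. A minimal sketch, assuming the same srcThresh as above with a white object on a black background:

// Centroid from the filled shape instead of the Canny edge chain.
Moments m = moments(srcThresh, true); // binaryImage = true
Point2f massCenter(m.m10 / m.m00, m.m01 / m.m00);
cout << "Mass center: " << massCenter << endl;

// Or per object: find contours on the threshold image and fill each one.
// (findContours may modify its input in this OpenCV version, hence clone().)
vector<vector<Point> > shapeContours;
findContours(srcThresh.clone(), shapeContours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < shapeContours.size(); i++)
{
    Mat mask = Mat::zeros(srcThresh.size(), CV_8UC1);
    drawContours(mask, shapeContours, (int)i, Scalar(255), -1); // thickness -1 fills
    Moments mi = moments(mask, true);
    cout << i << ": " << Point2f(mi.m10 / mi.m00, mi.m01 / mi.m00) << endl;
}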

OpenCV Prewitt from scratch weirdness

Because this is a project for an image processing class, I have to implement a couple of linear filters from scratch (I'm not supposed to use already-implemented OpenCV features like Sobel, not even the 2D filter function). The code is at the end of the question.
Since images processed with the Sobel operator give results similar to Prewitt ones, as a test I use a window that displays a Sobel-processed image.
I have only got to the point of applying an operator in the horizontal direction, but I'm already getting weird results. The images speak for themselves:
Original image: [image]
My result using the Prewitt operator in the horizontal direction: [processed image]
I get a weird blue-beige pattern instead of black-and-white horizontal lines. What is happening?
Here is the code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(int, char** argv)
{
int Hprewitt[3][3] = { { -1, 0, 1 }, { -1, 0, 1 }, { -1, 0, 1 } };
int Vprewitt[3][3] = { { -1, -1, -1 }, { 0, 0, 0 }, { 1, 1, 1 } };
int tempInput[3][3];
int tempPixel=0;
Mat src, src_gray;
Mat grad;
const char* window_name = "Sobel Edge Detector";
const char* window_name2 = "Prewitt";
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
int computedIntensity;
src = imread(argv[1]);
if (src.empty())
{
return -1;
}
namedWindow(window_name2, WINDOW_AUTOSIZE);
Mat HprewittMat(src.rows, src.cols, CV_8UC3, Scalar(0, 0, 0));
GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT);
cvtColor(src, src_gray, COLOR_RGB2GRAY);
namedWindow(window_name, WINDOW_AUTOSIZE);
Scalar intensity = src.at<uchar>(Point(50, 50)); // this is how to access intensity at a certain pixel
Vec3b scalarTempPixel = src.at<Vec3b>(Point(1, 1));
cout << "Pixel (50,50) has intensity: " << intensity.val[0] << endl;
// applying horizontal prewitt operator
cout << "\n Image has resolution: " << src.cols << "x" << src.rows << "\n";
for (int i = 2; i < src.cols-1; i++){ // currently going from column 2 to n-2, same for row
for (int j = 2; j < src.rows-1; j++){
// storing a temporary 3x3 input matrix centered on the current pixel
// cout << "Matrix centered on pixel: [" << i << "," << j << "] \n";
for (int k = -1; k < 2; k++){
for (int l = -1; l < 2; l++){
intensity = src.at<uchar>(Point(i + k, j + l));
tempInput[k+1][l+1] = intensity.val[0];
// cout << "[" << intensity.val[0] << "]";
}
// cout << " \n";
}
// convolution of horizontal prewitt kernel with current 3x3 matrix
for (int x = 0; x < 3; x++){
for (int y = 0; y < 3; y++){
tempPixel = tempPixel + tempInput[x][y] * Hprewitt[x][y];
}
}
scalarTempPixel[0] = tempPixel;
HprewittMat.at<Vec3b>(Point(i, j)) = scalarTempPixel;
}
}
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_y, abs_grad_y);
addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
imshow(window_name, grad);
imshow(window_name2, HprewittMat);
waitKey(0);
return 0;
}
So at this point I'm doing the following: I read an image into "src", then I create HprewittMat, initialized to the size of the original image but with black pixels. I convert the src image to a grey one, then iterate through each pixel of the original image, and for each pixel I convolve the surrounding 3x3 neighbourhood with the horizontal Prewitt kernel. I store that value in "tempPixel" and write it into the HprewittMat image.
The next step would be to do the same with the vertical kernel, and then combine the two into the gradient magnitude.
I'm asking this question because I've found similar questions on how to manipulate individual pixels, but usually for Python or Java. There might also be a flaw in the logic I'm using.
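For what it's worth, a few things in the snippet would produce exactly that blue-beige pattern: tempPixel is never reset to 0 between pixels, so the running sum grows and wraps around when it is stored into the uchar channel; only channel 0 (blue, in BGR order) of scalarTempPixel is overwritten, so channels 1 and 2 keep the stale values sampled at pixel (1,1); and src.at<uchar> on a 3-channel Mat reads raw interleaved bytes rather than grey intensities. A minimal sketch of a corrected inner loop, reading from src_gray and writing to a hypothetical single-channel HprewittGray Mat:

// Corrected horizontal Prewitt pass: single-channel input and output,
// per-pixel accumulator, and the result clamped into 0..255.
Mat HprewittGray(src.rows, src.cols, CV_8UC1, Scalar(0));
for (int i = 1; i < src_gray.cols - 1; i++) {
    for (int j = 1; j < src_gray.rows - 1; j++) {
        int acc = 0; // reset for every output pixel
        for (int k = -1; k < 2; k++)
            for (int l = -1; l < 2; l++)
                acc += src_gray.at<uchar>(Point(i + k, j + l)) * Hprewitt[k + 1][l + 1];
        HprewittGray.at<uchar>(Point(i, j)) = saturate_cast<uchar>(abs(acc));
    }
}
imshow(window_name2, HprewittGray);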