My task is to perform some operations on the ROI of an image. But after performing these, I want the changes also to be made visible in the same region of the original image (in code called "image"), not just in the ROI as a separate image (which is "image_roi2"). How could I achieve this?
My code looks like this:
Mat image;
Mat image_roi2;
float thresh;
Rect roi = Rect(x, y, widh, height);
Mat image_roi = image(roi);
threshold(image_roi, image_roi2, thresh, THRESH_TOZERO, CV_THRESH_BINARY_INV);
You just need an additional image_roi2.copyTo( image_roi );
Below is an entire example.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
int main(int argc, char** argv) {
if (argc != 2) {
std::cout << " Usage: " << argv[0] << " imagem.jpg" << std::endl;
return -1;
}
cv::Mat image;
cv::Mat image_roi2;
image = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR); // Read the file
if (!image.data) // Check for invalid input
{
std::cout << "Could not open or find the image" << std::endl;
return -1;
}
cv::Rect roi( 100, 100,200, 200);
cv::Mat image_roi = image( roi );
cv::threshold(image_roi, image_roi2, 250, 255, CV_THRESH_BINARY_INV );
image_roi2.copyTo( image_roi );
cv::namedWindow("Imagem", CV_WINDOW_NORMAL | CV_WINDOW_KEEPRATIO);
cv::resizeWindow("Imagem", 600, 400);
cv::imshow("Imagem", image); // Show our image inside it.
cv::waitKey(0); // Wait for a keystroke in the window
return 0;
}
I think this is what you want - image_roi.copyTo(image(roi));
Related
I am new to RealSense & C++. I know this question might be an easy one.
However, I cannot solve it even though I searched for half a day.
I am trying to use Realsense with OpenCV face detection(Haarcascade).
But when I use 'face_cascade.detectMultiScale', the project gets an access violation error.
(just like the picture underneath)
and my codes are this :
// License: Apache 2.0.See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include <rs.hpp> // Include RealSense Cross Platform API
#include <opencv2/opencv.hpp> // Include OpenCV API
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace std;
using namespace cv;
//String face_cascade_name;
CascadeClassifier face_cascade;
string window_name = "Face detection";
// Runs the Haar-cascade face detector on `frame`, draws a red ellipse around
// each detected face, and shows the annotated frame in `window_name`.
// Precondition: face_cascade must have been loaded successfully.
void detectAndDisplay(Mat frame)
{
    // Fix: check for an empty frame BEFORE any processing. In the original
    // code this check came after cvtColor, which throws on an empty input,
    // so the guard could never help.
    if (frame.empty()) {
        printf("error, no data");
        return;
    }
    printf("no problem");

    Mat frame_gray;
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    std::vector<Rect> faces;
    // NOTE(review): a minSize of 500x500 rejects any face smaller than
    // 500 px; the conventional value is Size(30, 30). An access violation
    // inside detectMultiScale usually means face_cascade.load() failed.
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(500, 500));

    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2),
                0, 0, 360, Scalar(0, 0, 255), 4, 8, 0);
    }
    imshow(window_name, frame);
}
// Streams color frames from a RealSense camera and runs face detection on each.
int main(int argc, char * argv[]) try
{
    // Declare RealSense pipeline, encapsulating the actual device and sensors.
    rs2::pipeline pipe;

    // Fix: verify the cascade actually loaded. Calling detectMultiScale on an
    // empty CascadeClassifier is the classic cause of the access violation
    // the question reports.
    if (!face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml")) {
        std::cerr << "--(!)Error loading face cascade" << std::endl;
        return EXIT_FAILURE;
    }

    // Start streaming with default recommended configuration.
    pipe.start();

    // NOTE: this local shadows the global `window_name` ("Face detection")
    // that detectAndDisplay draws into, so two windows exist.
    const auto window_name = "Display Image";
    namedWindow(window_name, WINDOW_AUTOSIZE);

    while (waitKey(1) < 0 && cvGetWindowHandle(window_name))
    {
        rs2::frameset data = pipe.wait_for_frames(); // wait for the next frame set
        rs2::frame color = data.get_color_frame();

        // Query frame size (width and height).
        const int color_w = color.as<rs2::video_frame>().get_width();
        const int color_h = color.as<rs2::video_frame>().get_height();

        // Wrap the RealSense buffer in a Mat header (no copy). The buffer is
        // only valid while `color` is alive, which holds for this iteration.
        Mat image(Size(color_w, color_h), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
        detectAndDisplay(image);
    }
    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    // RealSense-specific failures carry the failing function and arguments.
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception& e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}
I have written a program in OpenCV (C++) to manipulate a camera property. I am trying to blur my camera display using a track bar. The code works, but only under a certain condition: it works when I change the position of the track bar using a mouse click, but if I try to slide the track bar it gives me the error mentioned below.
Here is my code
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
Mat image, image_blurred;
int slider=5;
float sigma=0.3 *((slider - 1)*0.5 - 1) +0.8;
void on_Trackbar(int,void *)
{
int k_size = max(1,slider);
//k_size = k_size%2 == 0 ? k_size+1 : k_size;
setTrackbarPos("kernel","Blur window",3);
sigma=0.3 *((slider - 1)*0.5 - 1) +0.8;
GaussianBlur(image,image_blurred,Size(3,3),sigma);
}
int main()
{
Mat img;
VideoCapture cap(0);
if(!cap.isOpened())
{
cout<<"Camera is not successfully opened"<<endl;
return -1;
}
namedWindow("original image",CV_WINDOW_AUTOSIZE);
namedWindow("Blur Image",CV_WINDOW_AUTOSIZE);
while(!char(waitKey(30)=='q') && cap.isOpened())
{
cap>>img;
GaussianBlur(img,image_blurred,Size(slider,slider),sigma);
createTrackbar("kernel","Blur Image",&slider,21,on_Trackbar);
imshow("Blur Image",image_blurred);
imshow("original image",img);
}
destroyAllWindows();
return 0;
}
Please share your thoughts. Thanks in advance!
In the while loop, you're passing an invalid value to GaussianBlur, since slider can also be an even number.
You can correct this introducing a new variable int kernel_size = 2*slider+1. slider now is the radius of the kernel, and kernel_size is guaranteed to be odd.
Also you don't need to call GaussianBlur in the callback function, since it's already called in the main loop. The only goal of the callback is to update the values of kernel_size and sigma.
This code will work as expected:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// State shared between the trackbar callback and the capture loop.
Mat image, image_blurred;
int slider = 0;      // trackbar position, used as the kernel radius
int kernel_size = 3; // always odd: 2 * slider + 1
float sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8;

// Trackbar callback: derive an odd kernel size from the slider radius and
// recompute sigma with OpenCV's default-sigma formula. The blur itself is
// performed in the main loop, so nothing else happens here.
void on_Trackbar(int, void *)
{
    kernel_size = slider * 2 + 1;
    sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8;
}
// Captures webcam frames and shows both the raw and the Gaussian-blurred
// stream; the trackbar controls the blur radius. Press 'q' to quit.
int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "Camera is not successfully opened" << endl;
        return -1;
    }

    namedWindow("original image", CV_WINDOW_AUTOSIZE);
    namedWindow("Blur Image", CV_WINDOW_AUTOSIZE);
    // Create the trackbar once, outside the loop.
    createTrackbar("kernel", "Blur Image", &slider, 21, on_Trackbar);

    Mat img;
    while (!char(waitKey(30) == 'q') && cap.isOpened())
    {
        cap >> img;
        // kernel_size is kept odd by the callback, so this call is always valid.
        GaussianBlur(img, image_blurred, Size(kernel_size, kernel_size), sigma);
        imshow("Blur Image", image_blurred);
        imshow("original image", img);
    }
    destroyAllWindows();
    return 0;
}
i am reading the Learning CV book, i came across the first example and encounter this problem
Using OPENCV 3.0.0 and VS 2013, all libraries added and checked.
the code is as follows
#include "opencv2/highgui/highgui.hpp"
int main( int argc, char** argv)
{
IplImage* img = cvLoadImage(argv[1]);
cvNamedWindow("Example1", CV_WINDOW_AUTOSIZE);
cvShowImage("Example1", img);
cvWaitKey(0);
cvReleaseImage(&img);
cvDestroyWindow("Example1");
}
So after compiling or build, I got a window named Example1, and it is grey, no image in the window.
Is this correct? Or what should I expect to get?
You are not loading the image correctly, i.e. argv[1] has an invalid path. You can check this like:
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
// Same example, with an explicit check that the image actually loaded.
int main(int argc, char** argv)
{
    // Load from the command line; swap in a hard-coded path to test:
    // IplImage* img = cvLoadImage("path_to_image");
    IplImage* img = cvLoadImage(argv[1]);
    if (!img)
    {
        std::cout << "Image not loaded";
        return -1;
    }

    cvNamedWindow("Example1", CV_WINDOW_AUTOSIZE);
    cvShowImage("Example1", img);
    cvWaitKey(0);

    // Release the image and tear down the window (legacy C API cleanup).
    cvReleaseImage(&img);
    cvDestroyWindow("Example1");
}
You can supply the path also directly in the code like:
IplImage* img = cvLoadImage("path_to_image");
You can refer here to know why your path may be wrong.
You also shouldn't use old C syntax, but use the C++ syntax. Your example will be like:
// Fix: use forward slashes in the include path — backslashes are
// Windows-only and non-portable in #include directives.
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;

// Modern C++ API version of the same example: load, check, and display.
int main()
{
    Mat3b img = imread("path_to_image");
    if (img.empty()) // imread returns an empty Mat on failure
    {
        std::cout << "Image not loaded";
        return -1;
    }
    imshow("img", img);
    waitKey();
    return 0;
}
You can refer to this answer to know how to setup Visual Studio correctly.
It's not clear whether the image is being loaded. OpenCV will fail silently if it can't find the image.
Try
// cv::imread fails silently (returns an empty Mat), so test for that case.
// Fix: cv::Mat::data is a data member, not a member function — the original
// `img.data() == nullptr` does not compile; use empty() instead.
auto img = cv::imread(name, CV_LOAD_IMAGE_ANYDEPTH);
if (img.empty())
{
    std::cout << "Failed to load image" << std::endl;
}
I'm trying to figure out how motion detection work in opencv.
I can see there the video analysis reference but I don't find enough information about how this is used.
I've also seen some people using absdiff. I tried it like this, but it gives me an exception (a memory error):
OpenCV Error: Sizes of input arguments do not match (The operation is neither 'a
rray op array' (where arrays have the same size and the same number of channels)
, nor 'array op scalar', nor 'scalar op array') in cv::arithm_op, file C:\builds
\2_4_PackSlave-win32-vc12-shared\opencv\modules\core\src\arithm.cpp, line 1287
the code is
#include <iostream>
#include <sstream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
int main()
{
//create matrix for storage
Mat image;
Mat image2;
Mat image3;
Mat image4;
//initialize capture
VideoCapture cap;
cap.open(0);
//create window to show image
namedWindow("window", 1);
while (1){
//copy webcam stream to image
cap >> image;
cap >> image2;
absdiff(image, image2, image3);
threshold(image3, image4, 128, 255, THRESH_BINARY);
//print image to screen
if (!image.empty()) {
imshow("window", image3);
}
//delay33ms
waitKey(10);
//
}
}
I'm obviously not using it right.
You need to confirm the VideoCapture was successful before using the images. Also, you want to test if the image was captured successfully before using it. Try this:
VideoCapture cap(0);
if(!cap.isOpened()) {
std::cerr << "Failed to open video capture" << std::endl;
return -1;
}
namedWindow("window");
while(true) {
cap >> image;
cap >> image2;
if(image.empty() || image2.empty()) {
std::cerr << "failed to capture images\n";
return -1;
}
absdiff(image, image2, image3);
threshold(image3, image4, 128, 255, THRESH_BINARY);
imshow("window", image);
int k = waitKey(30) & 0xff;
if('q' == k || 27 == k)
break;
}
I'm having trouble using the filterByColor functionality in the SimpleBlobDetector tool that ships with OpenCV. Make doesn't give me any errors, but when I try to run the program, it segfaults at blobme.detect().
It works fine when I use filterByArea, it's just filterByColor that's giving me headaches.
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#define ACTIVE_CHANNEL 2
int main(int argc, char* argv[])
{
if (argc != 3)
{
std::cout << "./image_proc <file> <thresh> (-1 for default)" << std::endl;
return -1;
}
cv::Mat test_im = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);
cv::Mat hsvim, outim, channels[3], descriptor;
std::vector<cv::KeyPoint> keypoints;
// Convert to HSV
cv::cvtColor(test_im, hsvim, CV_RGB2HSV);
cv::split(hsvim, channels);
cv::SimpleBlobDetector::Params params;
params.filterByInertia = false;
params.filterByConvexity = false;
params.filterByColor = true;
params.filterByCircularity = false;
params.filterByArea = false;
params.blobColor = 255;
//params.minArea = 100.0f;
//params.maxArea = 500.0f;
// Trying to use blob detector
cv::SimpleBlobDetector blobme(params);
blobme.detect(channels[ACTIVE_CHANNEL], keypoints);
// Print keypoints
cv::drawKeypoints(channels[ACTIVE_CHANNEL], keypoints, outim);
// Display
cv::namedWindow("Display window", cv::WINDOW_AUTOSIZE);
cv::imwrite("imout.jpg", outim);
cv::imshow("Display window", outim);
cv::waitKey(0);
return 0;
}