I first used this code to capture webcam frames:
#include <opencv2/opencv.hpp> // brings in VideoCapture, Mat, imshow, accumulate
#include <vector>
using namespace cv;

int main(int argc, char** argv)
{
VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) // check if we succeeded
return -1;
cap.set(CV_CAP_PROP_FPS, 15);
std::vector<cv::Mat> images(100);
for (int framenumb = 0; framenumb < 100; ++framenumb)
{
// this is optional, preallocation so there's no allocation
// during capture
images[framenumb].create(480, 640, CV_32FC3);
}
for (int framenumb = 0; framenumb < 100; ++framenumb)
{
Mat frame;
cap >> frame;
if (frame.empty()) break; // end of video stream
imshow("webcam", frame);
if (waitKey(1) == 27) break; // stop capturing by pressing ESC
frame.copyTo(images[framenumb]);
}
and then tried to use the following code to average the captured frames:
Mat avgImg;
Mat capturedImg;
for (int framenumb = 0; framenumb < 100; ++framenumb)
{
avgImg.create(480, 640, CV_32FC3);
capturedImg = images[framenumb];
cv::accumulate(capturedImg, avgImg);
}
avgImg = avgImg / 100;
avgImg.convertTo(avgImg, CV_8UC3);
imshow("averaged", avgImg);
But then it just broke the program and gave me a black image. Can anyone help me identify where the error is? Thank you very much.
You need to:
Create a zero initialized accumulation image
Add every image to it
Divide the accumulated image by the number of images
You instead re-create an empty accumulation image at every frame.
The code should be changed to:
Mat avgImg(480, 640, CV_32FC3, Scalar()); // Create and zero initialize
Mat capturedImg;
for (int framenumb = 0; framenumb < 100; ++framenumb)
{
// avgImg.create(480, 640, CV_32FC3); // Don't create each time!
capturedImg = images[framenumb];
cv::accumulate(capturedImg, avgImg);
}
You can probably simplify your code to:
Mat avgImg(480, 640, CV_32FC3, Scalar()); // Create and zero initialize
for (int framenumb = 0; framenumb < 100; ++framenumb)
{
avgImg += images[framenumb];
}
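Note that operator+= needs both operands to have the same type, so the shorter version assumes the stored frames are also CV_32FC3; cv::accumulate, by contrast, happily adds 8-bit frames into a float accumulator. Either way, after the accumulation loop you still need the final division and the conversion back to 8-bit from your original code before displaying:
avgImg = avgImg / 100;             // divide by the number of accumulated frames
avgImg.convertTo(avgImg, CV_8UC3); // back to 8-bit for display
imshow("averaged", avgImg);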
Related
So I am trying to edit all the frames from an input video and then save them to the output. The saved video appears and has the proper file size, but it can't be opened, and the imshow inside the while loop doesn't show the frames at all (only a blank window). I've tried numerous codecs (H264, MJPG, MPEG, MPV4, XVID, CVID) and none of them make it work. What could be the problem? Also, the first function converts RGB to YUV and returns the gray channel, while the second one applies an HPF and an LPF. These functions work on individual images, so I don't think they are the problem.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
Mat KonverzijaRGB2YUV(Mat ulazniFrejm){
int width = ulazniFrejm.cols;
int height = ulazniFrejm.rows;
Mat konvertovan(height, width, CV_8UC3, Scalar(0,0,0));
for(int i=0; i<width; i++){
for(int j=0; j<height; j++){
double R = ulazniFrejm.at<Vec3b>(j,i).val[0];
double G = ulazniFrejm.at<Vec3b>(j,i).val[1];
double B = ulazniFrejm.at<Vec3b>(j,i).val[2];
double Y = 0.299*R + 0.587*G + 0.114*B;
double U = 0.436*(B-Y) / (1-0.114);
double V = 0.615*(R-Y) / (1-0.299);
konvertovan.at<Vec3b>(j,i).val[0] = Y;
konvertovan.at<Vec3b>(j,i).val[1] = U;
konvertovan.at<Vec3b>(j,i).val[2] = V;
}
}
Mat Ykanal[3];
split (konvertovan, Ykanal);
Mat YkanalRGB;
cvtColor(Ykanal[0], YkanalRGB, CV_GRAY2RGB);
return YkanalRGB;
}
Mat Izostravanje(Mat ulazniFrejm){
Mat izostrena;
float valLaplas[] =
{ 0.,-1.,0.,
-1.,4.,-1.,
0.,-1.,0. };
Mat kernelLaplas(Size(3,3), CV_32FC1, valLaplas);
Mat Laplas;
filter2D(ulazniFrejm, Laplas, -1, kernelLaplas);
convertScaleAbs(Laplas, Laplas);
Mat Gaus;
GaussianBlur(ulazniFrejm, Gaus, Size(3,3), 0, 0);
addWeighted(Laplas, 1.4, Gaus, 1.0, 0, izostrena);
return izostrena;
}
int main(int argc, char** argv){
VideoCapture ulazniVideo("esmeralda.mp4");
int brojFrejmova = static_cast<int>(ulazniVideo.get(CV_CAP_PROP_FRAME_COUNT));
if(!ulazniVideo.isOpened()){
cout << "Video se ne moze otvoriti" << endl;
}
waitKey(1000);
Size frameSize = Size((int)ulazniVideo.get(CV_CAP_PROP_FRAME_WIDTH), (int)ulazniVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
int fps = ulazniVideo.get(CV_CAP_PROP_FPS);
int ex = -1;
VideoWriter izlazniVideo;
izlazniVideo.open("esmeralda.mp4", ex, fps, frameSize, true);
int brojac = 0;
while(true){
Mat frame;
ulazniVideo >> frame;
Mat konvertovanFrejm;
konvertovanFrejm = KonverzijaRGB2YUV(frame);
Mat izostrenFrejm;
izostrenFrejm = Izostravanje(konvertovanFrejm);
if(frame.empty()){
break;
}
izlazniVideo << frame;
brojac++;
namedWindow("Video", CV_WINDOW_AUTOSIZE);
imshow("Video", izostrenFrejm);
int waitKey(1000/fps);
}
waitKey(0);
return 0;
}
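A few things in this loop are worth checking: the writer opens "esmeralda.mp4", the very file the capture is reading; the unprocessed frame is written instead of izostrenFrejm; the empty-frame check runs only after the frame has already been processed; and int waitKey(1000/fps); declares an int variable named waitKey instead of calling the function, which is also why the window never repaints. A minimal sketch of the loop with those points changed, assuming the MJPG codec and an .avi container are available on your system (the output name izlaz.avi is just a placeholder):
VideoWriter izlazniVideo("izlaz.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, frameSize, true);
while (true) {
    Mat frame;
    ulazniVideo >> frame;
    if (frame.empty()) // check before touching the frame
        break;
    Mat izostrenFrejm = Izostravanje(KonverzijaRGB2YUV(frame));
    izlazniVideo << izostrenFrejm; // write the processed frame, not the original
    imshow("Video", izostrenFrejm);
    if (waitKey(1000 / fps) == 27) // actually call waitKey so the window repaints; ESC quits
        break;
}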
In the code below I want to apply a temp_rec filter to the frames of a movie, but I get an "assertion failed" error. The video_line_extractor function extracts a time series of pixels from one row; then, in main, I apply my temporal filter to every row of the image. I know it is inefficient, naive code, but I cannot use an OpenCV built-in function in place of my temp_rec function.
int width_frame, heigth_frame, num_frames;
VideoCapture cap;
void temp_rec(float* in, float* out, int w, float tau) {
float yp;
float tauo = 1 / tau;
for (int i = 0; i<w;i++) {
if (i == 0) {
*out = tauo*(*in);
}
else {
out -= 1;
yp = *(out);
out += 1;
*(out) = tauo*(*(in)) + ((exp(-tauo))*yp);
}
out += 1;
in += 1;//prepare for calculate next element
}
}
Mat video_line_extractor(int whichrow) {//extract a row of frames
// Create a VideoCapture object and open the input file
// If the input is the web camera, pass 0 instead of the video file name
cap = VideoCapture("a valid path");
int frame_counter = 0;
Mat M_temp = Mat::eye(width_frame, num_frames, CV_8UC3);
// Check if camera opened successfully
if (!cap.isOpened()) {
cout << "Error opening video stream or file :(" << endl;
}
while (frame_counter<num_frames) {
Mat frame;
// Capture frame-by-frame
cap >> frame;
// If the frame is empty, break immediately
if (frame.empty())
break;
cvtColor(frame, frame, CV_BGR2GRAY);
for(int j=0;j<width_frame;j++) // the captured frame is an image; this loop copies its row 'whichrow' into row 'frame_counter' of M_temp
M_temp.at<uchar>(frame_counter , j) = frame.ptr<uchar>(whichrow)[j];
frame_counter++;
}
// When everything done, release the video capture object
cap.release();
return M_temp;
}
int main() {
cap=VideoCapture("a valid path");
width_frame = cap.get(CV_CAP_PROP_FRAME_WIDTH);
heigth_frame = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
num_frames = cap.get(CV_CAP_PROP_FRAME_COUNT);
Mat image= Mat::zeros(width_frame, num_frames, CV_8UC3); // initialize input
Mat image_o = Mat::zeros(width_frame, num_frames, CV_8UC3);//initialize output
for (int jj = 0;jj < heigth_frame;jj++) {
image = video_line_extractor(jj);
for (int j = 0;j < width_frame;j++)
temp_rec((float *)image.ptr<uchar>(j), (float *)image_o.ptr<uchar>(j), num_frames, 10);
}
imshow("Image", image);
waitKey(0);
return 0;
}
edit: this is the assertion error: https://i.stack.imgur.com/BGGu0.png
The updated code is identical except that the output matrix is now allocated with a float type:
Mat image_o = Mat::zeros(width_frame, num_frames, CV_32FC3); // initialize output
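Judging by the typical cause of that check, the assertion most likely comes from Mat::at: in debug builds, at<uchar> verifies that sizeof(uchar) matches the matrix element size, and M_temp is created as three-channel CV_8UC3 even though it stores a single gray value per element (Mat::eye is also an odd choice for a buffer that is fully overwritten). The (float*) casts in main have the same mismatch in the other direction: those Mats hold uchar data, not floats. A sketch of the smallest changes, assuming one gray value per position is the intent (the row/column order of M_temp.at<uchar>(frame_counter, j) versus the width_frame x num_frames allocation is also worth double-checking):
Mat M_temp = Mat::zeros(width_frame, num_frames, CV_8UC1); // single channel, so at<uchar> matches the element size
// in main: image takes its type from video_line_extractor's return value, so it becomes
// single-channel too; convert a row to real floats instead of casting the pointer
Mat row_f, out_f;
image.row(j).convertTo(row_f, CV_32FC1); // genuine float samples
out_f.create(row_f.size(), CV_32FC1);
temp_rec(row_f.ptr<float>(0), out_f.ptr<float>(0), row_f.cols, 10);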
For now I have:
Mat avgImg(480, 640, CV_32FC3, Scalar()); // Create and zero initialize
Mat capturedImg;
for (int framenumb = 0; framenumb < 10; ++framenumb)
{
// avgImg.create(480, 640, CV_32FC3); // Don't create each time!
capturedImg = images[framenumb];
cv::accumulate(capturedImg, avgImg);
}
avgImg = avgImg / 100;
avgImg.convertTo(avgImg, CV_8UC3);
imshow("averaged", avgImg);
How can I make the for loop bounds increase every time it finishes looping? For example, after 10 images are accumulated and averaged, the loop should automatically change to for (framenumb = 10; framenumb < 20; ++framenumb) to take the next 10 images, and so on.
Something as simple as:
for(int i = 0; i < NUM; i++) {
Mat avgImg(480, 640, CV_32FC3, Scalar()); // Create and zero initialize
Mat capturedImg;
for (int framenumb = 10 * i; framenumb < (10 * i) + 10; ++framenumb) {
// avgImg.create(480, 640, CV_32FC3); // Don't create each time!
capturedImg = images[framenumb];
cv::accumulate(capturedImg, avgImg);
}
avgImg = avgImg / 10; // divide by the group size (10 frames), not 100
avgImg.convertTo(avgImg, CV_8UC3);
imshow("averaged", avgImg);
}
Where NUM is the number of images divided by 10.
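One caveat, since every averaged group is shown in the same window: HighGUI only repaints during a waitKey call, so without one inside the outer loop you will only ever see the last group:
imshow("averaged", avgImg);
waitKey(0); // repaint and pause; press a key to advance to the next averaged group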
I would like to sharpen my image using OpenCV. I have looked at an example online which performs sharpening on a grayscale image, and I tried it and it works perfectly. However, I am now trying to do the same with RGB colors, so I am applying the same functionality to the three channels separately, but it is not giving me any result: the output is exactly the same as the original image.
#include "Sharpening.h"
using namespace std;
Sharpening::Sharpening() {
}
Sharpening::~Sharpening() {
}
IplImage* Sharpening::laplace(IplImage* channel) {
CvSize size = cvSize(channel->width, channel->height);
IplImage* temp = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* lapl = cvCreateImage(size, IPL_DEPTH_8U, 1);
int width = size.width;
int height = size.height;
cvConvertScale(channel, temp, 1.0);
CvMat* ker = cvCreateMat(3, 3, CV_32FC1);
cvSet(ker, cvScalarAll(-1.0));
cvSet2D(ker, 1, 1, cvScalarAll(15.0));
cout << "this is been executed";
cvFilter2D(temp, lapl, ker);
cvReleaseMat(&ker);
double maxv = 0.0;
float maxFloat = 1.79769e+308;
double minv = maxFloat;
cvMinMaxLoc(lapl, &minv, &maxv);
for (int i = 0; i < width * height; i++) {
double lap_val = cvGet1D(lapl, i).val[0];
int v = (int) ((255.0 * lap_val / maxv) + 0.5); // this calculation does nothing particularly
cvSet1D(temp, i, cvScalarAll(v));
}
maxv = 0.0;
cvMinMaxLoc(channel, &minv, &maxv);
for (int i = 0; i < width * height; i++) {
double val = cvGet1D(channel, i).val[0];
int v = (int) ((255.0 * val / maxv) + 0.5);
cvSet1D(channel, i, cvScalarAll(v));
}
cvReleaseImage(&temp);
cvReleaseImage(&lapl);
cvReleaseMat(&ker);
return channel;
} // end of function
int Sharpening::calculateLoop(int number) {
int value = 2;
for (int i = 0; i < 10; i++) {
number = number * value;
cout << number << endl;
}
return number;
}
//======================================================================================
int Sharpening::SharpenColored(Sharpening sharp) {
int key = 0;
CvCapture *capture = 0;
IplImage* frame = 0;
cvNamedWindow("deblur", CV_WINDOW_AUTOSIZE);
cvNamedWindow("deblur2", CV_WINDOW_AUTOSIZE);
cvNamedWindow("origional", CV_WINDOW_AUTOSIZE);
// initialize camera
capture = cvCaptureFromCAM(0); //capture from a camera
//capture = cvCaptureFromAVI("jabelH2.avi");
//frame = cvQueryFrame(capture);
if (!cvGrabFrame(capture)) { // capture a frame
printf("Could not grab a frame\n\7");
exit(0);
}
frame = cvQueryFrame(capture);
CvSize imageSize1 = cvSize(frame->width, frame->height);
IplImage* R = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
IplImage* G = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
IplImage* B = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
IplImage* R2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
IplImage* G2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
IplImage* B2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
IplImage* source = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);
IplImage* result = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);
IplImage* result2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);
QFuture<IplImage*> future1;
QFuture<IplImage*> future2;
QFuture<IplImage*> future3;
while (key != 'q') {
// get a frame
frame = cvQueryFrame(capture);
// always check
if (!frame)
break;
source = frame;
cvSplit(frame, B, G, R, NULL);
future1 = QtConcurrent::run(sharp, &Sharpening::laplace, R);
future2 = QtConcurrent::run(sharp, &Sharpening::laplace, G);
future3 = QtConcurrent::run(sharp, &Sharpening::laplace, B);
R2 = future1.result();
G2 = future2.result();
B2 = future3.result();
cvMerge(B2, G2, R2, NULL, result);
cvAdd(source, result, result2, NULL);
cvShowImage("origional", source);
cvShowImage("deblur", R2);
cvShowImage("deblur2", G2);
key = cvWaitKey(1);
} //end of while
cvDestroyWindow("deblur");
cvDestroyWindow("deblur2");
cvDestroyWindow("origional");
cvReleaseImage(&R);
cvReleaseImage(&source);
cvReleaseImage(&R2);
cvReleaseImage(&G);
cvReleaseImage(&G2);
cvReleaseImage(&B);
cvReleaseImage(&B2);
cvReleaseImage(&result);
cvReleaseImage(&result2);
cvReleaseCapture(&capture);
delete future1;
delete future2;
delete future3;
return 0;
} //end of function
//======================================================================================
int main(int argc, char *argv[]) {
Sharpening sh;
sh.SharpenColored(sh);
}
I am now trying to do the same but with RGB colors so I am performing the same functionality
Check your assumptions! I don't think you are doing the same thing. I don't know OpenCV very well, but you're not doing anything with your temp image, so there is no reason for the channel image to change! You should write the result held in the temp image back into the original image, something like this perhaps:
for (int i = 0; i < width * height; i++) {
double lap_val = cvGet1D(lapl, i).val[0]; // get modified image data
int v = (int) ((255.0 * lap_val / maxv) + 0.5); // scale to 0 255
cvSet1D(channel, i, cvScalarAll(v)); // store in original image
}
Or you can take the original code, and comment it to explain what each cvImage contains, and see what you have missed when reusing it in your app.
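As a side note, and with the caveat that this swaps the legacy C API used above for the C++ one: cv::filter2D applies its kernel to each channel independently, so a color frame can be sharpened in a single call without splitting channels. A minimal sketch, assuming an 8-bit BGR frame:
cv::Mat sharpenColor(const cv::Mat& bgr) {
    // classic 3x3 sharpening kernel (identity plus a Laplacian)
    cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
         0, -1,  0,
        -1,  5, -1,
         0, -1,  0);
    cv::Mat sharpened;
    cv::filter2D(bgr, sharpened, -1, kernel); // runs on every channel independently
    return sharpened;
}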
I would like to access pixels in RGB with OpenCV 2.3.
I'm trying it like this, but it's as if every pixel were equal frame after frame, because I get no output. The images come from my webcam and I can see them.
By the way, RED = 0.
Thanks.
Mat frame;
Mat oldFrame;
VideoCapture cap(0);
cap >> oldFrame;
sumFramePix = oldFrame.cols * oldFrame.rows;
nbChannels = oldFrame.channels();
cout << "NbcHANNELs : " << nbChannels << endl;
imshow("Video 1", oldFrame);
while(1)
{
cap >> frame;
imshow("Video 1", frame);
for(int i=0; i<frame.rows; i++)
{
for(int j=0; j<frame.cols; j++)
{
if (frame.ptr<uchar>(i)[nbChannels*j+RED] < oldFrame.ptr<uchar>(i)[nbChannels*j+RED])
{
cout << "==============-";
}
}
}
oldFrame = frame;
if(waitKey(300) >= 0) break;
}
Change
oldFrame = frame;
to
oldFrame = frame.clone();
You are creating two Mat objects that point to the same data. clone() makes a deep copy.
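A minimal illustration of the difference:
Mat a(2, 2, CV_8UC1, Scalar(0));
Mat b = a;         // header copy: b shares a's pixel buffer
Mat c = a.clone(); // deep copy: c owns its own pixels
a.at<uchar>(0, 0) = 255;
// b.at<uchar>(0, 0) is now 255 as well; c.at<uchar>(0, 0) is still 0
In your loop, oldFrame = frame made oldFrame share the buffer that cap >> frame keeps refilling, so the two frames always compared equal.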