I am trying to grab multiple frames from 2 USB cameras using threads. Issue is i need to push frames into frame_queue vector and then pop one frame at a time from the vector for further image processing.
I followed this link
https://putuyuwono.wordpress.com/2015/05/29/multi-thread-multi-camera-capture-using-opencv/
The code in this link has used concurrent_queue.h for queuing up Mat frames but instead i want to use vector Mat to store the incoming frames.
Header file CameraStreamer.h:
#include <iostream>
#include <string>
#include <thread>
#include <vector>
//#include <concurrent_queue.h>
#include <boost/thread.hpp>
#include "opencv2/videoio.hpp"
using namespace std;
using namespace cv;
//using namespace concurrency;
class CameraStreamer{
public:
//this holds camera stream urls
vector<string> camera_source;
//this holds usb camera indices
vector<int> camera_index;
//this holds OpenCV VideoCapture pointers
vector<VideoCapture*> camera_capture;
//this holds queue(s) which hold images from each camera
vector<Mat*> frame_queue;
//this holds thread(s) which run the camera capture process
vector<thread*> camera_thread;
//Constructor for IP Camera capture
CameraStreamer(vector<string> source);
//Constructor for USB Camera capture
CameraStreamer(vector<int> index);
//Destructor for releasing resource(s)
~CameraStreamer();
private:
bool isUSBCamera;
int camera_count;
//initialize and start the camera capturing process(es)
void startMultiCapture();
//release all camera capture resource(s)
void stopMultiCapture();
//main camera capturing process which will be done by the thread(s)
void captureFrame(int index);
};
Here is my CameraStreamer.cpp:
#include "camerastreamer.h"
CameraStreamer::CameraStreamer(vector<string> stream_source)
{
camera_source = stream_source;
camera_count = camera_source.size();
isUSBCamera = false;
startMultiCapture();
}
CameraStreamer::CameraStreamer(vector<int> capture_index)
{
camera_index = capture_index;
camera_count = capture_index.size();
isUSBCamera = true;
startMultiCapture();
}
CameraStreamer::~CameraStreamer()
{
stopMultiCapture();
}
void CameraStreamer::captureFrame(int index)
{
VideoCapture *capture = camera_capture[index];
while (true)
{
Mat frame;
//Grab frame from camera capture
(*capture) >> frame;
//Put frame to the queue
frame_queue[index]->push(frame);
//relase frame resource
frame.release();
}
}
void CameraStreamer::startMultiCapture()
{
VideoCapture *capture;
thread *t;
vector<Mat> *q;
for (int i = 0; i < camera_count; i++)
{
//Make VideoCapture instance
if (!isUSBCamera){
string url = camera_source[i];
capture = new VideoCapture(url);
cout << "Camera Setup: " << url << endl;
}
else{
int idx = camera_index[i];
capture = new VideoCapture(idx);
cout << "Camera Setup: " << to_string(idx) << endl;
}
//Put VideoCapture to the vector
camera_capture.push_back(capture);
//Make thread instance
t = new thread(&CameraStreamer::captureFrame, this, i);
//Put thread to the vector
camera_thread.push_back(t);
//Make a queue instance
q = new vector<Mat>;
//Put queue to the vector
frame_queue.push_back(q);
}
}
void CameraStreamer::stopMultiCapture()
{
VideoCapture *cap;
for (int i = 0; i < camera_count; i++)
{
cap = camera_capture[i];
if (cap->isOpened()){
//Relase VideoCapture resource
cap->release();
cout << "Capture " << i << " released" << endl;
}
}
}
Main.cpp:
int main()
{
//IP camera URLs
vector<string> capture_source = {
"rtsp://192.168.2.100/profile2/media.smp",
"rtsp://192.168.0.100/profile2/media.smp"
};
//USB Camera indices
vector<int> capture_index = { 0, 1 };
//Highgui window titles
vector<string> label;
for (size_t i = 0; i < capture_index.size(); i++)
{
string title = "CCTV " + to_string(i);
label.push_back(title);
}
//Make an instance of CameraStreamer
CameraStreamer cam(capture_index);
while (waitKey(20) != 27)
{
//Retrieve frames from each camera capture thread
for (size_t i = 0; i < capture_index.size(); i++)
{
Mat *frame;
//Pop frame from queue and check if the frame is valid
cam.frame_queue[i].pushback(frame);
//Show frame on Highgui window
imshow(label[i], frame);
}
}
}
i tried using
cam.frame_queue[i].pushback(frame);
but it doesn't work.
So i am not sure how do i push and pop a frame into frame_queue vector with an index.
Related
I have a function that is executing by std::thread. I want it works until the user closes the terminal that running roscore by pressing Ctrl+C. Because of that I use this inside the thread:
void publish_camera_on_topic(std::vector<Camera> cameras, const std::vector<ros::Publisher> publishers, const int camera_index)
{
int frameSize;
BYTE *imagePtr;
// frame id
int frame_id = 0;
cv_bridge::CvImage img_bridge;
sensor_msgs::Image img_msg;
while (ros::ok()) {
// Grab and display a single image from each camera
imagePtr = cameras[camera_index].getRawImage();
frameSize = cameras[camera_index].getFrameSize();
cameras[camera_index].createRGBImage(imagePtr,frameSize);
unsigned char* pImage = cameras[camera_index].getImage();
if (NULL != pImage) {
Mat image(cameras[camera_index].getMatSize(), CV_8UC3, pImage, Mat::AUTO_STEP);
// release asap
cameras[camera_index].releaseImage();
//cvtColor(image, image, CV_BGR2RGB,3);
// publish on ROS topic
std_msgs::Header header; // empty header
header.seq = frame_id; // user defined counter
header.stamp = ros::Time::now(); // time
img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::RGB8, image);
img_bridge.toImageMsg(img_msg); // from cv_bridge to sensor_msgs::Image
publishers[camera_index].publish(img_msg); // ros::Publisher pub_img = node.advertise<sensor_msgs::Image>("topic", queuesize);
}
// increase frame Id
frame_id = frame_id + 1;
}
std::cout << "ROS closing for thread of camera " << camera_index << " recieved." << std::endl;
}
Also, I create thread like this:
// image publisher
// for each camera create an publisher
std::vector<ros::Publisher> publishers;
for (size_t i = 0; i < cameras.size(); i++) {
char topic_name[200];
sprintf(topic_name, "/lumenera_camera_package/%d", i + 1);
publishers.push_back(nh.advertise<sensor_msgs::Image>(topic_name, 10));
}
// work with each camera on a seprate thread
std::vector<std::thread> thread_vector;
for(size_t i=0; i < cameras.size(); i++) {
thread_vector.push_back(std::thread(publish_camera_on_topic, cameras, publishers, i));
}
ros::spin();
std::for_each(thread_vector.begin(), thread_vector.end(), [](std::thread &t){t.join(); });
for(size_t i=0; i < cameras.size(); i++) {
cameras[i].stopStreaming();
}
ROS_INFO("Node: [lumenera_camera_node] has been Ended.");
However, when I press Ctrl+C in the terminal and stop the roscore, the threads keep running, and the value of ros::ok() does not change.
The problem is solved. The issue is ros::ok() does not check for ROS master. Instead of this line:
while (ros::ok()) { //do sth}
This line should be used:
while (ros::ok() && ros::master::check()) { // do sth}
I have this function reading negative images from a dataset (approx 122000)
void load_images(const String & dirname, vector< Mat > & img_lst, bool showImages = false)
{
vector< String > files;
glob(dirname, files);
for (size_t i = 0; i < files.size(); ++i)
{
Mat img = imread(files[i]); // preia imagine
if (img.empty()) // treci peste daca este imagine invalida
{
cout << files[i] << " is invalid!" << endl;
continue;
}
if (showImages)
{
imshow("image", img);
waitKey(1);
}
img_lst.push_back(img);
}
}
and takes a lot of time to process and sometimes it gets blocked.
How can I optimize it and make it in parallel?
I changed your code a little to use OpenMP to parallelise the loading - the actual changes are minimal - I just put an OpenMP pragma before the for loop and serialised access to the vector of images since vectors are not thread safe.
#include <iostream>
#include <vector>
#include <mutex>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
void load_images(int start,int end){
vector<Mat>img_lst;
mutex mtx;
#pragma omp parallel for
for(size_t i=start;i<=end;i++){
char filename[16];
sprintf(filename,"%d.jpg",i);
Mat img = imread(filename);
if (img.empty()){
cerr << "ERROR: Failed to load " << filename << endl;
}
mtx.lock();
img_lst.push_back(img);
mtx.unlock();
}
mtx.lock();
cout << "INFO: Loaded " << img_lst.size() << endl;
mtx.unlock();
}
int
main(int argc,char*argv[])
{
load_images(1,122000);
}
You control the number of threads like this:
export OMP_NUM_THREADS=2
time ./main
The times for loading 122,000 images varied according to the number of threads I used according to the following table:
Threads Time (s)
================
1 44
2 23
4 12.4
8 8.8
I then decided that, if you are doing this often enough to care, you would maybe want to pay a small price up front to improve the times yet further. So, instead of doing all that CPU-intensive code to decompress JPEGs, you might want to convert your images once, to a more simply read format - such as PNM. So, I converted all the JPEG images to PNM using GNU Parallel and then loaded the PNM images instead:
So that looks like:
seq 122000 | parallel convert {}.jpg {}.pnm
And the code is:
...
...
#pragma omp parallel for
for(size_t i=start;i<=end;i++){
char filename[16];
sprintf(filename,"%d.pnm",i); <--- ONLY LINE CHANGED
Mat img = imread(filename);
...
...
And you can see the times are considerably shorter:
Nthreads Time(s)
================
1 7
2 4
4 2.5
8 3.2
To compile with OpenMP, use:
g++ -fopenmp =O3 -march native ...
You can try this
class parReader
{
public:
parReader(std::string dirname, std::vector< cv::Mat > & lst);
private:
size_t filesIdx;
HANDLE hFilesMux,hImgListMux;
std::vector<cv::String> files;
std::vector<cv::Mat> img_lst;
static void readImgs(parReader *nm);
const char *getNext();
void push_back(cv::Mat &img);
};
parReader::parReader(std::string dirname, std::vector<cv::Mat> & lst) :img_lst(lst), filesIdx(0),hFilesMux(NULL),hImgListMux(NULL)
{
hFilesMux = CreateMutex(NULL, 0, NULL);
hImgListMux = CreateMutex(NULL, 0, NULL);
cv::glob(dirname, files);
std::thread pr1(readImgs, this);
std::thread pr2(readImgs, this);
std::thread pr3(readImgs, this);
std::thread pr4(readImgs, this);
pr1.join();
pr2.join();
pr3.join();
pr4.join();
CloseHandle(hFilesMux);
CloseHandle(hImgListMux);
}
const char *parReader::getNext()
{
const char *res = NULL;
WaitForSingleObject(hFilesMux, INFINITE);
if (filesIdx < files.size())
res = files[filesIdx++].c_str();
ReleaseMutex(hFilesMux);
return res;
}
void parReader::push_back(cv::Mat &img)
{
WaitForSingleObject(hImgListMux, INFINITE);
img_lst.push_back(img);
ReleaseMutex(hImgListMux);
}
void parReader::readImgs(parReader *nm)
{
while (true)
{
const char *fn = nm->getNext();
if (fn == NULL) break;
cv::Mat img = cv::imread(fn);
if (img.empty()) // treci peste daca este imagine invalida
{
std::cout << fn << " is invalid!" << std::endl;
continue;
}
nm->push_back(img);
}
}
int main()
{
std::vector<cv::Mat> imgList;
parReader mgr("*.png",imgList);
}
tested it briefly but it should work reading images with 4 threads.
I need to make small video player with OpenCV, which have to support the following functionality. Key 'p' on keyboard - pause/unpause, 'q' - exit, left and right arrow keys - play video frame by frame straight and reverse when it is paused. So the problem is when I try to show video with high quality and I hold arrow key for several seconds it does not run, but freeze and then jump to current frame after I release key. I tried to fix this with adding this_thread::sleep after cv::imshow() to give time to draw, but it did not help at all. So here is the code. Also, I have some reasons to use boost instead of C++11, so it is ok.
main.cpp
#include "VideoPlayer.hpp"
#include <iostream>
int main(int argc, char *argv[])
{
if (argc < 2) {
std::cerr << "Video file full name required as argument." << std::endl;
}
VideoPlayer vp(argv[1]);
if (!vp.play())
return 1;
return 0;
}
VideoPlayer.hpp
#pragma once
#include <opencv/cxcore.hpp>
#include <opencv/highgui.h>
#include <string>
class VideoPlayer
{
public:
VideoPlayer(const std::string &video, const std::string &windowName = "Output window",
unsigned int delay = 30);
bool play();
private:
cv::VideoCapture videoCapture_;
std::string windowName_;
unsigned int delay_;
private:
bool processKey(int key);
void setFrame(int frameNum);
};
VideoPlayer.cpp
#include "VideoPlayer.hpp"
#include <iostream>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/thread/thread.hpp>
VideoPlayer::VideoPlayer(const std::string &video, const std::string &windowName,
unsigned int delay)
: videoCapture_(video)
, windowName_(windowName)
, delay_(delay)
{}
bool VideoPlayer::play()
{
if (!videoCapture_.isOpened()) {
std::cerr << "Unable to open video." << std::endl;
return false;
}
cv::namedWindow(windowName_);
for (;;) {
cv::Mat frame;
videoCapture_ >> frame;
cv::imshow(windowName_, frame);
int key = cv::waitKey(delay_);
if (processKey(key))
break;
}
return true;
}
bool VideoPlayer::processKey(int key)
{
if (key == 'p') {
for (;;) {
int pausedKey = cv::waitKey(0);
if (pausedKey == 'p') {
break;
} else if (pausedKey == 'q') {
return true;
} else if (pausedKey == 65363) {
int frameNum = videoCapture_.get(CV_CAP_PROP_POS_FRAMES);
setFrame(frameNum);
} else if (pausedKey == 65361) {
int frameNum = videoCapture_.get(CV_CAP_PROP_POS_FRAMES);
setFrame(frameNum - 2);
}
}
} else if (key == 'q') {
return true;
}
return false;
}
void VideoPlayer::setFrame(int frameNum)
{
if (frameNum > 0 && frameNum < videoCapture_.get(CV_CAP_PROP_FRAME_COUNT)) {
std::cerr << frameNum << std::endl;
videoCapture_.set(CV_CAP_PROP_POS_FRAMES, frameNum);
cv::Mat frame;
videoCapture_ >> frame;
cv::imshow(windowName_, frame);
boost::this_thread::sleep(boost::posix_time::milliseconds(10));
}
}
I also created a multithreading implementation with buffer based on std::queue with lock, but it didn't solve the problem. And I tried to use boost::lockfree::queue, but I could not manage to finish it because of some strange behavior. I will share this code later, if it is necessary.
So, if somebody knows some good practice, how to avoid this problem, help me please.
EDIT:
Replacing boost::this_thread::sleep(boost::posix_time::milliseconds(10)); with cv::waitKey(0) is bad, because is makes me to do two short presses on arrow key to change one frame, and it does not help, because holding key skips it very fast. So the following code helped, but it is too strange and it is necessary to select times each for each video.
void VideoPlayer::setFrame(int frameNum)
{
if (frameNum > 0 && frameNum < videoCapture_.get(CV_CAP_PROP_FRAME_COUNT)) {
std::cerr << frameNum << std::endl;
videoCapture_.set(CV_CAP_PROP_POS_FRAMES, frameNum);
cv::Mat frame;
videoCapture_ >> frame;
cv::imshow(windowName_, frame);
int times = 7;
for (int i = 0; i < times; i++)
cv::waitKey(10);
}
}
Also, I'm unable to use Qt or something else, only C++03 with boost and OpenCV.
I think I need some trick to make cv::waitKey(time) wait fortime whether any key pressed or not.
Good afternoon to all!
I've been using Visual Studio 2010 with OpenCV to develop a code for Face recognition. I'm trying to reach the task by two Threads (I need to do it this way, because i´m going to apply it on a bigger project), one (the main) to show the frames and the second to capture (from the Webcam of my laptop) and store the frames on a Mat object (Fast Capture).
The inconvenient here is that the second Thread is capturing the frames, but the main is not showing them. I think there is a problem with copying the Mat from the capture Thread to the Mat on the main thread ("current_frame" seems to be empty after I do the assignation)
Here is the code (I'm using Boost::Thread for Multithreading)
New code with suggestions
Global declarations
#include <iostream>
#include <stdio.h>
#include <boost\thread.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
boost::mutex mtx;
The function
void camCapture(VideoCapture& cap, Mat& frame, bool* Capture)
{
while (*Capture==true)
{
mtx.lock();
cap >> frame;
mtx.unlock();
if(frame.empty())
{
cout<<"No hay captura"<<endl;
}
else
{
cout<<"Frame capturado"<<endl;
}
}cout << "camCapture finished\n";
return;}
The main
int main() {
try{
VideoCapture cap(0); // open the default camera
Mat frame,current_frame, SFI, Input;
bool *Capture = new bool;
*Capture = true;
if (!cap.isOpened()) // check if we succeeded
return -1;
//your capture thread has started
boost::thread captureThread(camCapture, cap, frame, Capture);
while(1)
{
if(frame.empty())
{
cout<<"Frame en hilo principal vacio"<<endl;
}
else{
cout<<"Frame en hilo principal capturado"<<endl;
}
mtx.lock();
current_frame = frame.clone();
mtx.unlock();
if(current_frame.empty())
{
cout<<"Current_Frame vacio"<<endl;
}
else{
imshow("Streaming",current_frame);
if(waitKey(10)==27)break;
}
}
//Terminate the thread
captureThread.join();
}
catch(Exception & e)
{
cout<<e.what()<<endl;
}
return 0;}
according to Boost threads - passing parameters by reference you have to use boost::ref(v) if you want to pass a variable by reference to a boost::thread function. But you could use pointers instead.
In addition you should share the mutex to the thread by passing it as pointer or reference variable instead of using it as global:
#include <iostream>
#include <stdio.h>
#include <boost/thread.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
boost::mutex mtx;
void camCapture(VideoCapture& cap, Mat& frame, bool* Capture)
{
while (*Capture == true)
{
mtx.lock();
cap >> frame;
mtx.unlock();
if (frame.empty())
{
cout << "No hay captura" << endl;
}
else
{
cout << "Frame capturado" << endl;
}
}cout << "camCapture finished\n";
return;
}
int main() {
try{
VideoCapture cap(0); // open the default camera
Mat frame, current_frame, SFI, Input;
bool *Capture = new bool; // better not use a pointer here, but use a bool and pass the address or by reference.
*Capture = true;
if (!cap.isOpened()) // check if we succeeded
return -1;
//your capture thread has started
boost::thread captureThread(camCapture, boost::ref(cap), boost::ref(frame), Capture);
while (1)
{
mtx.lock();
current_frame = frame.clone();
mtx.unlock();
if (current_frame.empty())
{
cout << "Current_Frame vacio" << endl;
}
else{
imshow("Streaming", current_frame);
if (waitKey(10) == 27)
{
// TODO: you should use a mutex (or an atomic type) here, too, maybe setting a bool is thread-safe, but this can't be guaranteed for each hardware!
*Capture = false;
break;
}
}
}
//Terminate the thread
captureThread.join();
// give memory free:
delete Capture;
}
catch (Exception & e)
{
cout << e.what() << endl;
}
return 0;
}
I am using opencv c++ on Mac OS X 10.10.2 to process video frames and display them. The performance of imshow with waitKey to display the video is extremely slow.
I have the following code which displays HD (1920x1080) grayscale frames correctly, except that it runs about 10 times too slow (i.e. 2 to 3 frames per second instead of 30 frames per second).
cv::Mat framebuf[TEST_COUNT];
//--- Code here to allocate and fill the frame buffer with about 4 seconds of video. This part works correctly.
//--- This loop runs too slow by factor of approximately 10x
for (int f = 0; f < TEST_COUNT; f++)
{
cv::imshow(windowName, framebuf[f]);
cv::waitKey(33);
}
Can anyone suggest how to get real-time or near real-time performance from opencv imshow()? I have seen many posts that state that they are displaying video in real-time or even faster than real-time, so I am not sure what I am doing wrong. Any help would be greatly appreciated.
I could be wrong but for me the problem is not with your code, but with your os/configuration. I've written a small test:
import cv2
import numpy as np
from random import randrange
img = np.zeros((1920, 1080), dtype = np.uint8)
counter = 0
while counter < 1000:
cv2.line(img, (randrange(0, 1920), randrange(0, 1080)), (randrange(0, 1920), randrange(0, 1080)), (randrange(0, 255)))
cv2.imshow('test', img)
temp = cv2.waitKey(1)
counter += 1
print counter
On my machine (Core 2 duo 2,6Ghz x64, 8gb ram, ssd) it took about 30 seconds for this test to complete. Run it and if you will get significantly bigger time than definitelly something is wrong with your laptop/opencv configuration/etc. I've used OpenCV 2.4.x on Mac OS X (it was 10.9 i think) and it was running fine. Reinstalling OpenCV is the most obvious solution which comes to my mind. When you remove OpenCV, use brew to install it again - brew install opencv --with-tbb --with-python --with-ffpmeg (or something similar - check using brew options opencv) should be fine. First options tells brew to build opencv with tbb(thread building block - library for multithreading, sometimes can significantly improve speed), second to install python wrappers, and the last one to install ffmpeg(handle codecs etc).
You would have to reduce the input to the function wait key. Try using a lower number in the range of 2-5. It also depends on the other processes you have running simultaneously, try shutting down other processes and see if it improves
you can create your own window to show the image. Add MyWindow.m MyWindow.h file to project.
MyWindow.h
#ifndef MY_WINDOW_H
#define MY_WINDOW_H
#ifdef __cplusplus
extern "C" {
#endif
void* createNSWindow(int x, int y, int w, int h);
void renderNSWindow(void* inwindow, void* data, int w, int h, int c);
void processNSEvent();
#ifdef __cplusplus
}
#endif
#endif
usage, in main.cpp, do not forget waitKey
#include "MyWindow.h"
// need create a cv window and do nothing
cv::namedWindow("xxx", 1);
// create window
void* w = createNSWindow(0, 0, 0, 0);
// frame image
cv::Mat frameImage;
// render loop
renderNSWindow(w, frameImage.data, frameImage.cols, frameImage.rows, frameImage.channels());
// need waitKey to display window
processNSEvent();
implement, in MyWindow.m, delete import "MyWindow.h"
#import <Cocoa/Cocoa.h>
#interface MyWindow : NSWindow
#property(nonatomic, strong) NSImageView *imgv;
#end
#implementation MyWindow
#end
static NSImage* _createNSImage(void* data, int w, int h, int c);
void* createNSWindow(int x, int y, int w, int h) {
NSRect screenFrame = [[NSScreen mainScreen] frame];
NSRect frame = NSMakeRect(x, y, w, h);
if (w == 0 || h == 0) {
frame = screenFrame;
}
MyWindow* window = [[MyWindow alloc] initWithContentRect:frame
styleMask:NSWindowStyleMaskBorderless
backing:NSBackingStoreBuffered
defer:NO] ;
//_initApp(window);
[window makeKeyAndOrderFront:NSApp];
window.titleVisibility = TRUE;
window.styleMask = NSWindowStyleMaskResizable | NSWindowStyleMaskTitled |NSWindowStyleMaskFullSizeContentView;
window.imgv = [[NSImageView alloc] initWithFrame:NSMakeRect(0, 0, frame.size.width, frame.size.height)];
[window.contentView addSubview:window.imgv];
return (void*)CFBridgingRetain(window);
}
static NSImage* _createNSImage(void* data, int w, int h, int c) {
size_t bufferLength = w * h * c;
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, data, bufferLength, NULL);
size_t bitsPerComponent = 8;
size_t bitsPerPixel = c * bitsPerComponent;
size_t bytesPerRow = c * w;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast;
if (c < 4) {
bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaNone;
unsigned char* buf = data;
for(int i = 0; i < w*h; i++) {
unsigned char temp = buf[i*c];
buf[i*c] = buf[i*c+c-1];
buf[i*c+c-1] = temp;
}
}
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef iref = CGImageCreate(w,
h,
bitsPerComponent,
bitsPerPixel,
bytesPerRow,
colorSpaceRef,
bitmapInfo,
provider, // data provider
NULL, // decode
YES, // should interpolate
renderingIntent);
NSImage* image = [[NSImage alloc] initWithCGImage:iref size:NSMakeSize(w, h)];
return image;
}
void renderNSWindow(void* inwindow, void* data, int w, int h, int c) {
MyWindow* window = (__bridge MyWindow*)inwindow;
window.imgv.image = _createNSImage(data, w, h, c);
}
void processNSEvent() {
for (;;)
{
NSEvent* event = [NSApp nextEventMatchingMask:NSEventMaskAny
untilDate:[NSDate distantPast]
inMode:NSDefaultRunLoopMode
dequeue:YES];
if (event == nil)
break;
[NSApp sendEvent:event];
}
}
other things, the waitKey now take about 20ms, you can do OpenCV in background thread, and show window in main thread. Also use processNSEvent instead of waitKey that only take about 10ms.
full source code:
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <dispatch/dispatch.h>
#include "MyWindow.h"
using namespace std;
using namespace cv;
int opencvfunc(int argc, const char *argv[]);
bool newFrame = false;
cv::Mat back_frame;
int main(int argc, const char * argv[]) {
cv::namedWindow("render", 1);
void* w = createNSWindow(0, 0, 0, 0);
dispatch_queue_t opencvq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0);
dispatch_async(opencvq, ^{
opencvfunc(argc, argv);
});
while(true) {
usleep(3*1000);
if(newFrame) {
std::chrono::system_clock::time_point starttime = std::chrono::system_clock::now();
renderNSWindow(w, back_frame.data, back_frame.cols, back_frame.rows, back_frame.channels());
newFrame = false;
//auto key = cv::waitKey(1);
//if (key == 'q') {
// break;
//}
processNSEvent();
std::chrono::system_clock::time_point endtime = std::chrono::system_clock::now();
std::cout << "imshow:" << std::chrono::duration_cast<std::chrono::duration<double>>(endtime-starttime).count()*1000 << std::endl;
}
}
return 0;
}
int opencvfunc(int argc, const char *argv[]) {
cv::VideoCapture cap;
cap.open(0);
if (!cap.isOpened()) {
std::cout << "Couldn't open camera 0." << endl;
return EXIT_FAILURE;
}
Mat frame, unmodified_frame;
for (;;) {
cap >> frame; // get a new frame from camera
if (frame.empty()) { // stop if we're at the end of the video
break;
}
//unmodified_frame = frame.clone();
// ...
back_frame = frame.clone();
newFrame = true;
}
return EXIT_SUCCESS;
}
OpenCV 4 had resolved this issue, please update to new version.
One more thing, process video and show video in two thread.
#include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <dispatch/dispatch.h>
using namespace cv;
using namespace std;
bool newFrame = false;
Mat back_frame;
int opencvmain(int argc, char** argv ) {
// open camear
cv::VideoCapture cap;
cap.open(0);
if (!cap.isOpened()) {
std::cout << "Couldn't open camera 0." << std::endl;
return EXIT_FAILURE;
}
// define frame images
cv::Mat frame;
// frame loop
for (;;) {
// get video frame
cap >> frame;
if (frame.empty()) {
break;
}
// render
back_frame = frame.clone();
newFrame = true;
}
return 0;
}
int main(int argc, char** argv ) {
namedWindow("video", WINDOW_AUTOSIZE );
dispatch_queue_t opencvq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0);
dispatch_async(opencvq, ^{
opencvmain(argc, argv);
});
while(true) {
usleep(3*1000);
if(newFrame) {
imshow("video", back_frame);
auto key = cv::waitKey(1);
if (key == ' ') {
break;
}
newFrame = false;
}
}
return 0;
}