I recently started working with Qt and found a bug in my code, and I can't understand where it comes from. Maybe you can see it and explain why it happens.
Here is main.cpp code part which changes shutter from 3000 to maximum:
// Per-frame handler: when the camera has a new live frame ready, show it in
// the OpenCV debug window, ramp the shutter time (3000 + 200 per frame), and
// push the same frame into the Qt control widget.
if (camera.test_cam->liveFrameReady())
{
camera.visualizeFrame(camera.test_cam->liveFrame());
// NOTE(review): set_shutter(0, 1, ...) -- the first two arguments presumably
// select camera/channel; confirm against the camera SDK documentation.
camera.set_shutter(0, 1, 3000 + i * 200);
controlWidget->update(camera.test_cam->liveFrame());
i++;
}
The code runs slowly (about 1 fps) because of the camera.visualizeFrame() method:
// Debug helper: render one camera frame in an OpenCV window.
// The pixel buffer is assumed to be 8-bit, 3-channel data -- TODO confirm
// against VideoDataType::format() for this camera.
void OsCam::visualizeFrame(Common::FrameHandle hFrame)
{
    void* buffer_ptr = this->getFrameData(hFrame);
    int width = hFrame->dataType()->width();
    int height = hFrame->dataType()->height();
    // Pass the buffer as void* (the old (int*) cast was meaningless) and give
    // cv::Mat the real bytes-per-line so padded rows don't shear the image.
    cv::Mat m(height, width, CV_8UC3, buffer_ptr, DataType::stride(hFrame->dataType()));
    cv::imshow("test image", m);
    // waitKey(1) pumps the HighGUI event loop so the window actually repaints.
    cv::waitKey(1);
}
Gui interface shows the camera frame in real time, test image from visualizeFrame at the background
Actually, I don't need to call this method (I used it just to be sure that I can read the memory, and I used OpenCV because I am more familiar with it).
But if I get rid of this camera.visualizeFrame() call, my GUI becomes white and does not respond at all.
Even if I use cv::waitKey or Sleep functions, nothing happens to the gui.
// Copy the latest camera frame into the two QLabel previews.
// NOTE(review): if ControlWidget derives from QWidget, this hides
// QWidget::update() -- consider renaming to showFrame().
void ControlWidget::update(Common::FrameHandle hFrame)
{
    try
    {
        // Wrap the SDK buffer without copying; the bytesPerLine argument uses
        // the real stride so padded rows don't shear the image. The buffer is
        // assumed to hold 8-bit RGB -- TODO confirm via VideoDataType::format().
        QImage img((uchar*)hFrame->buffer()->data(), hFrame->dataType()->width(), hFrame->dataType()->height(), DataType::stride(hFrame->dataType()), QImage::Format::Format_RGB888);
        QSize standart_size = QSize(hFrame->dataType()->width() / 3, hFrame->dataType()->height() / 3);
        QPixmap rectPxmp = QPixmap::fromImage(img).scaled(standart_size);
        this->camera_1->setPixmap(rectPxmp);
        this->camera_2->setPixmap(rectPxmp);
        // cv::waitKey() only serviced OpenCV HighGUI windows -- it never let
        // Qt run, which is why the GUI stayed white/unresponsive without the
        // OpenCV window. Pump the Qt event loop so the labels repaint.
        QApplication::processEvents();
    }
    catch (GeneralException& e)
    {
        std::cout << e.what() << std::endl;
    }
}
Shall I go to QThreads? One thread for reading the buffer and another one for visualization of gui?
Thank you!
some additional code:
// Return a raw pointer to the pixel data of hFrame, or NULL when the handle
// is empty or the SDK throws. Set displayFrameData to true to also dump the
// frame's metadata to stdout while debugging.
void* OsCam::getFrameData(Common::FrameHandle hFrame)
{
    bool displayFrameData = false; // debug switch: dump frame metadata
    void* frameData = NULL;
    try
    {
        // Guard clause: an empty handle simply yields NULL.
        if (!hFrame)
            return frameData;

        if (displayFrameData)
        {
            cout << "Frame index: " << hFrame->frameIndex();
            cout << "Frame timestamp: " << hFrame->timestamp();

            DataTypeHandle hDataType = hFrame->dataType();
            cout << "Frame size in bytes: " << hDataType->frameSizeInBytes() << endl;
            cout << "Width in pixels: " << DataType::width(hDataType) << endl;
            cout << "Height in rows: " << DataType::height(hDataType) << endl;
            cout << "Bit depth: " << DataType::bitDepth(hDataType) << endl;
            cout << "Bytes/line (stride): " << DataType::stride(hDataType) << endl;

            VideoDataType::Format videoFormat = VideoDataType::format(hDataType);
            cout << "Video format: " << VideoDataType::formatString(videoFormat).c_str() << endl;
        }

        frameData = hFrame->buffer()->data();
    }
    catch (GeneralException& e)
    {
        cout << e.what() << endl;
    }
    return frameData;
}
and main (I changed it a little bit) :
int main(int argc, char **argv)
{
QApplication app(argc, argv);
ControlWidget *controlWidget = new ControlWidget;
//controlWidget->show();
try
{
OsCam camera;
int i = 0;
for (int j = 0; j<10000 ; j++)
{
if ((camera.test_cam->liveFrameReady()))
{
Common::FrameHandle loaded = camera.test_cam->liveFrame()->clone();
camera.visualizeFrame(loaded);
controlWidget->update(loaded);
camera.set_shutter(0, 1, 3000 + i * 200);
loaded->~Frame();
}
i++;
}
}
catch (GeneralException& e)
{
std::cout << e.what() << std::endl;
int a;
std::cin >> a;
return 1;
}
}
After cv::named_window
Related
I've made this simple program according to a github code.
All you have to take care of is the while loop at the end of the main() method.
#include "SDL2\SDL.h"

#include <atomic>
#include <iostream>

constexpr const char* WAV_PATH = "applause.wav";
static Uint8* sg_pAudioPos;
static Uint32 sg_AudioLength;
// Owns the raw sample buffer and format description filled in by SDL_LoadWAV.
struct CWavWrapper
{
Uint32 m_Length; // total size of the sample data in bytes
Uint8* m_pBuffer; // sample data allocated by SDL_LoadWAV; release with SDL_FreeWAV
SDL_AudioSpec m_Spec; // format/frequency/callback info passed to SDL_OpenAudio
};
// Keep the console window readable (Windows "pause" prompt), then terminate
// the whole process with the given exit code.
void ExitProgram(int code = 0)
{
    system("pause"); // Windows-only: waits for a key press
    exit(code);
}
void AudioCallback(void* pData, Uint8* pStream, int Length);
#undef main
int main()
{
using std::cout;
using std::cerr;
using std::endl;
if (SDL_Init(SDL_INIT_AUDIO) < 0)
{
cerr << "couldnt init audio: " << SDL_GetError() << endl;
ExitProgram(1);
}
cout << "Loading wav... " << WAV_PATH << endl;
static CWavWrapper Wav;
if(SDL_LoadWAV(WAV_PATH,&Wav.m_Spec,&Wav.m_pBuffer,&Wav.m_Length) == NULL)
{
cerr << "couldnt load wav: " << SDL_GetError() << endl;
ExitProgram(1);
}
Wav.m_Spec.callback = AudioCallback;
Wav.m_Spec.userdata = NULL;
sg_pAudioPos = Wav.m_pBuffer;
sg_AudioLength = Wav.m_Length;
cout << "Opening Audio..." << endl;
if (SDL_OpenAudio(&Wav.m_Spec, NULL) < 0)
{
cerr << "couldn't open audio: " << SDL_GetError() << endl;
ExitProgram(1);
}
cout << "Success! Starting Audio..." << endl;
SDL_PauseAudio(0);
while (sg_AudioLength > 0)
{
//If I remove this output line or as in the GitHub example not do the SDL_Delay() the wav gets played infinitely.
//how does printing out a variable change its value (as it should do only in the callback)?
cout << "sg_AudioLength: " << sg_AudioLength << endl;
}
SDL_CloseAudio();
SDL_FreeWAV(Wav.m_pBuffer);
ExitProgram(0);
}
// SDL audio callback -- runs on SDL's internal audio thread, NOT the main
// thread. Mixes the next `Length` bytes of the loaded wav into the output
// stream and advances the shared playback cursor.
void AudioCallback(void* pData, Uint8* pStream, int Length)
{
// Nothing left to play: leave the stream untouched.
// NOTE(review): SDL2 expects the callback to fully fill pStream; confirm
// the stream is silenced when playback is exhausted, or pad with silence.
if (sg_AudioLength == 0)
return;
// Clamp the final chunk to the bytes actually remaining.
if (Length > sg_AudioLength)
{
Length = sg_AudioLength;
}
SDL_MixAudio(pStream, sg_pAudioPos, Length, SDL_MIX_MAXVOLUME);
// Advance the shared cursor; main() polls sg_AudioLength to detect the end.
sg_pAudioPos += Length;
sg_AudioLength -= Length;
}
As you can see at the end of the main() function, I put a description on what happens when I remove the cout line.
I think it might has to do something with the AudioCallback only being called when a certain code is done. But I am not sure and I would like to get an answer to it.
EDIT: I've noticed that when anything gets processed in the while loop, the audio seems to play right. Does anything from compiling to run time notice that the loop does not change the variable and thinks that this is now an endless loop so the loop does not even try to check the variable again?
I want to convert stereo images captured by Basler cameras to opencv (Mat) format. In the below code i have converted images to opencv format, but in show stages, i can not show the images. please guide me.
Thanks
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
PylonInitialize();
Pylon::PylonAutoInitTerm autoInitTerm;//me
try
{
// Get the transport layer factory.
CTlFactory& tlFactory = CTlFactory::GetInstance();
// Get all attached devices and exit application if no device is found.
DeviceInfoList_t devices;
if (tlFactory.EnumerateDevices(devices) == 0)
{
throw RUNTIME_EXCEPTION("No camera present.");
}
CInstantCameraArray cameras(min(devices.size(), c_maxCamerasToUse));
// Create and attach all Pylon Devices.
for (size_t i = 0; i < cameras.GetSize(); ++i)
{
cameras[i].Attach(tlFactory.CreateDevice(devices[i]));
// Print the model name of the camera.
cout << "Using device " << cameras[i].GetDeviceInfo().GetModelName() << endl;
}
CGrabResultPtr ptrGrabResult;
CImageFormatConverter formatConverter;//me
formatConverter.OutputPixelFormat = PixelType_BGR8packed;//me
CPylonImage pylonImage;//me
// Create an OpenCV image
Mat openCvImage;//me
for (int i = 0; i < c_countOfImagesToGrab && cameras.IsGrabbing(); ++i)
{
cameras.RetrieveResult(5000, ptrGrabResult, TimeoutHandling_ThrowException);
intptr_t cameraContextValue = ptrGrabResult->GetCameraContext();
#ifdef PYLON_WIN_BUILD
#endif
// Print the index and the model name of the camera.
cout << "Camera " << cameraContextValue << ": " << cameras[cameraContextValue].GetDeviceInfo().GetModelName() << endl;
// Now, the image data can be processed.
cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *)ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t)pImageBuffer[0] << endl << endl;
formatConverter.Convert(pylonImage, ptrGrabResult);//me
// Create an OpenCV image out of pylon image
openCvImage = cv::Mat(ptrGrabResult->GetHeight(), ptrGrabResult->GetWidth(), CV_8UC3, (uint8_t *)pylonImage.GetBuffer());//me
if (cameraContextValue == 0)
{
imshow("left camera", openCvImage);
imwrite("right_img.png", openCvImage);
}
else if (cameraContextValue == 1)
{
imshow("right camera", openCvImage);
imwrite("right_img.png", openCvImage);
}
Sleep(3000);
}
}
catch (const GenericException &e)
{
// Error handling
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while (cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
You need to create a window to display an opencv image into, use :
namedWindow("left camera", CV_WINDOW_NORMAL);
imshow("left camera", openCvImage);
There are also a few mistakes in your code: I guess "right_img.png" should be changed to "left_img.png" in the first branch, otherwise you will only ever save one image.
And this is redundant code
PylonInitialize();
Pylon::PylonAutoInitTerm autoInitTerm;
autoInitTerm is automatically calling PylonInitialize() and PylonTerminate(). So you should remove it or remove PylonInitialize() and PylonTerminate()
I think a waitKey(0) is required after the imshow to display the image.
add below piece of code. after completing for (size_t i = 0; i < cameras.GetSize(); ++i)
cameras.StartGrabbing(GrabStrategy_LatestImageOnly, GrabLoop_ProvidedByUser);
Add this to your code. and like above comments remove unnecessary code.
I am attempting to use OpenCV to capture images from multiple cameras via separate thread workers.
Eventually, CaptureWorker will feed frames to a worker queue for processing, but currently I'd like to save the image. The following code runs, but the saved 'frames' are damaged and unable to be opened, but are not empty.
Originally, I was constructing and storing the VideoCapture in CaptureWorker::startCapture(); however, the program crashed while opening the stream. Constructing and opening the VideoCapture in main() turns on the camera and allows the thread to run.
What is causing the frame writing to break? No errors are reported.
main.cpp
int main(int argc, char * argv[]) {
if(argc <= 1) {
cout << "No camera arguments have been specified. Exiting." << endl;
return -1;
}
int numberOfCameras = argc - 1;
int cameraNumbers[argc-1];
for(int i=0; i<numberOfCameras; i++) {
cameraNumbers[i] = atoi(argv[i+1]);
}
VideoCapture cameras[10];
pthread_t cameraThreads[numberOfCameras];
for(int i=0; i<numberOfCameras; i++) {
cout << "Creating camera thread: " << i << endl;
cameras[i] = VideoCapture(i);
CaptureWorker capWorker(cameras[i], i);
pthread_create(&cameraThreads[i], NULL, &CaptureWorker::startCaptureWrap, &capWorker);
}
cout << "Closing Main" << endl;
pthread_exit(NULL);
}
CaptureWorker.cpp
// Captures frames from one camera on its own pthread and saves them as JPEGs
// under Resources/images/Camera_<n>/.
class CaptureWorker {
private:
    int cameraNumber;       // camera index, used for naming only
    string cameraName;      // "Camera_<n>"
    VideoCapture camera;    // note: cv::VideoCapture copies share state
public:
    // The capture must already be opened by the caller; VideoCapture is taken
    // by value, and OpenCV captures copied this way refer to the same device.
    CaptureWorker(VideoCapture camera, int cameraNumber) {
        this->cameraNumber = cameraNumber;
        this->camera = camera;
        this->cameraName = "Camera_";
        cameraName.append(to_string(cameraNumber));
        cout << "CaptureWorker: Worker created" << endl;
    }

    // Thread body: grab frames 0..3, roughly one per second, writing each to
    // disk. ESC (27) in the cvWaitKey window aborts early.
    void *startCapture(void) {
        string imageSavePath = "Resources/images/" + this->cameraName;
        mkdir(imageSavePath.c_str(), 0777);
        cout << "CaptureWorker: Starting capture from camera" << endl;
        int count = 0;
        while (count <= 3) {
            cout << "Capturing frame " << count;
            Mat frame;
            this->camera >> frame;
            cout << " --- ";
            // Writing an empty Mat produces a corrupt/unopenable file --
            // skip the write when the grab failed.
            if (frame.empty()) {
                cout << "Empty frame from camera, skipping" << endl;
            } else {
                string imageNameAndPath;
                imageNameAndPath.append(imageSavePath);
                imageNameAndPath.append("/img_");
                imageNameAndPath.append(to_string(count).c_str());
                imageNameAndPath.append(".jpg");
                cout << "Saving to: " << imageNameAndPath.c_str() << endl;
                // imwrite reports failure via its return value; surface it
                // instead of silently producing nothing.
                if (!imwrite(imageNameAndPath.c_str(), frame)) {
                    cout << "Failed to write " << imageNameAndPath << endl;
                }
            }
            int c = cvWaitKey(1000);
            if ((char)c == 27) {
                break;
            }
            count++;
        }
        return 0;
    }

    // pthread-compatible trampoline: arg must point to a CaptureWorker that
    // outlives the thread.
    static void *startCaptureWrap(void *arg) {
        return ((CaptureWorker *) arg)->startCapture();
    }
};
On OSX using cross g++ compiler in eclipse. Disclaimer: New to C++, OpenCV, multithreading, hopefully I've avoided a silly error.
I am displaying and manipulating a video with open CV as so
// looping through list of videos
for (unsigned int i = 0; i < img_loc.size(); i++)
{
string fileLoc = root_dir + "\\" + img_loc[i];
string name = img_loc[i].substr(0,img_loc[i].find("."));
cv::VideoCapture cap(fileLoc);
image_window win;
int cnt = 0;
while (!win.is_closed())
{
cv::Mat temp;
cap >> temp;
if (temp.empty())
{
break;
}
cout << "frame number ---- " << cap.get(CV_CAP_PROP_POS_FRAMES) << endl;
cv_image<bgr_pixel> cimg(temp);
// some image manipulations
win.clear_overlay();
win.set_image(cimg);
cout << cnt << endl;
// save some details
cnt++;
cout << "after cnt++ ------------ " << cnt << endl;
}
cout << endl << "finished with " << img_loc[i] << ", proceed to the next video?" << endl;
cin.get();
}
which works fine for the first for all frames except the very last. I know there are exactly 200 frames but cnt only ever reaches 198 and the frames only ever go up to 199. The final frame isn't entirely necessary but it means there's some extra data handling that I'd rather avoid. Any ideas?
Hi I have written the following code in OpenCV. Basically it reads a video from file. Now, I want to create a function to resize the video but I am unsure how to call the "VideoCapture" class from the main function. I have written a sample function to see if it'll read anything but it compiles fine showing stuff from the main function but nothing from the newly created function. Any help? P.S I'm not very experienced, bear with me LOL.
using namespace cv;
using namespace std;
void resize_video(VideoCapture capture);
int main(int argc, char** argv)
{
VideoCapture capture; //the C++ API class to capture the video from file
if(argc == 2)
capture.open(argv[1]);
else
capture.open(0);
if(!capture.isOpened())
{
cout << "Cannot open video file " << endl;
return -1;
}
Mat frame;
namedWindow("display", CV_WINDOW_AUTOSIZE);
cout << "Get the video dimensions " << endl;
int fps = capture.get((int)CV_CAP_PROP_FPS);
int height = capture.get((int)CV_CAP_PROP_FRAME_HEIGHT);
int width = capture.get((int)CV_CAP_PROP_FRAME_WIDTH);
int noF = capture.get((int)CV_CAP_PROP_FRAME_COUNT);
CvSize size = cvSize(width , height);
cout << "Dimensions: " << width << height << endl;
cout << "Number of frames: " << noF << endl;
cout << "Frames per second: " << fps << endl;
while(true)
{
capture >> frame;
if(frame.empty())
break;
imshow("display", frame);
if (waitKey(30)== 'i')
break;
}
//resize_video();
}
// Placeholder: will eventually resize the frames pulled from `capture`.
// Currently only announces itself so the call can be traced.
void resize_video(VideoCapture capture)
{
    cout << "Begin resizing video " << endl;
}
you want to call your function INSIDE the while loop, not after it (too late, program over)
so, it might look like this:
// Placeholder for per-frame processing; `image` may be modified in place and
// the change is visible to the caller's subsequent imshow().
void resize_video( Mat & image )
{
    // do your processing here
    cout << "Begin resizing video " << endl;
}
and call it like:
// Grab-process-show loop: resize_video() must run here, on each frame,
// before the frame is displayed (not after the loop, when playback is over).
while(true)
{
capture >> frame;
// empty frame signals end of stream
if(frame.empty())
break;
resize_video(frame);
imshow("display", frame);
// ~30 ms per frame; pressing 'i' quits
if (waitKey(30)== 'i')
break;
}