I wrote a program in C++ to take a snapshot with a uEye camera, but the picture that gets saved is almost entirely white; that said, sometimes you can see a tiny bit of the road, but it is still mostly white. I believe it's an issue with an auto parameter, but I feel as if I have tried everything.
Below is my code:
void MainWindow::CaptureImage(){
int initcamera = is_InitCamera(&hCam, hWndDisplay);
if(initcamera != IS_SUCCESS)
{
cout<<endl<<"Failed to initialize the camera"<<endl;
exit(-1);
}else{
cout<<endl<<"Initialized Camera"<<endl;
}
int camerainfo = is_GetCameraInfo (hCam, &camera_info);
if(camerainfo != IS_SUCCESS)
{
cout<<endl<<"Unable to acquire camera information"<<endl;
exit(-1);
}else{
cout<<endl<<"Camera information required"<<endl;
}
int sensorinfo = is_GetSensorInfo (hCam, &sInfo);
if(sensorinfo != IS_SUCCESS)
{
cout<<endl<<"Unable to acquire sensor information"<<endl;
exit(-1);
}else{
cout<<endl<<"Sensor information acquired"<<endl;
}
int colormode = is_SetColorMode(hCam, IS_CM_BGR8_PACKED);
if(colormode != IS_SUCCESS)
{
cout<<endl<<"Unable to set the color mode"<<endl;
exit(-1);
}else{
cout<<endl<<"Color mode set"<<endl;
}
int pXPos = (sInfo.nMaxWidth);
int pYPos = (sInfo.nMaxHeight);
int rit = is_AllocImageMem (hCam,pXPos,pYPos, 24, &m_pcImageMemory, &m_lMemoryId);
if(rit != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO INITIALIZE MEMORY"<<endl;
system("PAUSE");
exit(-1);
}else{
cout<<endl<<"INITIALIZED MEMORY"<<endl;
}
int rat = is_SetImageMem (hCam, m_pcImageMemory, m_lMemoryId);
if(rat != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO ACTIVATE MEMORY"<<endl;
system("PAUSE");
exit(-1);
}else{
cout<<endl<<"ACTIVATE MEMORY"<<endl;
}
double strength_factor = 1.0;
int colorcorrection = is_SetColorCorrection(hCam, IS_CCOR_ENABLE, &strength_factor);
double pval = 1;
int whiteb = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &pval, 0);
double gval = 1;
int gains = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &gval, 0);
int dummy;
char *pMem, *pLast;
IMAGE_FILE_PARAMS ImageFileParams;
ImageFileParams.pwchFileName = L"./TestImage.bmp"; /// <-- Insert name and location of the image
ImageFileParams.pnImageID = NULL;
ImageFileParams.ppcImageMem = NULL;
ImageFileParams.nQuality = 0;
ImageFileParams.nFileType = IS_IMG_BMP;
int sho = is_FreezeVideo(hCam, IS_WAIT);
if(sho != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO ACQUIRE FROM THE CAMERA"<<endl;
system("PAUSE");
exit(-1);
}
if (sho == IS_SUCCESS){
int m_Ret = is_GetActiveImageMem(hCam, &pLast, &dummy);
int n_Ret = is_GetImageMem(hCam, (void**)&pLast);
}
if (is_ImageFile(hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof(ImageFileParams)) == IS_SUCCESS)
{
cout << endl << "An Image was saved" << endl;
}
else
{
cout << endl << "something went wrong" << endl;
}
// Releases an image memory that was allocated
//is_FreeImageMem(hCam, pcImageMemory, nMemoryId);
// Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
is_ExitCamera(hCam);
}
I have tried many things, like the parameters below, but the image is still just white. I had the camera in a darker room and the image came out OK, so I definitely think it's due to the daylight outside.
const wstring filenameU(filename.begin(), filename.end());
is_ParameterSet(hCam, IS_PARAMETERSET_CMD_LOAD_FILE,(void*) filenameU.c_str(), 0);
unsigned int range[3];
memset(range, 0, sizeof(range));
is_PixelClock(hCam, IS_PIXELCLOCK_CMD_GET_RANGE, (void*)range, sizeof(range));
unsigned int maximumPixelClock = range[1];
is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&maximumPixelClock, sizeof(maximumPixelClock));
double pval1 = 1, pval2 = 0; // also tried pval1 = auto_exposure here
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER,&pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_FRAMERATE,&pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_AUTO_WB_OFFSET, &pval1, &pval2);
int desiredFrameRate = 60;
is_SetFrameRate(hCam, desiredFrameRate, &m_actualFrameRate);
You cannot just take one picture, because the exposure might be too high. You have to capture a few images to give the auto exposure control a chance to lower the exposure. The reason you have to capture some frames is that the AEC is done in software and can only start to change parameters once it receives some frames.
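For example, something along these lines (a minimal sketch that reuses the hCam handle, the auto-parameter calls and ImageFileParams from the question; ten warm-up frames is an arbitrary assumption, tune as needed):
// Enable the software auto exposure/gain first, as in the question.
double enable = 1, dummy = 0;
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &enable, &dummy);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, &dummy);
// Grab and discard a handful of frames so the AEC can converge.
for (int i = 0; i < 10; ++i)
{
    if (is_FreezeVideo(hCam, IS_WAIT) != IS_SUCCESS)
        break;
}
// Only now capture and save the frame that goes to disk.
if (is_FreezeVideo(hCam, IS_WAIT) == IS_SUCCESS)
{
    is_ImageFile(hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof(ImageFileParams));
}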
Related
I have a function that runs a while loop and takes an int parameter. For example, I first called the function with 4; now I want to call it again with 16.
void Widget::on_fourCameras_clicked(){
CamerasInitialize(4);
}
void Widget::on_sixteenCameras_clicked(){
isNewCameraSelected = true;
CamerasInitialize(16);
}
When I call it with 16, the previous call that I started with 4 is not removed from memory. I'm trying to break the previous loop by setting a flag and then starting the second one. But since the click handler does not get to change the flag before the first call ends, the call with 16 starts first and only then does the flag change. I can't use multiple threads because of the functions I use. What do I need to do to call the same while-loop function repeatedly and remove the previous calls from memory?
Ubuntu 20.04.2
Qt / C++
void Widget::CamerasInitialize(int camNumber) {
namedWindow( "Output", cv::WINDOW_OPENGL );
setWindowProperty( "Output", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN );
cv::cuda::GpuMat *inputFrames = new cv::cuda::GpuMat[camNumber];
cv::cuda::GpuMat *inputFramesConverted = new cv::cuda::GpuMat[camNumber];
cv::cuda::GpuMat *sphericalDistortionOutput = new cv::cuda::GpuMat[camNumber];
cv::Ptr<cv::cudacodec::VideoReader> videoReader[camNumber];
cv::cuda::PtrStepSz<int32_t> *d_ptrs = NULL;
cv::cuda::PtrStepSz<int32_t> *h_ptrs = new cv::cuda::PtrStepSz<int32_t>[camNumber];
cudaMalloc(&d_ptrs, camNumber * sizeof(cv::cuda::PtrStepSz<int32_t>));
std::time_t timeBegin = std::time(0);
int tick = 0;
long frameCounter = 0;
setCurrentMatrixCpuToGpu(camNumber);
while (true)
{
for(int i=0; i<camNumber; i++){
videoReader[i]->nextFrame(inputFrames[i]);
h_ptrs[i] = inputFrames[i];
}
if(isMovedCamera){
pBackSub.release();
pBackSub = cuda::createBackgroundSubtractorMOG();
isMovedCamera = false;
}
cudaMemcpy(d_ptrs, h_ptrs, camNumber * sizeof(cv::cuda::PtrStepSz<int32_t>), cudaMemcpyHostToDevice);
GpuMat outval;
Mat outputCPU;
if(camNumber == 4){
arrayMult(d_ptrs, outval, cameraMatrixOnGpu4Camera, pixelW, pixelH);
}else if(camNumber == 16){
arrayMult(d_ptrs, outval, cameraMatrixOnGpu16Camera, pixelW, pixelH);
}
//cuda::bilateralFilter(videoFrames[0], colorImages[0], 90, 30, 30);
frameCounter++;
std::time_t timeNow = std::time(0) - timeBegin;
if (timeNow - tick >= 1)
{
tick++;
//cout << "Frames per second: " << frameCounter << endl;
frameCounter = 0;
}
outval = outval(Rect(cropPositionsX, cropPositionsY, cropPositionsW, cropPositionsH));
cuda::resize(outval, outval, Size(SCREENRESOLUTION_WITDH, SCREENRESOLUTION_HEIGHT), 0, 0, INTER_AREA);
imshow( "Output", outval);
setMouseCallback("Output", CallBackFunc, NULL);
if (waitKey(1) == 'q' || isNewCameraSelected)
{
isNewCameraSelected = false;
break;
}
}
//destroyWindow("Output");
}
I am using libx264 compiled from source. It was configured to produce both a .dll and a .lib with this command:
./configure --disable-cli --enable-shared --extra-ldflags=-Wl,--output-def=libx264.def
I am using the libx264 API in my screen-sharing program with the preset - "veryfast", tune - "zerolatency", profile - "high" and also the following settings.
param.i_csp = X264_CSP_BGRA;
param.i_threads = 1;
param.i_width = width;
param.i_height = height;
param.i_fps_num = fps;
param.i_fps_den = 1;
param.rc.i_bitrate = bitrate;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
For these settings the program works fine. I specified it as single threaded by setting param.i_threads = 1.
If this line is removed, x264 defaults to multi-threading and automatically sets param.i_threads to roughly 1.5x the number of CPU cores, which gives better performance than running single-threaded.
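For reference, the thread count can also be set explicitly instead of removing the line; a sketch (X264_THREADS_AUTO is x264's own "choose for me" value, and the hardware_concurrency variant is only an illustration):
param.i_threads = X264_THREADS_AUTO;   // 0: let libx264 pick the count itself (~1.5x cores)
// or pin it explicitly to the core count reported by the OS:
// param.i_threads = (int)std::thread::hardware_concurrency();   // needs <thread>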
But when I remove param.i_threads = 1 to make it multi-threaded, the generated output is fully grey. I cannot see any output when I view the live stream with VLC, or sometimes I see a weird output.
I am using this bitmap image as an example (https://imgur.com/a/l8LCd1l). Only this same image is being encoded multiple times. When it is saved into a .h264 video, it is clearly viewable. But when the encoded payload is sent through RTMP, the live stream produces very bad and weird output (or sometimes no output). This is the weird output I'm seeing most of the time for this image: https://imgur.com/a/VdyX1Zm
This is the full example code in which I am both streaming and writing a video file of the same picture. It uses the srs librtmp library. There is no error, but the stream output is weird.
Only if you add param.i_threads = 1; to this code is the output stream viewable. The problem is that it should be viewable with both single-threaded and multi-threaded encoding.
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <x264.h>
#include "srs_librtmp.h"
#pragma comment(lib, "C:/Softwares/x264/libx264.lib")
using namespace std;
int check_ret(int ret);
int main()
{
int dts = 0;
x264_param_t param;
x264_t* h;
x264_nal_t* nals;
int i_nal;
int pts = 0;
int i_frame_size;
x264_picture_t picIn;
x264_picture_t picOut;
x264_param_default_preset(&param, "veryfast", "zerolatency");
//x264 settings
param.i_csp = X264_CSP_BGRA;
param.i_width = 1920;
param.i_height = 1080;
param.i_fps_num = 30;
param.i_fps_den = 1;
param.rc.i_bitrate = 2500;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
x264_param_apply_profile(&param, "high");
h = x264_encoder_open(&param);
//allocate picture
x264_picture_alloc(&picIn, param.i_csp, param.i_width, param.i_height);
//picture settings
picIn.img.i_plane = 1;
picIn.img.i_stride[0] = 4 * param.i_width;
picIn.i_type = X264_TYPE_AUTO;
int header_size = x264_encoder_headers(h, &nals, &i_nal);
FILE* fptr;
fopen_s(&fptr, "example1.h264", "wb");
// write sps and pps in the video file
fwrite(nals->p_payload, header_size, 1, fptr);
int size = 1920 * 1080 * 4;
char* bmp = new char[size];
FILE* bitptr;
errno_t err = fopen_s(&bitptr, "flower.bmp", "rb");
fseek(bitptr, 54, SEEK_SET);
fread(bmp, size, 1, bitptr);
fclose(bitptr);
srs_rtmp_t rtmp = srs_rtmp_create("127.0.0.1:1935/live/test");
if (srs_rtmp_handshake(rtmp) != 0)
{
std::cout << "Simple handshake failed.";
return -1;
}
std::cout << "Handshake completed successfully.\n";
if (srs_rtmp_connect_app(rtmp) != 0) {
std::cout << "Connecting to host failed.";
return -1;
}
std::cout << "Connected to host successfully.\n";
if (srs_rtmp_publish_stream(rtmp) != 0) {
std::cout << "Publish signal failed.";
}
std::cout << "Publish signal success\n";
// write sps and pps in the live stream
int ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nals->p_payload), header_size, 0, 0);
ret = check_ret(ret);
if (!ret)
return -1;
std::cout << "SPS and PPS sent.\n";
// main loop
std::cout << "Now streaming and encoding\n";
int i = 1800;
while (i--)
{
picIn.img.plane[0] = reinterpret_cast<uint8_t*>(bmp);
picIn.i_pts = pts++;
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, &picIn, &picOut);
if (i_frame_size)
{
for (int j = 0; j < i_nal; j++)
{
x264_nal_t* nal = nals + j;
// write data in the video file
fwrite(nal->p_payload, nal->i_payload, 1, fptr);
// write data in the live stream
ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nal->p_payload), nal->i_payload, dts, dts);
ret = check_ret(ret);
if (!ret)
{
return -1;
}
}
}
else
{
std::cout << "i_frame_size = 0 (encoder failed)\n";
}
dts += 33;
}
while (x264_encoder_delayed_frames(h))
{
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, NULL, &picOut);
if (i_frame_size)
{
fwrite(nals->p_payload, i_frame_size, 1, fptr);
}
}
std::cout << "\nAll done\n";
std::cout << "Output video is example1.h264 and it is viewable in VLC";
return 0;
}
int check_ret(int ret)
{
if (ret != 0) {
if (srs_h264_is_dvbsp_error(ret)) {
srs_human_trace("ignoring drop video error, code=%d", ret);
}
else if (srs_h264_is_duplicated_sps_error(ret)) {
srs_human_trace("ignoring duplicated sps, code=%d", ret);
}
else if (srs_h264_is_duplicated_pps_error(ret)) {
srs_human_trace("ignoring duplicated pps, code=%d", ret);
}
else {
srs_human_trace("sending h264 raw data failed. ret=%d", ret);
return 0;
}
}
return 1;
}
If you would like to download the original flower.bmp file, here is the link: https://gofile.io/d/w2kX56
This error can be reproduced in any other bmp file also.
Please tell me what is causing this problem when multi-threading is enabled. Am I setting wrong values? Is the code in which I am streaming the encoded data wrong?
TL;DR
I am trying to capture keyboard events (more specifically, the Ctrl+c command) in my own C++ program. I am attempting this through generic keyboard presses in SDL2.
END TL;DR
I have found links on SO and the internet that cover the subject of handling keyboard events with SDL2. I have a few of them listed here.
https://stackoverflow.com/questions/28105533/sdl2-joystick-dont-capture-pressed-event
https://lazyfoo.net/tutorials/SDL/04_key_presses/index.php
http://www.cplusplus.com/forum/windows/182214/
http://gigi.nullneuron.net/gigilabs/handling-keyboard-and-mouse-events-in-sdl2/
The major issue, I think, is that I am also using an Xbox-style joystick at the same time. I have had no issues whatsoever with capturing joystick events; I have been doing that for a long time now. What I am having trouble with is getting anything from the keyboard to generate an event. I have tried if(event.type == SDL_KEYDOWN) and then checking which key it was, but that appears to return nothing. I feel like there is some macro that I need to define to allow this, since I keep finding the same solutions on the internet.
I have included the entire script that I am running at the moment.
#include <boost/thread.hpp>
// Time library
#include <chrono>
// vector data structure
#include <vector>
// Thread-safe base variables
#include <atomic>
// std::cout
#include <iostream>
// Joystick library
#include <SDL2/SDL.h>
// Counters for printing
std::atomic_int printcounter{ 0 };
// This is every 3 * 1000 milliseconds
const int printer = 300;
// If an event is found, allow for printing.
std::atomic_bool eventupdate{ false };
// This function converts the raw joystick axis from the SDL library to proper double precision floating-point values.
double intToDouble(int input)
{
return (double) input / 32767.0 ;
}
// Prevent joystick values from going outside the physical limit
double clamp(double input)
{
return (input < -1.0) ? -1.0 : ( (input > 1.0) ? 1.0 : input);
}
// SDL library joystick deadband
const int JOYSTICK_DEAD_ZONE = 5000;
// These are the raw read in values from the joystick in XInput (XBox) mode.
//Normalized direction
int leftX = 0;
int leftY = 0;
int rightX = 0;
int rightY = 0;
int leftTrigger = -32768;
int rightTrigger = -32768;
// Button array (12 slots so the button indices 0-11 used further down all fit)
uint buttons[12] = { 0 };
// The POV hat is only 4 bits - 1, 2, 4, 8
int povhat = 0;
// These are the rectified joystick values
double leftstickx = 0;
double leftsticky = 0;
double rightstickx = 0;
double rightsticky = 0;
double lefttrigger = 0;
double righttrigger = 0;
// These are the rectified boolean buttons
bool leftstickbut = false;
bool rightstickbut = false;
bool xbutton = false;
bool ybutton = false;
bool abutton = false;
bool bbutton = false;
bool rightbut = false;
bool leftbut = false;
bool startbut = false;
bool backbut = false;
bool centbut = false;
// This is the boolean that controls running the robot.
std::atomic_bool quitrobot{false};
// Joystick values
static double joyvalues[6] = { 0, 0, 0, 0, 0, 0};
static bool joybuttons[11] = { false };
// Sleep function
void wait(int milliseconds)
{
boost::this_thread::sleep_for(boost::chrono::milliseconds{milliseconds});
}
// Now the main code
int main(int argc, char** argv)
{
// Now the robot goes through the looping code until a quit flag is set to true
while ( ! quitrobot)
{
// Now we look for an Xbox-style joystick
std::cout << "Looking for gamepad..." << std::endl;
while(true)
{
// Now the program waits until an Xbox-style joystick is plugged in.
// resetting SDL makes things more stable
SDL_Quit();
// restart SDL with the expectation that a jostick is required.
SDL_Init(SDL_INIT_JOYSTICK);
// SDL_HINT_GRAB_KEYBOARD
// check for a joystick
int res = SDL_NumJoysticks();
if (res > 0) { break; } // Here a joystick has been detected.
if (res < 0)
{
std::cout << "Joystick detection error: " << std::to_string(res) << std::endl;
}
// we don't want the program running super fast when detecting hardware.
wait(20);
}
// Now we check to make sure that the joystick is valid.
// Open the joystick for reading and store its handle in the joy variable
SDL_Joystick *joy = SDL_JoystickOpen(0);
if (joy == NULL) {
/* back to top of while loop */
continue;
}
// Get information about the joystick
const char *name = SDL_JoystickName(joy);
const int num_axes = SDL_JoystickNumAxes(joy);
const int num_buttons = SDL_JoystickNumButtons(joy);
const int num_hats = SDL_JoystickNumHats(joy);
printf("Now reading from joystick '%s' with:\n"
"%d axes\n"
"%d buttons\n"
"%d hats\n\n",
name,
num_axes,
num_buttons,
num_hats);
/* I'm using a logitech F350 wireless in X mode.
If num axis is 4, then gamepad is in D mode, so neutral drive and wait for X mode.
[SAFETY] This means 'D' becomes our robot-disable button.
This can be removed if that's not the goal. */
if (num_axes < 5) {
/* back to top of while loop */
continue;
}
// This is the read joystick and drive robot loop.
while(true)
{
// poll for disconnects or bad things
SDL_Event e;
if (SDL_PollEvent(&e)) {
// SDL generated quit command
if (e.type == SDL_QUIT) { break; }
// Checking for Ctrl+c on the keyboard
// SDL_Keymod modstates = SDL_GetModState();
// if (modstates & KMOD_CTRL)
// {
// One of the Ctrl keys are being held down
// std::cout << "Pressed Ctrl key." << std::endl;
// }
if(e.key.keysym.scancode == SDLK_RCTRL || e.key.keysym.scancode == SDLK_LCTRL || SDL_SCANCODE_RCTRL == e.key.keysym.scancode || e.key.keysym.scancode == SDL_SCANCODE_LCTRL)
{
std::cout << "Pressed QQQQ." << std::endl;
}
if (e.type == SDL_KEYDOWN)
{
switch(e.key.keysym.sym){
case SDLK_UP:
std::cout << "Pressed up." << std::endl;
break;
case SDLK_RCTRL:
std::cout << "Pressed Right Ctrl." << std::endl;
break;
case SDLK_LCTRL:
std::cout << "Pressed Left Ctrl." << std::endl;
break;
}
// Select surfaces based on key press
switch( e.key.keysym.sym )
{
case SDLK_UP:
std::cout << "Pressed Up." << std::endl;
break;
case SDLK_DOWN:
std::cout << "Pressed Down." << std::endl;
break;
case SDLK_LEFT:
std::cout << "Pressed Left." << std::endl;
break;
case SDLK_RIGHT:
std::cout << "Pressed Right." << std::endl;
break;
}
std::cout << "Pressed blah di blah blah please print me." << std::endl;
}
// Checking which joystick event occured
if (e.jdevice.type == SDL_JOYDEVICEREMOVED) { break; }
// Since joystick is not erroring out, we can
else if( e.type == SDL_JOYAXISMOTION )
{
//Motion on controller 0
if( e.jaxis.which == 0 )
{
// event happened
eventupdate = true;
// Left X axis
if( e.jaxis.axis == 0 )
{
// dead zone check
if( e.jaxis.value > -JOYSTICK_DEAD_ZONE && e.jaxis.value < JOYSTICK_DEAD_ZONE)
{
leftX = 0;
}
else
{
leftX = e.jaxis.value;
}
}
// Right Y axis
else if( e.jaxis.axis == 1 )
{
// dead zone check
if( e.jaxis.value > -JOYSTICK_DEAD_ZONE && e.jaxis.value < JOYSTICK_DEAD_ZONE)
{
leftY = 0;
}
else
{
leftY = e.jaxis.value;
}
}
// Left trigger
else if ( e.jaxis.axis == 2 )
{
// dead zone check
if( e.jaxis.value > -JOYSTICK_DEAD_ZONE && e.jaxis.value < JOYSTICK_DEAD_ZONE)
{
leftTrigger = 0;
}
else
{
leftTrigger = e.jaxis.value;
}
}
// Right X axis
else if( e.jaxis.axis == 3 )
{
// dead zone check
if( e.jaxis.value > -JOYSTICK_DEAD_ZONE && e.jaxis.value < JOYSTICK_DEAD_ZONE)
{
rightX = 0;
}
else
{
rightX = e.jaxis.value;
}
}
// Right Y axis
else if( e.jaxis.axis == 4 )
{
// dead zone check
if( e.jaxis.value > -JOYSTICK_DEAD_ZONE && e.jaxis.value < JOYSTICK_DEAD_ZONE)
{
rightY = 0;
}
else
{
rightY = e.jaxis.value;
}
}
// Right trigger
else if( e.jaxis.axis == 5 )
{
// dead zone check
if( e.jaxis.value > -JOYSTICK_DEAD_ZONE && e.jaxis.value < JOYSTICK_DEAD_ZONE)
{
rightTrigger = 0;
}
else
{
rightTrigger = e.jaxis.value;
}
}
}
}
else if ( e.type == SDL_JOYBUTTONUP || e.type == SDL_JOYBUTTONDOWN )
{
// now we are looking for button events.
if (e.jbutton.which == 0)
{
// event happened
eventupdate = true;
// buttons[e.jbutton.button] = e.jbutton.state;
if (e.jbutton.button == 0)
{
buttons[0] = e.jbutton.state;
}
if (e.jbutton.button == 1)
{
buttons[1] = e.jbutton.state;
}
if (e.jbutton.button == 2)
{
buttons[2] = e.jbutton.state;
}
if (e.jbutton.button == 3)
{
buttons[3] = e.jbutton.state;
}
if (e.jbutton.button == 4)
{
buttons[4] = e.jbutton.state;
}
if (e.jbutton.button == 5)
{
buttons[5] = e.jbutton.state;
}
if (e.jbutton.button == 6)
{
buttons[6] = e.jbutton.state;
}
if (e.jbutton.button == 7)
{
buttons[7] = e.jbutton.state;
}
if (e.jbutton.button == 8)
{
buttons[8] = e.jbutton.state;
}
if (e.jbutton.button == 9)
{
buttons[9] = e.jbutton.state;
}
if (e.jbutton.button == 10)
{
buttons[10] = e.jbutton.state;
}
if (e.jbutton.button == 11)
{
buttons[11] = e.jbutton.state;
}
}
}
else if ( e.type == SDL_JOYHATMOTION)
{
if (e.jhat.which == 0)
{
// event happened
eventupdate = true;
povhat = e.jhat.value;
}
}
}
// Now that we have read in the values directly from the joystick we need to convert them properly.
leftstickx = clamp(intToDouble(leftX));
leftsticky = clamp(intToDouble(leftY));
rightstickx = clamp(intToDouble(rightX));
rightsticky = clamp(intToDouble(rightY));
lefttrigger = clamp(intToDouble(leftTrigger));
righttrigger = clamp(intToDouble(rightTrigger));
// rectify the buttons to become boolean values instead of integers.
abutton = buttons[0] > 0;
bbutton = buttons[1] > 0;
xbutton = buttons[2] > 0;
ybutton = buttons[3] > 0;
//
rightbut = buttons[4] > 0;
leftbut = buttons[5] > 0;
//
centbut = buttons[8] > 0;
startbut = buttons[7] > 0;
backbut = buttons[6] > 0;
//
leftstickbut = buttons[9] > 0;
rightstickbut = buttons[10] > 0;
// Transfer axis to the array.
joyvalues[0] = leftstickx;
joyvalues[1] = leftsticky;
joyvalues[2] = rightstickx;
joyvalues[3] = rightsticky;
joyvalues[4] = lefttrigger;
joyvalues[5] = righttrigger;
// We are using the "B" button to quit the program
if (bbutton)
{
quitrobot = true;
std::cout << "Shutting down program." << std::endl;
break;
}
if (eventupdate)
{
// This section of code is meant for running code that happens when SDL has detected an event.
// This code section can be used for something else as well.
if (e.key.keysym.sym == SDL_SCANCODE_RCTRL || e.key.keysym.sym == SDL_SCANCODE_LCTRL || e.key.keysym.sym == SDLK_LCTRL || e.key.keysym.sym == SDLK_RCTRL)
{
std::cout << "SDL Event: Ctrl pressed.\n" << std::endl;
}
// Simply print the event
eventupdate = false;
} else {}
if ( ! (printcounter = ((printcounter + 1) % printer)))
{
// const Uint8 *state = SDL_GetKeyboardState(NULL);
// if (state[SDL_SCANCODE_RETURN]) {
// printf("<RETURN> is pressed.\n");
// }
}
// Sleep the program for a short bit
wait(5);
}
// Reset SDL since the robot is no longer being actuated.
SDL_JoystickClose(joy);
// We get here only if the joystick has been disconnected.
std::cout << "Gamepad disconnected.\n" << std::endl;
}
// The program then completes.
return 0;
}
The most important part of that huge block of code is lines 129 to 179. I was doing more fooling around trying to get key capture to work but I could not get a response. Everywhere else is logic for the joystick reading (which has worked for me flawlessly). I have been referring to this link for all of the macros available to the programmer. I have not been able to capture the left control button or the right control button. I have also been trying the arrow keys for kicks as well and those are not working either. I know there are remnants of other code snippets thrown in there as I was testing. Given all of my testing, I am just not sure how to capture any keyboard keys, let alone Ctrl+c. None of my desired print statements print.
I am able to run the code on Ubuntu 18.04 LTS with the stock GUI and window manager. I have a feeling the problem might also have something to do with the operating system not letting SDL2 capture the keyboard, but I don't know what to do to allow only the keyboard, or certain keys, to be consumed by SDL2.
I am trying not to use platform-specific code, since I have already successfully used platform-specific signal interrupts. My goal is simply to make a certain combination of pressed keys terminate the program. I figured that, since SDL2 can access all the keys on a keyboard, I should be able to simply capture those key presses.
Unless you want to read keyboard input from stdin you need to open a window and focus it to get key events in SDL. Here's an example (note the call to SDL_Init uses SDL_INIT_VIDEO and there's some code in there for rendering a background and handling resize events).
#include <iostream>
#include <SDL2/SDL.h>
int main(int argc, char** argv)
{
if (SDL_Init(SDL_INIT_VIDEO) < 0) { // also initialises the events subsystem
std::cout << "Failed to init SDL.\n";
return -1;
}
SDL_Window *window = SDL_CreateWindow(
"Window", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
680, 480, SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
if(!window) {
std::cout << "Failed to create window.\n";
return -1;
}
// Create renderer and select the color for drawing.
SDL_Renderer* renderer = SDL_CreateRenderer(window, -1, 0);
SDL_SetRenderDrawColor(renderer, 200, 200, 200, 255);
while(true)
{
// Clear the entire screen and present.
SDL_RenderClear(renderer);
SDL_RenderPresent(renderer);
SDL_Event event;
while (SDL_PollEvent(&event))
{
if (event.type == SDL_QUIT) {
SDL_Quit();
return 0;
}
if(event.type == SDL_WINDOWEVENT) {
if (event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED) {
int width = event.window.data1;
int height = event.window.data2;
std::cout << "resize event: " << width << "," << height << std::endl;
}
}
if (event.type == SDL_KEYDOWN) {
int key = event.key.keysym.sym;
if (key == SDLK_ESCAPE) {
SDL_Quit();
return 0;
}
std::cout << "key event: " << key << std::endl;
}
}
}
return 0;
}
Key events are sent to the currently focused window, and SDL uses the underlying OS to handle this; e.g. on Linux this means SDL calls X11 functions.
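For the Ctrl+c combination mentioned in the question, the check inside that SDL_PollEvent loop could look something like this (a sketch only, using the same event variable as the example above):
// Inside the SDL_PollEvent loop:
if (event.type == SDL_KEYDOWN &&
    (event.key.keysym.mod & KMOD_CTRL) &&   // either Ctrl key is held
    event.key.keysym.sym == SDLK_c)         // together with 'c'
{
    std::cout << "Ctrl+c pressed, quitting.\n";
    SDL_Quit();
    return 0;
}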
edit:
As detailed in this question, it appears you can also get a snapshot of the state of the keys. In either case I think a window needs to be opened to receive events, even though it took me several edits to come to that conclusion (apologies if that caused any confusion). Your OS may also provide functions for polling the state of the keyboard without using events or windows, such as GetAsyncKeyState on Windows.
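A minimal sketch of that key-state approach with SDL2 (SDL_GetKeyboardState returns a pointer to SDL's internal array, so it only needs to be fetched once; events still have to be pumped for the array to update):
SDL_PumpEvents();   // or keep running an SDL_PollEvent loop, which pumps internally
const Uint8 *state = SDL_GetKeyboardState(NULL);
if ((state[SDL_SCANCODE_LCTRL] || state[SDL_SCANCODE_RCTRL]) && state[SDL_SCANCODE_C])
{
    std::cout << "Ctrl+c is currently held down.\n";
}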
I am trying to make a program that uses servos connected to an Arduino to follow your face. I'm using Visual C++ in Visual Studio 2017 with OpenCV 4.1 to do the facial recognition, then sending the location of the identified face to an Arduino via a serial connection.
I'm new to opencv so I've been going through many tutorials to try to make a code that works.
The plan is to do the facial recognition and calculate the location of the face in Visual Studio, then combine the x and y locations into a single string and send it to the Arduino over serial. The Arduino splits the coordinates, separated by a colon, using strtok(), and will then move the servos accordingly to keep the tracked face in the center of the screen.
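Concretely, the string on the wire looks like "320:240\n" (zero-padded, colon-separated, newline-terminated). A condensed sketch of the PC side (the helper name is hypothetical; it uses the SerialPort class and the <sstream>/<iomanip> headers from the full code below, and writes only the message bytes):
// Hypothetical helper: format the face centre as "xxx:yyy\n" and send it.
void sendFaceCentre(SerialPort &arduino, int centerX, int centerY)
{
    std::stringstream ss;
    ss << std::setw(3) << std::setfill('0') << centerX << ":"
       << std::setw(3) << std::setfill('0') << centerY << "\n";
    std::string msg = ss.str();                       // e.g. "320:240\n"
    arduino.writeSerialPort(&msg[0], (unsigned int)msg.size());
}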
I have tested the serial comm in a separate C++ project and it seems to work fine with the current Arduino code (nowhere near finished, because I ran into a hiccup with serial comm), except that when I send the location string, the servo moves to the desired spot and then returns to its starting location.
When I try to implement the serial communication in the project with OpenCV, it sends the location once and then appears to stop sending serial commands. I've tried debugging manually by calling the sendSerial function in other locations to see if I can get it to send. I've looked around for solutions but haven't found anything definite, other than that it may be the waitKey(10) function. If that is so, is there a way around it?
Thanks.
###############SerialPort.h##############
#ifndef SERIALPORT_H
#define SERIALPORT_H
#define ARDUINO_WAIT_TIME 2000
#define MAX_DATA_LENGTH 255
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
class SerialPort
{
private:
HANDLE handler;
bool connected;
COMSTAT status;
DWORD errors;
public:
SerialPort(char *portName);
~SerialPort();
int readSerialPort(char *buffer, unsigned int buf_size);
bool writeSerialPort(char *buffer, unsigned int buf_size);
bool isConnected();
};
#endif // SERIALPORT_H
#################SerialSource.cpp##################
#include "SerialPort.h"
SerialPort::SerialPort(char *portName)
{
this->connected = false;
this->handler = CreateFileA(static_cast<LPCSTR>(portName),
GENERIC_READ | GENERIC_WRITE,
0,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (this->handler == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_FILE_NOT_FOUND) {
printf("ERROR: Handle was not attached. Reason: %s not available\n", portName);
}
else
{
printf("ERROR!!!");
}
}
else {
DCB dcbSerialParameters = { 0 };
if (!GetCommState(this->handler, &dcbSerialParameters)) {
printf("failed to get current serial parameters");
}
else {
dcbSerialParameters.BaudRate = CBR_9600;
dcbSerialParameters.ByteSize = 8;
dcbSerialParameters.StopBits = ONESTOPBIT;
dcbSerialParameters.Parity = NOPARITY;
dcbSerialParameters.fDtrControl = DTR_CONTROL_ENABLE;
if (!SetCommState(handler, &dcbSerialParameters))
{
printf("ALERT: could not set Serial port parameters\n");
}
else {
this->connected = true;
PurgeComm(this->handler, PURGE_RXCLEAR | PURGE_TXCLEAR);
Sleep(ARDUINO_WAIT_TIME);
}
}
}
}
SerialPort::~SerialPort()
{
if (this->connected) {
this->connected = false;
CloseHandle(this->handler);
}
}
int SerialPort::readSerialPort(char *buffer, unsigned int buf_size)
{
DWORD bytesRead;
unsigned int toRead = 0;
ClearCommError(this->handler, &this->errors, &this->status);
if (this->status.cbInQue > 0) {
if (this->status.cbInQue > buf_size) {
toRead = buf_size;
}
else toRead = this->status.cbInQue;
}
if (ReadFile(this->handler, buffer, toRead, &bytesRead, NULL)) return bytesRead;
return 0;
}
bool SerialPort::writeSerialPort(char *buffer, unsigned int buf_size)
{
DWORD bytesSend;
if (!WriteFile(this->handler, (void*)buffer, buf_size, &bytesSend, 0)) {
ClearCommError(this->handler, &this->errors, &this->status);
return false;
}
else return true;
}
bool SerialPort::isConnected()
{
return this->connected;
}
###################faceDetect.cpp################
// CPP program to detects face in a video
// Include required header files from OpenCV directory
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <string>
#include <stdlib.h>
#include "SerialPort.h"
#include <sstream>
#include <iomanip>
using namespace std;
using namespace cv;
//Set up serial comm
char output[MAX_DATA_LENGTH];
char port[] = "\\\\.\\COM3";
char incoming[MAX_DATA_LENGTH];
// Function for Face Detection
void detectAndDraw(Mat& img, CascadeClassifier& cascade, double scale, SerialPort arduino);
string cascadeName;
// Function for sending locations to arduino
void sendSerial(string locations, SerialPort arduino);
int main(int argc, const char** argv)
{
//Establish connection to serial
SerialPort arduino(port);
if (arduino.isConnected()) {
cout << "COnnection Established" << endl;
}
else {
cout << "Error in port name" << endl;
}
// VideoCapture class for playing video for which faces to be detected
VideoCapture capture;
Mat frame, image;
// PreDefined trained XML classifiers with facial features
CascadeClassifier cascade;
double scale = 1;
// Change path before execution
cascade.load("C:/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml");
// Start Video..1) 0 for WebCam 2) "Path to Video" for a Local Video
capture.open(CAP_MSMF);
//sendSerial("400:100", arduino);
if (capture.isOpened())
{
// Capture frames from video and detect faces
cout << "Face Detection Started...." << endl;
while (1)
{
capture >> frame;
if (frame.empty())
break;
Mat frame1 = frame.clone();
detectAndDraw(frame1, cascade, scale, arduino);
char c = (char)waitKey(10);
// Press q to exit from window
if (c == 27 || c == 'q' || c == 'Q')
break;
}
}
else
cout << "Could not Open Camera";
return 0;
}
void sendSerial(string locations, SerialPort arduino) {
//string command;
//command = to_string(xloc);
cout << locations << endl;
char *charArray = new char[locations.size() + 1];
copy(locations.begin(), locations.end(), charArray);
charArray[locations.size()] = '\n';
arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
//arduino.readSerialPort(output, MAX_DATA_LENGTH);
//cout << output;
delete[] charArray;
//
//command = to_string(yloc);
//copy(command.begin(), command.end(), charArray);
//charArray[command.size()] = '\n';
//arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
////arduino.readSerialPort(output, MAX_DATA_LENGTH);
////cout << output;
//delete[] charArray;
}
void detectAndDraw(Mat& img, CascadeClassifier& cascade,
double scale, SerialPort arduino)
{
vector<Rect> faces;
Mat gray, smallImg;
cvtColor(img, gray, COLOR_BGR2GRAY); // Convert to Gray Scale
double fx = 1 / scale;
// Resize the Grayscale Image
resize(gray, smallImg, Size(), fx, fx, INTER_LINEAR);
equalizeHist(smallImg, smallImg);
// Detect faces of different sizes using cascade classifier
cascade.detectMultiScale(smallImg, faces, 1.1,
2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Draw circles around the faces
for (size_t i = 0; i < faces.size(); i++)
{
Rect r = faces[i];
Mat smallImgROI;
int x = faces[i].x;
int y = faces[i].y;
int h = y + faces[i].height;
int w = x + faces[i].width;
int centerX = x + (.5* faces[i].width);
int centerY = y + (.5* faces[i].height);
if (abs(320 - centerX) <= 50) {
if (abs(240 - centerY) <= 50) {
rectangle(img,
Point(x, y),
Point(w, h),
Scalar(0, 0, 275),
2,
8,
0);
}
}
else {
rectangle(img,
Point(x, y),
Point(w, h),
Scalar(275, 275, 275),
2,
8,
0);
}
stringstream stringX;
stringstream stringY;
stringX << std::setw(3) << std::setfill('0') << centerX;
stringY << std::setw(3) << std::setfill('0') << centerY;
std::stringstream ss;
//ss << std::setw(3) << std::setfill('0') << centerX << ":"<< centerY;
//std::string s = ss.str();
std::string s = stringX.str() + ":" + stringY.str();
//cout << s << endl;
sendSerial(s, arduino);
smallImgROI = smallImg(r);
if (arduino.isConnected()) {
cout << "COnnection Established" << endl;
//sendSerial("400:100", arduino);
}
}
// Show Processed Image with detected faces
imshow("Face Detection", img);
}
#####################arduino code################
#include <Servo.h>
String input;
char array[6];
char *strings[3];
char *ptr = NULL;
int xloc;
int yloc;
int hServoPin = 9;
Servo hServo;
int ledPin = 13;
void setup() {
//set up servos
hServo.attach(hServoPin);
//start serial connection
Serial.begin(9600);
//***** delete later *****
pinMode(ledPin, OUTPUT);
}
void loop() {
if(Serial.available()){
//grab "xloc:yloc" and convert to char array
input = Serial.readStringUntil('\n');
//delete later
//Serial.print("input; ");
//Serial.println(input);
for(int i = 0; i<6; i++){
array[i] = input.charAt(i);
//Serial.print(array[i]);
}
//split char array into two entities
byte index = 0;
ptr = strtok(array, ":;"); // takes a list of delimiters
while(ptr != NULL)
{
strings[index] = ptr;
index++;
ptr = strtok(NULL, ":;"); // takes a list of delimiters
//Serial.println("loop");
}
//set xloc and yloc respectively
xloc = atoi(strings[0]);
yloc = atoi(strings[1]);
}
if((xloc < 214)){
hServo.write(0);
delay(100);
}
else if((xloc > 214) && (xloc < 328)){
hServo.write(90);
delay(100);
}
else if((xloc > 328)){
hServo.write(180);
delay(100);
}
}
I am reading a UDP MJPEG stream with the FFmpeg API. When I read and display the stream on an ARM processor I have two problems:
1- The application is too slow and there is a big delay between the network camera and the displayed video.
2- The memory usage increases every time I call av_read_frame().
The source code:
const char *cam1_url = "udp://192.168.1.1:1234";
AVCodec *pCodec;
AVFrame *pFrame, *pFrameRGB;
AVCodecContext *pCodecCon;
AVDictionary *pUdpStreamOptions = NULL;
AVInputFormat *pMjpegFormat = av_find_input_format("mjpeg");
av_dict_set(&pUdpStreamOptions, "fifo_size", "5000000", 0);
av_register_all();
avdevice_register_all();
avcodec_register_all();
avformat_network_init();
AVFormatContext *pFormatCont = avformat_alloc_context();
if(avformat_open_input(&pFormatCont,cam1_url,pMjpegFormat,&pUdpStreamOptions) < 0)
{
cout << "!! Error !! - avformat_open_input(): failed to open input URL" << endl;
}
if(avformat_find_stream_info(pFormatCont,NULL) < 0)
{
cout << "!! Error !! - avformat_find_stream_info(), Failed to retrieve stream info" << endl;
}
av_dump_format(pFormatCont, 0, cam1_url, 0);
int videoStream;
for(int i=0; i< pFormatCont->nb_streams; i++)
{
if(pFormatCont->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
cout << " videoStream = " << videoStream << endl;
}
}
pCodecCon = pFormatCont->streams[videoStream]->codec;
pCodec = avcodec_find_decoder(pCodecCon->codec_id);
if(NULL == pCodec)
{
cout << "couldnt find codec" << endl;
return EXIT_FAILURE;
}
if(avcodec_open2(pCodecCon,pCodec,NULL) < 0)
{
cout << "!! Error !! - in avcodec_open2()" << endl;
return EXIT_FAILURE;
}
uint8_t *frameBuffer;
int numRxBytes = 0;
AVPixelFormat pFormat =AV_PIX_FMT_BGR24;
int width_rgb = (int)((float)pCodecCon->width);
int height_rgb = (int)((float)pCodecCon->height);
numRxBytes = avpicture_get_size(pFormat,width_rgb,height_rgb);
frameBuffer = (uint8_t *) av_malloc(numRxBytes*sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrameRGB, frameBuffer, pFormat,width_rgb,height_rgb);
AVPacket rx_pkt; // received packet
int frameFinished = 0;
struct SwsContext *imgConvertCtx;
av_init_packet(&rx_pkt);
while(av_read_frame(pFormatCont, &rx_pkt) >= 0)
{
if(rx_pkt.stream_index == videoStream)
{
av_frame_free(&pFrame);
pFrame = av_frame_alloc();
av_frame_free(&pFrameRGB);
pFrameRGB = av_frame_alloc();
avcodec_decode_video2(pCodecCon, pFrame, &frameFinished,&rx_pkt);
if(frameFinished)
{
imgConvertCtx = sws_getCachedContext(NULL, pFrame->width,pFrame->height, AV_PIX_FMT_YUVJ420P,width_rgb,height_rgb,AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL,NULL);
sws_scale(imgConvertCtx, ((AVPicture*)pFrame)->data, ((AVPicture*)pFrame)->linesize, 0, pCodecCon->height, ((AVPicture *)pFrameRGB)->data, ((AVPicture *)pFrameRGB)->linesize);
av_frame_unref(pFrame);
av_frame_unref(pFrameRGB);
}
}
av_free_packet(&rx_pkt);
av_packet_unref(&rx_pkt);
}
//cvDestroyWindow("Cam1Video");
av_free_packet(&rx_pkt);
avcodec_close(pCodecCon);
av_free(pFrame);
av_free(pFrameRGB);
avformat_close_input(&pFormatCont);
I have read that the reason could be that the FFmpeg libs buffer the incoming frames while the ARM processor isn't fast enough to process them. After about 4 minutes the system crashes.
How can I solve this problem?
One option could be to tell FFmpeg to act as a frame grabber and read frames in real time, with the flag "-re". How can I set this flag in the C++ source code? Or can anybody help me solve this problem?
Thank you very much