Debug glibc free(): invalid pointer - c++

I'm trying to debug code that eventually throws
*** glibc detected *** ./build/smonitor: free(): invalid pointer:
It's challenging because I don't call free() directly. I've seen the other SO posts with examples replicating the issue, but I need help on how to debug this. First off, I'm a C/C++ n00b, so my pointer skills are still in development, but I'm not doing much dynamic memory allocation (I think).
I'm starting to write my own 'security' application that takes snapshots from cameras and writes them to an NFS share; eventually it will display each camera's snapshot. Right now I take captures from one camera and cycle them through my display window (using OpenCV). It can run for a while (~1 hour) before I get the core dump. I create a thread to run the window; it should loop until my run flag gets set to false, and then I call cvReleaseImage. I have no idea why this is failing; any guidance is greatly appreciated!
// will be replaced with camera X filename on NFS share
std::string generate_filename()
{
static const char alphanum[] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
std::string filename = "";
std::stringstream ss;
for (int i = 0; i < 10; i++)
{
ss << alphanum[rand() % (sizeof(alphanum) - 1)];
}
ss << ".jpg";
printf("Generated filename: %s\n", ss.str().c_str());
return ss.str();
}
std::string generate_file_path()
{
std::stringstream ss;
ss << CAPTURES_PATH << generate_filename();
return ss.str();
}
void capture_photo(std::string& filepath)
{
time_t now;
time_t end;
double seconds;
bool cancelCapture = false;
int count = 0;
CvCapture* capture = cvCreateCameraCapture(0);
printf("Opened camera capture\n");
IplImage* frame;
while(1)
{
frame = cvQueryFrame(capture);
if (!frame)
{
fprintf(stderr, "Could not read frame from video stream\n\n");
} else
{
cvShowImage(WINDOW, frame);
cvWaitKey(100);
if (get_snapshot_enabled()) // simulate delay between snapshots
{
filepath = generate_file_path();
printf("Saving image\n");
cvSaveImage(filepath.c_str(), frame);
break;
}
}
}
printf("Ending camera capture\n");
cvReleaseCapture(&capture);
}
void* manage_window(void* arg)
{
time_t now;
time_t end;
double seconds = 0;
double stateSec;
int i = 0;
int rem = 0;
IplImage* images[10];
time_t lastUpdate;
time_t tDiff; // time diff
cvNamedWindow(WINDOW, CV_WINDOW_FREERATIO);
cvSetWindowProperty(WINDOW, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
std::string filepath;
time(&now);
int lastPos = 0;
while (1)
{
if (get_snapshot_enabled())
{
write_console_log("Going to capture photo\n");
// camera was selected
filepath = generate_file_path();
printf("Generated filepath: %s\n", filepath.c_str());
capture_photo(filepath);
if (!filepath.empty())
{
printf("Received filepath: %s\n", filepath.c_str());
time(&now);
images[lastPos] = cvLoadImage(filepath.c_str());
cvShowImage(WINDOW, images[lastPos]);
cvWaitKey(100);
if (lastPos == 10) lastPos = 0;
else lastPos++;
}
}
time(&end);
seconds = difftime(end, now);
if (seconds >= 5)
{
cvShowImage(WINDOW, images[i % 10]);
cvWaitKey(100);
i++;
time(&now);
}
// check if we're running
if (!get_running())
{
// log some error we're not running...
write_logs("Window thread exiting, not running...");
break;
}
}
for (i=0; i < 10; i++)
cvReleaseImage(&images[i]);
pthread_exit(NULL);
}

In void* manage_window(void* arg) there are these lines:
IplImage* images[10];
and
images[lastPos] = cvLoadImage(filepath.c_str());
if (lastPos == 10) lastPos = 0;
else lastPos++;
where lastPos can be incremented to 10, leading to
images[10] = cvLoadImage(filepath.c_str());
i.e. an invalid write past the end of the array. A tool like valgrind would likely have detected this.
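As a fix, here is a minimal sketch using the names from the question (it assumes the same legacy OpenCV C API the code already uses): zero-initialize the array so unused slots are NULL, release a slot before recycling it, and wrap the index with modulo so it can never reach 10. Running the binary under valgrind (valgrind ./build/smonitor) should flag the invalid write directly.

#include <opencv/highgui.h> // cvLoadImage / cvReleaseImage

static IplImage* images[10] = { 0 }; // NULL slots are safe to pass to cvReleaseImage
static int lastPos = 0;

void store_snapshot(const char* filepath)
{
    cvReleaseImage(&images[lastPos]);  // no-op while the slot is still NULL
    images[lastPos] = cvLoadImage(filepath, CV_LOAD_IMAGE_COLOR);
    lastPos = (lastPos + 1) % 10;      // wraps 9 -> 0, never reaches 10
}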

Related

std::thread and ros::ok() do not work in ROS

I have a function that is executed by std::thread. I want it to run until the user closes the terminal running roscore by pressing Ctrl+C. Because of that, I use this inside the thread:
void publish_camera_on_topic(std::vector<Camera> cameras, const std::vector<ros::Publisher> publishers, const int camera_index)
{
int frameSize;
BYTE *imagePtr;
// frame id
int frame_id = 0;
cv_bridge::CvImage img_bridge;
sensor_msgs::Image img_msg;
while (ros::ok()) {
// Grab and display a single image from each camera
imagePtr = cameras[camera_index].getRawImage();
frameSize = cameras[camera_index].getFrameSize();
cameras[camera_index].createRGBImage(imagePtr,frameSize);
unsigned char* pImage = cameras[camera_index].getImage();
if (NULL != pImage) {
Mat image(cameras[camera_index].getMatSize(), CV_8UC3, pImage, Mat::AUTO_STEP);
// release asap
cameras[camera_index].releaseImage();
//cvtColor(image, image, CV_BGR2RGB,3);
// publish on ROS topic
std_msgs::Header header; // empty header
header.seq = frame_id; // user defined counter
header.stamp = ros::Time::now(); // time
img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::RGB8, image);
img_bridge.toImageMsg(img_msg); // from cv_bridge to sensor_msgs::Image
publishers[camera_index].publish(img_msg); // ros::Publisher pub_img = node.advertise<sensor_msgs::Image>("topic", queuesize);
}
// increase frame Id
frame_id = frame_id + 1;
}
std::cout << "ROS closing for thread of camera " << camera_index << " recieved." << std::endl;
}
Also, I create the threads like this:
// image publisher
// for each camera create an publisher
std::vector<ros::Publisher> publishers;
for (size_t i = 0; i < cameras.size(); i++) {
char topic_name[200];
sprintf(topic_name, "/lumenera_camera_package/%d", i + 1);
publishers.push_back(nh.advertise<sensor_msgs::Image>(topic_name, 10));
}
// work with each camera on a separate thread
std::vector<std::thread> thread_vector;
for(size_t i=0; i < cameras.size(); i++) {
thread_vector.push_back(std::thread(publish_camera_on_topic, cameras, publishers, i));
}
ros::spin();
std::for_each(thread_vector.begin(), thread_vector.end(), [](std::thread &t){t.join(); });
for(size_t i=0; i < cameras.size(); i++) {
cameras[i].stopStreaming();
}
ROS_INFO("Node: [lumenera_camera_node] has been Ended.");
However, when I press Ctrl+C in the terminal and stop roscore, the threads keep running and the value of ros::ok() does not change.
The problem is solved: the issue is that ros::ok() does not check for the ROS master. Instead of this line:
while (ros::ok()) { //do sth}
This line should be used:
while (ros::ok() && ros::master::check()) { // do sth}
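For completeness, here is a minimal sketch of the worker loop with that fix (ros::Rate and the 30 Hz value are assumptions, and the grab/convert/publish body is elided):

#include <ros/ros.h>
#include <ros/master.h>

void worker_loop()
{
    ros::Rate rate(30); // assumed frame rate
    while (ros::ok() && ros::master::check()) {
        // ... grab, convert, and publish the frame as in publish_camera_on_topic() ...
        rate.sleep();
    }
    // reached once Ctrl+C stops roscore: ros::master::check() returns false
}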

Segmentation Fault (core dumped) when invoking execute() of an ExecutionContext object in TensorRT-8.4

My colleague built and serialized a TensorRT engine. She handed me the serialized engine and an input image in two binary files. The image file contains 640x480x3 float numbers. In the following code snippet, I load the engine and the image data for inference, but when the execute() method is called, a segmentation fault occurs. I guess there might be something wrong with the way I create the buffers that causes the error. Any idea how I can solve this?
#define OUTPUT_SIZE 1000
#define BATCH_SIZE 1
#define IMAGE_WIDTH 640
#define IMAGE_HEIGHT 480
bool InitAndInfer(std::string engine_path,std::string image_path)
{
std::ifstream trt_stream(engine_path, std::ifstream::binary | std::ifstream::in);
if (trt_stream.is_open()) {
trt_stream.seekg(0, trt_stream.end);
int size = trt_stream.tellg();
trt_stream.seekg(0, trt_stream.beg);
char *trtModel = new char[size];
trt_stream.read(trtModel, size);
trt_stream.close();
TensorLogger logger;
nvinfer1::IRuntime *runtime = createInferRuntime(logger);
nvinfer1::ICudaEngine *engine = runtime->deserializeCudaEngine(trtModel, size);
if (engine == nullptr) {
std::cout<<"Engine init failed\n";
return false;
}
std::cout<<"Ready to create context\n";
nvinfer1::IExecutionContext *context = engine->createExecutionContext();
if (context = nullptr) {
std::cout<<"Engine created failed!\n";
return false;
}
std::cout<<"Context created\n";
int binding_indecis = engine->getNbBindings();
std::cout<<"Engine binding num = "<<binding_indecis<<std::endl;
std::string binding_names[binding_indecis];
for (int i = 0; i < binding_indecis; i++) {
binding_names[i] = engine->getBindingName(i);
std::cout<<"The binding name for "<<i<<" is "<<binding_names[i]<<std::endl;
}
std::cout<<"Binding indecis got\n";
float image_data[640*480*3];
int image_pos{0};
std::ifstream image_stream(image_path, std::ifstream::binary | std::ifstream::in);
while(true) {
std::string line;
if (std::getline(image_stream, line)) {
std::stringstream ss(line);
while(ss>>image_data[image_pos]) {
image_pos++;
}
} else {
break;
}
}
std::cout<<image_pos<<" float num has been read\n";
void *buffers[4];
if (cudaSuccess != cudaMalloc(&buffers[0], 3*BATCH_SIZE*IMAGE_HEIGHT*IMAGE_WIDTH*sizeof(float)))
std::cout<<"Error when cudaMalloc for buffer[0]\n";
if (cudaSuccess != cudaMalloc(&buffers[1], BATCH_SIZE*OUTPUT_SIZE*sizeof(float)))
std::cout<<"Error when cudaMalloc for buffer[1]\n";
if (cudaSuccess != cudaMalloc(&buffers[2], BATCH_SIZE*100000000*sizeof(float)))
std::cout<<"Error when cudaMalloc for buffer[2]\n";
if (cudaSuccess != cudaMalloc(&buffers[3], BATCH_SIZE*100000000*sizeof(float)))
std::cout<<"Error when cudaMalloc for buffer[3]\n";
if (cudaSuccess != cudaMemcpy(buffers[0], image_data, 3*BATCH_SIZE*IMAGE_HEIGHT*IMAGE_WIDTH*sizeof(float), cudaMemcpyHostToDevice))
std::cout<<"Error when cudaMemcpy\n";
std::cout<<"Engine max batch size="<<engine->getMaxBatchSize()<<std::endl;
std::cout<<"Ready to infer\n";
context->execute(BATCH_SIZE, buffers);
std::cout<<"Infer finished\n";
char *result = static_cast<char*>(buffers[1]);
std::cout<<result<<std::endl;
} else {
std::cout<<"Can't open trt engine file - "<<engine_path<<std::endl;
return false;
}
return true;
}
And the runtime log is put below:

x264 output video is blank when threading is enabled

I am using libx264 compiled from source. It was configured to produce both a .dll and a .lib with this command:
./configure --disable-cli --enable-shared --extra-ldflags=-Wl,--output-def=libx264.def
I am using the libx264 API in my screen-sharing program with preset "veryfast", tune "zerolatency", profile "high", and the following settings:
param.i_csp = X264_CSP_BGRA;
param.i_threads = 1;
param.i_width = width;
param.i_height = height;
param.i_fps_num = fps;
param.i_fps_den = 1;
param.rc.i_bitrate = bitrate;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
With these settings the program works fine. I specified single-threaded operation by setting param.i_threads = 1.
If this setting is removed, x264 defaults to multi-threading and automatically sets param.i_threads to 1.5x the number of CPU cores, which gives faster performance than running single-threaded.
But when I remove param.i_threads = 1 to make it multi-threaded, the generated output is fully grey. I cannot see any output when I view the live stream with VLC, or sometimes I see weird output.
I am using this bitmap image as an example (https://imgur.com/a/l8LCd1l). Only this same image is being encoded multiple times. When it is saved into an .h264 video file, it is clearly viewable. But when the encoded payload is sent over RTMP, the live stream produces very bad and weird output (or sometimes no output at all). This is the weird output I'm seeing most of the time for this image: https://imgur.com/a/VdyX1Zm
This is the full example code, in which I am both streaming and writing a video file of the same picture. It uses the srs-librtmp library. There is no error, but the stream has weird output.
In this code, the output stream is viewable only if you add param.i_threads = 1;. The problem is that it should be viewable with both single-threaded and multi-threaded encoding.
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <x264.h>
#include "srs_librtmp.h"
#pragma comment(lib, "C:/Softwares/x264/libx264.lib")
using namespace std;
int check_ret(int ret);
int main()
{
int dts = 0;
x264_param_t param;
x264_t* h;
x264_nal_t* nals;
int i_nal;
int pts = 0;
int i_frame_size;
x264_picture_t picIn;
x264_picture_t picOut;
x264_param_default_preset(&param, "veryfast", "zerolatency");
//x264 settings
param.i_csp = X264_CSP_BGRA;
param.i_width = 1920;
param.i_height = 1080;
param.i_fps_num = 30;
param.i_fps_den = 1;
param.rc.i_bitrate = 2500;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
x264_param_apply_profile(&param, "high");
h = x264_encoder_open(&param);
//allocate picture
x264_picture_alloc(&picIn, param.i_csp, param.i_width, param.i_height);
//picture settings
picIn.img.i_plane = 1;
picIn.img.i_stride[0] = 4 * param.i_width;
picIn.i_type = X264_TYPE_AUTO;
int header_size = x264_encoder_headers(h, &nals, &i_nal);
FILE* fptr;
fopen_s(&fptr, "example1.h264", "wb");
// write sps and pps in the video file
fwrite(nals->p_payload, header_size, 1, fptr);
int size = 1920 * 1080 * 4;
char* bmp = new char[size];
FILE* bitptr;
errno_t err = fopen_s(&bitptr, "flower.bmp", "rb");
fseek(bitptr, 54, SEEK_SET);
fread(bmp, size, 1, bitptr);
fclose(bitptr);
srs_rtmp_t rtmp = srs_rtmp_create("127.0.0.1:1935/live/test");
if (srs_rtmp_handshake(rtmp) != 0)
{
std::cout << "Simple handshake failed.";
return -1;
}
std::cout << "Handshake completed successfully.\n";
if (srs_rtmp_connect_app(rtmp) != 0) {
std::cout << "Connecting to host failed.";
return -1;
}
std::cout << "Connected to host successfully.\n";
if (srs_rtmp_publish_stream(rtmp) != 0) {
std::cout << "Publish signal failed.";
}
std::cout << "Publish signal success\n";
// write sps and pps in the live stream
int ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nals->p_payload), header_size, 0, 0);
ret = check_ret(ret);
if (!ret)
return -1;
std::cout << "SPS and PPS sent.\n";
// main loop
std::cout << "Now streaming and encoding\n";
int i = 1800;
while (i--)
{
picIn.img.plane[0] = reinterpret_cast<uint8_t*>(bmp);
picIn.i_pts = pts++;
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, &picIn, &picOut);
if (i_frame_size)
{
for (int j = 0; j < i_nal; j++)
{
x264_nal_t* nal = nals + j;
// write data in the video file
fwrite(nal->p_payload, nal->i_payload, 1, fptr);
// write data in the live stream
ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nal->p_payload), nal->i_payload, dts, dts);
ret = check_ret(ret);
if (!ret)
{
return -1;
}
}
}
else
{
std::cout << "i_frame_size = 0 (encoder failed)\n";
}
dts += 33;
}
while (x264_encoder_delayed_frames(h))
{
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, NULL, &picOut);
if (i_frame_size)
{
fwrite(nals->p_payload, i_frame_size, 1, fptr);
}
}
std::cout << "\nAll done\n";
std::cout << "Output video is example1.h264 and it is viewable in VLC";
return 0;
}
int check_ret(int ret)
{
if (ret != 0) {
if (srs_h264_is_dvbsp_error(ret)) {
srs_human_trace("ignoring drop video error, code=%d", ret);
}
else if (srs_h264_is_duplicated_sps_error(ret)) {
srs_human_trace("ignoring duplicated sps, code=%d", ret);
}
else if (srs_h264_is_duplicated_pps_error(ret)) {
srs_human_trace("ignoring duplicated pps, code=%d", ret);
}
else {
srs_human_trace("sending h264 raw data failed. ret=%d", ret);
return 0;
}
}
return 1;
}
If you would like to download the original flower.bmp file, here is the link: https://gofile.io/d/w2kX56
This error can be reproduced with any other bmp file as well.
Please tell me what causes this problem when multi-threading is enabled. Am I setting wrong values? Is the code in which I stream the encoded data wrong?

Ueye Camera snapshot is White using Qt

I wrote some C++ code to take a snapshot with a uEye camera; however, the picture that gets saved is just white (that said, sometimes you can see a tiny bit of the road, but it is still mostly white). I believe it's an issue with an auto parameter, but I feel as if I have tried everything.
Below is my code:
void MainWindow::CaptureImage(){
int initcamera = is_InitCamera(&hCam, hWndDisplay);
if(initcamera != IS_SUCCESS)
{
cout<<endl<<"Failed to initialize the camera"<<endl;
exit(-1);
}else{
cout<<endl<<"Initialized Camera"<<endl;
}
int camerainfo = is_GetCameraInfo (hCam, &camera_info);
if(camerainfo != IS_SUCCESS)
{
cout<<endl<<"Unable to acquire camera information"<<endl;
exit(-1);
}else{
cout<<endl<<"Camera information required"<<endl;
}
int sensorinfo = is_GetSensorInfo (hCam, &sInfo);
if(sensorinfo != IS_SUCCESS)
{
cout<<endl<<"Unable to acquire sensor information"<<endl;
exit(-1);
}else{
cout<<endl<<"Sensor information acquired"<<endl;
}
int colormode = is_SetColorMode(hCam, IS_CM_BGR8_PACKED);
if(colormode != IS_SUCCESS)
{
cout<<endl<<"Unable to set the color mode"<<endl;
exit(-1);
}else{
cout<<endl<<"Color mode set"<<endl;
}
int pXPos = (sInfo.nMaxWidth);
int pYPos = (sInfo.nMaxHeight);
int rit = is_AllocImageMem (hCam,pXPos,pYPos, 24, &m_pcImageMemory, &m_lMemoryId);
if(rit != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO INITIALIZE MEMORY"<<endl;
system("PAUSE");
exit(-1);
}else{
cout<<endl<<"INITIALIZED MEMORY"<<endl;
}
int rat = is_SetImageMem (hCam, m_pcImageMemory, m_lMemoryId);
if(rat != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO ACTIVATE MEMORY"<<endl;
system("PAUSE");
exit(-1);
}else{
cout<<endl<<"ACTIVATE MEMORY"<<endl;
}
double strenght_factor = 1.0;
int colorcorrection = is_SetColorCorrection(hCam, IS_CCOR_ENABLE, &strenght_factor);
double pval = 1;
int whiteb = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &pval, 0);
double gval = 1;
int gains = is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &gval, 0);
int dummy;
char *pMem, *pLast;
IMAGE_FILE_PARAMS ImageFileParams;
ImageFileParams.pwchFileName = L"./TestImage.bmp"; /// <-- Insert name and location of the image
ImageFileParams.pnImageID = NULL;
ImageFileParams.ppcImageMem = NULL;
ImageFileParams.nQuality = 0;
ImageFileParams.nFileType = IS_IMG_BMP;
int sho = is_FreezeVideo(hCam, IS_WAIT);
if(sho != IS_SUCCESS)
{
cout<<endl<<"UNABLE TO ACQUIRE FROM THE CAMERA"<<endl;
system("PAUSE");
exit(-1);
}
if (sho == IS_SUCCESS){
int m_Ret = is_GetActiveImageMem(hCam, &pLast, &dummy);
int n_Ret = is_GetImageMem(hCam, (void**)&pLast);
}
if (is_ImageFile(hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof(ImageFileParams)) == IS_SUCCESS)
{
cout << endl << "An Image was saved" << endl;
}
else
{
cout << endl << "something went wrong" << endl;
}
// Releases an image memory that was allocated
//is_FreeImageMem(hCam, pcImageMemory, nMemoryId);
// Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
is_ExitCamera(hCam);
}
I have tried many things, like the parameters below, but the image is still just white. With the camera in a room, which is darker, the image came out OK, so I definitely think it's due to the daylight outside.
const wstring filenameU(filename.begin(), filename.end());
is_ParameterSet(hCam, IS_PARAMETERSET_CMD_LOAD_FILE,(void*) filenameU.c_str(), 0);
unsigned int range[3];
memset(range, 0, sizeof(range));
is_PixelClock(hCam, IS_PIXELCLOCK_CMD_GET_RANGE, (void*)range, sizeof(range));
unsigned int maximumPixelClock = range[1];
is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&maximumPixelClock, sizeof(maximumPixelClock));
double pval1 = 1, pval2 = 0; // also tried pval1 = auto_exposure
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER,&pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_FRAMERATE,&pval1, &pval2);
is_SetAutoParameter(hCam, IS_SET_AUTO_WB_OFFSET, &pval1, &pval2);
int desiredFrameRate = 60;
is_SetFrameRate(hCam, desiredFrameRate, &m_actualFrameRate);
You cannot just take one picture, because the exposure might be too high. You have to capture a few images to give the auto exposure control (AEC) a chance to lower the exposure. The reason you have to capture some frames first is that the AEC is done in software and can only start to adjust parameters once it receives frames.
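As a sketch of that advice (the helper name and the count of 10 warm-up frames are my own assumptions; tune the count for your lighting):

#include <ueye.h>

// Grab a few throwaway frames so the software auto-exposure can settle
// before the snapshot that is actually saved.
bool warm_up_auto_exposure(HIDS hCam, int warmupFrames = 10)
{
    for (int i = 0; i < warmupFrames; ++i)
    {
        if (is_FreezeVideo(hCam, IS_WAIT) != IS_SUCCESS)
            return false; // capture failed; let the caller decide how to react
    }
    return true; // exposure should have converged by now
}

Calling this after the is_SetAutoParameter() calls and before the final is_FreezeVideo() that precedes the save gives the AEC the frames it needs.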

extract frame from a video for detection with opencv

I'm developing a program to detect objects in videos or images.
It works with images, but now I want to use it with videos. I pick the images from a specific folder, so I wanted to save the frames from the video into that folder before running the detection.
The variables video and salvataggio are already set.
In the following code I navigate through the folder to analyze the videos:
DIR *dir;
dir = opendir(video.c_str());
string vidName;
struct dirent *ent;
if (dir != NULL) {
while ((ent = readdir (dir)) != NULL) {
vidName= ent->d_name;
if(vidName.compare(".")!= 0 && vidName.compare("..")!= 0)
{
//string vidPath(neg + vidName);
estraiframe(video, vidName, salvataggio);
}
}
closedir (dir);
}
else {
cout<<"directory "<< video << " not present"<<endl;
}
The function estraiframe saves the frames to the output folder.
void estraiframe(string path, string vidName, string output){
string vidPath(path + vidName);
VideoCapture cap(vidPath);
if( !cap.isOpened()){
cout << "Cannot open the video file" << endl;
return;
}
double count = cap.get(CV_CAP_PROP_FRAME_COUNT);
double rate = cap.get(CV_CAP_PROP_FPS);
int counter = 0;
for (int i=1; i< count; i+=rate*5)
{
cap.set(CV_CAP_PROP_POS_FRAMES,i);
Mat frame;
cap.read(frame);
counter++;
string nomeframe = to_string(counter) + "-frame_from"+vidName+".jpg";
string percorso (output+nomeframe);
cout << percorso;
imwrite(percorso,frame);
}
}
Apparently it works, but after the last frame it gives me the following error:
Assertion stream_index < ogg->nstreams failed at libavformat/oggdec.c:898
I looked for it but I didn't find where the error is.
Your video contains multiple stream indexes; e.g. 3 audio streams and 1 video stream would result in 4 stream indexes. I recommend you check how many streams your video contains and print ogg->nstreams to see whether this corresponds. Looking at the source code in oggdec.c at line 898:
static int ogg_read_seek(AVFormatContext *s, int stream_index,
int64_t timestamp, int flags)
{
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + stream_index;
int ret;
av_assert0(stream_index < ogg->nstreams); /* Line 898 */
You're clearly going out of bounds here.
Apparently I managed to solve it: I modified the function that extracts the frames.
void estraiframe(string path, string vidName, string output){
string vidPath(path + vidName);
VideoCapture cap(vidPath);
if( !cap.isOpened()){
cout << "Cannot open the video file" << endl;
return;
}
double rate = cap.get(CV_CAP_PROP_FPS);
int counter = 0;
Mat frame;
int i=1;
while(1)
{
cap.read ( frame);
if( frame.empty()) break;
counter++;
if (counter == rate*5*i){
i++;
string nomeframe = to_string(counter) + "-frame_from"+vidName+".jpg";
string percorso (output+nomeframe);
imwrite(percorso, frame);
}
char key = waitKey(10);
if ( key == 27) break;
}
}
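For reference, a hypothetical call site mirroring the directory loop earlier in the question (the paths are made-up examples):

// Saves roughly one frame per 5 seconds of video into the output folder.
estraiframe("/data/videos/", "clip.avi", "/data/frames/");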