I want to play a video file that I obtained from an IP camera (H264 codec) using libVLC and OpenCV, so I took the code from this post, created a project in VS 2010, and put my mp4 file into the project folder ("5.mp4"). When I ran it, I got these errors:
main error: open of `5.mp4' failed
main error: Your input can't be opened
main error: VLC is unable to open the MRL '5.mp4'. Check the log for details.
Here's my code:
#include <vlc/vlc.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
using namespace cv;
using namespace std;
struct VideoDataStruct {
int param;
};
int done = 0;
libvlc_media_player_t *mp;
unsigned int videoBufferSize = 0;
uint8_t *videoBuffer = 0;
// smem prerender callback: VLC asks us for a buffer to decode the next frame into.
void cbVideoPrerender(void *p_video_data, uint8_t **pp_pixel_buffer, int size) {
// Locking should happen here (the main loop reads videoBuffer concurrently)
if (size > (int)videoBufferSize || !videoBuffer)
{
printf("Reallocate raw video buffer\n");
free(videoBuffer);
videoBuffer = (uint8_t *) malloc(size);
videoBufferSize = size;
}
*pp_pixel_buffer = videoBuffer;
}
void cbVideoPostrender(void *p_video_data, uint8_t *p_pixel_buffer, int width, int height, int pixel_pitch, int size, int64_t pts) {
// Unlocking
//CloseHandle(hMutex);
}
static void handleEvent(const libvlc_event_t* pEvt, void* pUserData) {
libvlc_time_t time;
switch(pEvt->type)
{
case libvlc_MediaPlayerTimeChanged:
time = libvlc_media_player_get_time(mp);
printf("MediaPlayerTimeChanged %lld ms\n", (long long)time);
break;
case libvlc_MediaPlayerEndReached:
printf ("MediaPlayerEndReached\n");
done = 1;
break;
default:
printf("%s\n", libvlc_event_type_name(pEvt->type));
}
}
int main(int argc, char* argv[]) {
// VLC pointers
libvlc_instance_t *inst;
libvlc_media_t *m;
void *pUserData = 0;
VideoDataStruct dataStruct;
// VLC options
char smem_options[1000];
// RV24
sprintf(smem_options
, "#transcode{vcodec=RV24}:smem{"
"video-prerender-callback=%lld,"
"video-postrender-callback=%lld,"
"video-data=%lld,"
"no-time-sync}"
, (long long int)(intptr_t)(void*)&cbVideoPrerender
, (long long int)(intptr_t)(void*)&cbVideoPostrender
, (long long int)(intptr_t)(void*)&dataStruct
);
const char * const vlc_args[] = {
"-I", "dummy", // Don't use any interface
"--ignore-config", // Don't use VLC's config
"--extraintf=logger", // Log anything
"--verbose=1", // Be verbose
"--sout", smem_options // Stream to memory
};
// We launch VLC
inst = libvlc_new(sizeof(vlc_args) / sizeof(vlc_args[0]), vlc_args);
/* Create a new item */
m = libvlc_media_new_location(inst, "5.mp4");
/* Create a media player playing environement */
mp = libvlc_media_player_new_from_media (m);
libvlc_event_manager_t* eventManager = libvlc_media_player_event_manager(mp);
libvlc_event_attach(eventManager, libvlc_MediaPlayerTimeChanged, handleEvent, pUserData);
libvlc_event_attach(eventManager, libvlc_MediaPlayerEndReached, handleEvent, pUserData);
libvlc_event_attach(eventManager, libvlc_MediaPlayerPositionChanged, handleEvent, pUserData);
libvlc_video_set_format(mp, "RV24", 1280, 720, 1280* 3 );
/* play the media_player */
libvlc_media_player_play (mp);
while (!done)
{
if (videoBuffer) // Wait until the first frame has been decoded
{
// CV_8UC3 = 8 bits per channel, 3 channels
Mat img = Mat(Size(1280, 720), CV_8UC3, videoBuffer); // wraps the buffer, no copy
// cvtColor(img, img, CV_RGB2BGR);
namedWindow("Display window", WINDOW_AUTOSIZE); // Create a window for display.
imshow("Display window", img); // Show our image inside it.
}
if (waitKey(10) == 27) // imshow needs waitKey to refresh the window; 27 = ESC
break;
}
libvlc_media_player_stop(mp);
libvlc_media_player_release(mp);
libvlc_release(inst);
}
I guess this is easy to fix, but I couldn't find any information on the Internet. I also tried putting the file at "C:\5.mp4", but I got the same errors. Thanks for any help.
EDIT:
Ok, so I fixed this issue: I needed to put file:/// before "5.mp4". Now my video is playing, but the picture comes out distorted.
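For reference, a minimal sketch of the two ways libVLC accepts a local file (the absolute path below is just an example): libvlc_media_new_location() expects a complete MRL including the scheme, while libvlc_media_new_path() takes a plain filesystem path.
/* MRL form: the scheme prefix is mandatory for libvlc_media_new_location() */
m = libvlc_media_new_location(inst, "file:///C:/project/5.mp4");
/* Path form: libvlc_media_new_path() takes the path as-is */
m = libvlc_media_new_path(inst, "5.mp4");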
EDIT02:
Ok, with "*.avi" everything looks good, so I guess the problem is with this file; I recorded it from the IP camera using VLC and saved it as *.mp4.
Ok, so I found the problem: I made a mistake with the video resolution. The original resolution is 2560x1536, so when I call libvlc_video_set_format(mp, "RV24", 2560, 1536, 2560 * 3); the picture looks fine, and after passing it to OpenCV I can resize it.
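To avoid hardcoding the resolution, here is a sketch of one way to read it from the media itself (untested; it assumes libVLC 2.1+, where libvlc_media_tracks_get() is available, and it has to run before libvlc_video_set_format(), since the format can only be set prior to playback):
/* Parse the media synchronously so track information becomes available */
libvlc_media_parse(m);
libvlc_media_track_t **tracks;
unsigned n = libvlc_media_tracks_get(m, &tracks);
for (unsigned i = 0; i < n; i++)
if (tracks[i]->i_type == libvlc_track_video)
libvlc_video_set_format(mp, "RV24", tracks[i]->video->i_width, tracks[i]->video->i_height, tracks[i]->video->i_width * 3);
libvlc_media_tracks_release(tracks, n);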
Related
I am writing OpenCV code for playing a video. I want to add a trackbar to adjust the video speed, but the trackbar doesn't move and I can't focus the video window. This is my OpenCV C++ code. What should I do?
double sec = 1; // global; also read by the playback loop below
void onTrackbarSlide(int pos, void *){
if (pos > 0) // guard: pos starts at 0, which would divide by zero
sec = 1.0 / pos;
printf("pos = %d\n",pos);
}
int main(void)
{
strcat(InputFile, FileName); // input video file (InputFile, FileName and OutputFile are assumed to be declared elsewhere)
//VideoCapture cap(0); // Create an object and open the default(0) camera. C++ Syntax: VideoCapture::VideoCapture(int device)
VideoCapture cap; // Class for video capturing from video files or cameras.
cap.open(InputFile); // Open video file or a capturing device for video capturing
if (!cap.isOpened()) { // Check if the file is opened successfully.
printf("\nFail to open a video file");
return -1;
}
// When querying a property that is not supported by the backend used by the VideoCapture class, value 0 is returned.
double fps = cap.get(CV_CAP_PROP_FPS); printf("\nCV_CAP_PROP_FPS = %f", fps);
double ToTalFrames = cap.get(CV_CAP_PROP_FRAME_COUNT); printf("\nCV_CAP_PROP_FRAME_COUNT = %f", ToTalFrames);
CvSize FrameSize;
FrameSize.width = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
FrameSize.height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
printf("\nWidth * Height = %d * %d\n", FrameSize.width, FrameSize.height);
VideoWriter wrt;
#define CODEC -1
namedWindow("original", 1);
int slider_position = 0;
int slider_max = 255;
createTrackbar("video speed", "original", &slider_position, slider_max, onTrackbarSlide);
wrt.open(OutputFile, CODEC, fps, FrameSize);
Mat frame, dst1;
Mat dst2;
for (int i = 1; i< ToTalFrames; i++)
{
cap.read(frame);
if (frame.data == NULL) { printf("\nNo image found!\n"); return(0); }
imshow("original", frame);
if( waitKey(sec / fps) == 0x1b ) break; // Break on ESC. NOTE: sec / fps is below 1 and truncates to 0, so waitKey waits indefinitely; see the sketch after this code.
}
return 0;
}
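A sketch of a likely culprit and fix (assuming the slider is meant to scale playback speed): sec / fps is a fraction well below 1, so the implicit int conversion passes 0 to waitKey(), which then waits indefinitely for a keypress and stalls playback on the first frame. Deriving a clamped, non-zero delay keeps the loop and the window responsive:
// Replacement for the waitKey(sec / fps) line in the loop above:
int speed = slider_position > 0 ? slider_position : 1; // guard against division by zero
int delay = (int)(1000.0 / fps) * 10 / speed; // larger slider value = faster playback
if (delay < 1) delay = 1; // waitKey(0) would block forever
if (waitKey(delay) == 0x1b) break; // 0x1b = ESC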
I'm having trouble displaying an image (a PNG extracted with libpng) in an XCB window; it is always entirely empty/white. I'm pretty sure the PNG extraction is correct, since I can perfectly re-write it into another file.
I've tried everything I found (explanations, guides, documentation) and I'm running out of ideas:
Creating an xcb_pixmap_t by calling xcb_create_pixmap_from_bitmap_data() with the data taken from the PNG, then calling xcb_copy_area() in the EXPOSE branch of the event loop.
Creating an xcb_image_t* by calling xcb_image_create_from_bitmap_data(), then trying to map it to the window with xcb_image_put(). I've even tried to display each pixel with xcb_image_put_pixel(), but without success.
Code sample:
xcb_pixmap_t pixmap = xcb_create_pixmap_from_bitmap_data(
connection, // xcb_connect(0, 0) (type: xcb_connection_t*)
window, // xcb_generate_id(connection) (type: xcb_window_t)
img.getData(), // uint8_t*
img.getWidth(), // 128
img.getHeight(), // 128
img.getBitDepth(), // 8
screen->black_pixel, // screen = xcb_setup_roots_iterator(xcb_get_setup(connection)).data (type: xcb_screen_t*)
screen->white_pixel,
nullptr);
// "img" is an instance of my own custom class, result of PNG reading
xcb_image_t* image = xcb_image_create_from_bitmap_data(
img.getData(),
img.getWidth(),
img.getHeight()); // image->data seems fine
xcb_image_put(connection,
window,
graphicsContext,
image, 0, 0, 0); // This does nothing
for (unsigned int i = 0; i < screen->height_in_pixels; ++i)
for (unsigned int j = 0; j < screen->width_in_pixels; ++j)
xcb_image_put_pixel(image, j, i, 0); // Displays nothing
[...]
// Into event loop
case XCB_EXPOSE: {
xcb_expose_event_t* exposeEvent = reinterpret_cast<xcb_expose_event_t*>(event);
xcb_copy_area(connection,
pixmap,
window,
graphicsContext,
exposeEvent->x, exposeEvent->y, // Top left x & y coordinates of the source's region to copy
exposeEvent->x, exposeEvent->y, // Top left x & y coordinates of the destination's region to copy to
exposeEvent->width,
exposeEvent->height);
xcb_flush(connection);
break;
}
From the examples I found, it seemed that a colormap wasn't needed, but could that be the problem? Could anyone tell me where I've gone wrong?
I threw together a simple xcb image viewer about 4 years ago, but just noticed this question, so apologies for the necromancy.
It uses xcb_image, stb_image and nanosvg, but compiles to a relatively small static binary (with a musl or uclibc toolchain).
#include <xcb/xcb.h>
#include <xcb/xcb_image.h>
#define STBI_NO_HDR
#define STBI_NO_LINEAR
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define NANOSVG_IMPLEMENTATION
#include "nanosvg.h"
#define NANOSVGRAST_IMPLEMENTATION
#include "nanosvgrast.h"
int main(int argc, char **argv){
xcb_connection_t *c = xcb_connect(0, 0);
xcb_screen_t *s = xcb_setup_roots_iterator(xcb_get_setup(c)).data;
int w, h, n,
depth = s->root_depth,
win_class = XCB_WINDOW_CLASS_INPUT_OUTPUT,
format = XCB_IMAGE_FORMAT_Z_PIXMAP;
xcb_colormap_t colormap = s->default_colormap;
xcb_drawable_t win = xcb_generate_id(c);
xcb_gcontext_t gc = xcb_generate_id(c);
xcb_pixmap_t pixmap = xcb_generate_id(c);
xcb_generic_event_t *ev;
xcb_image_t *image;
NSVGimage *shapes = NULL;
NSVGrasterizer *rast = NULL;
char *data = NULL;
unsigned *dp;
size_t i, len;
uint32_t mask = XCB_CW_BACK_PIXEL | XCB_CW_EVENT_MASK,
value_mask = XCB_EVENT_MASK_EXPOSURE | XCB_EVENT_MASK_BUTTON_PRESS,
values[] = { s->black_pixel, value_mask };
if (argc<2) return -1;
if ((data = (char *)stbi_load(argv[1], &w, &h, &n, 4))) // stbi_load returns unsigned char*, hence the cast
;
else if ((shapes = nsvgParseFromFile(argv[1], "px", 96.0f))) {
w = (int)shapes->width;
h = (int)shapes->height;
rast = nsvgCreateRasterizer();
data = malloc(w*h*4);
nsvgRasterize(rast, shapes, 0,0,1, (unsigned char *)data, w, h, w*4);
}else return -1;
for(i=0,len=w*h,dp=(unsigned *)data;i<len;i++) //rgba to bgra
dp[i] = (dp[i] & 0xff00ff00) | ((dp[i] >> 16) & 0xFF) | ((dp[i] << 16) & 0xFF0000);
xcb_create_window(c,depth,win,s->root,0,0,w,h,1,win_class,s->root_visual,mask,values);
xcb_create_pixmap(c,depth,pixmap,win,w,h);
xcb_create_gc(c,gc,pixmap,0,NULL);
image = xcb_image_create_native(c,w,h,format,depth,data,w*h*4,data);
xcb_image_put(c, pixmap, gc, image, 0, 0, 0);
xcb_image_destroy(image);
xcb_map_window(c, win);
xcb_flush(c);
while ((ev = xcb_wait_for_event(c))) {
switch (ev->response_type & ~0x80){
case XCB_EXPOSE: {
xcb_expose_event_t *x = (xcb_expose_event_t *)ev;
xcb_copy_area(c,pixmap,win,gc,x->x,x->y,x->x,x->y,x->width,x->height);
xcb_flush(c);
}break;
case XCB_BUTTON_PRESS: goto end;
default: break;
}
}
end:
xcb_free_pixmap(c, pixmap);
xcb_disconnect(c);
return 0;
}
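If it helps, it should build with something along these lines (library names assumed; they can vary by distribution, and stb_image.h plus the nanosvg headers must be in the include path):
cc viewer.c -o viewer -lxcb -lxcb-image -lm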
I want to take images from my IP camera. I work in Visual Studio 2012.
At first, I used OpenCV to connect to it. Here is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
int main()
{
cv::VideoCapture vcap;
const std::string videoStreamAddress = "rtsp://admin:admin@192.168.0.120/snl/live/1/1/stream1.cgi";
if (!vcap.open(videoStreamAddress))
{
printf("camera is null\n");
return -1;
}
else
{
cv::Mat image;
cv::namedWindow("Video", CV_WINDOW_AUTOSIZE);
while(1)
{
vcap >> image;
imshow("Video", image);
if(cv::waitKey(10) == 99 ) break;
}
}
cv::waitKey(1000);
return 0;
}
It shows the stream, but some of the images are distorted. I think it is because of a decoding error. The error is this:
[h264 @ 00decfe0] error while decoding MB 37 22, bytestream ...
To solve this error I wrote another piece of code, inspired by http://answers.opencv.org/question/65932/how-to-stream-h264-video-with-rtsp-in-opencv-partially-answered/. It uses libVLC to get the stream. Here is my second code:
#include "opencv2/opencv.hpp"
#include "vlc/libvlc.h"
#include "vlc/libvlc_media.h"
#include "vlc/libvlc_media_player.h"
#include "ctime"
#include "Windows.h"
#include "string"
struct ctx
{
IplImage* image;
HANDLE mutex;
uchar* pixels;
};
void *lock(void *data, void**p_pixels)
{
struct ctx *ctx = (struct ctx*)data;
WaitForSingleObject(ctx->mutex, INFINITE);
*p_pixels = ctx->pixels;
return NULL;
}
void display(void *data, void *id){
(void) data;
assert(id == NULL);
}
void unlock(void *data, void *id, void *const *p_pixels){
struct ctx *ctx = (struct ctx*)data;
/* VLC just rendered the video, but we can also render stuff */
uchar *pixels = (uchar*)*p_pixels;
cvShowImage("image", ctx->image);
ReleaseMutex(ctx->mutex);
assert(id == NULL); /* picture identifier, not needed here */
}
int main()
{
// VLC pointers
libvlc_instance_t *vlcInstance;
libvlc_media_player_t *mp;
libvlc_media_t *media;
const char * const vlc_args[] = {
"-I", "dummy", // Don't use any interface
"--ignore-config", // Don't use VLC's config
"--extraintf=logger", // Log anything
"--verbose=2", // Be much more verbose then normal for debugging purpose
};
vlcInstance = libvlc_new(sizeof(vlc_args) / sizeof(vlc_args[0]), vlc_args);
media = libvlc_media_new_location(vlcInstance, "rtsp://admin:admin#192.168.21.120/snl/live/1/1/stream1.cgi");
mp = libvlc_media_player_new_from_media(media);
libvlc_media_release(media);
struct ctx* context = new ctx; // shared state handed to the libVLC callbacks
context->mutex = CreateMutex(NULL, FALSE, NULL);
context->image = new Mat(VIDEO_HEIGHT, VIDEO_WIDTH, CV_8UC3);
context->pixels = (unsigned char *)context->image->data;
// show blank image
imshow("test", *context->image);
libvlc_video_set_callbacks(mp, lock, unlock, display, context);
libvlc_video_set_format(mp, "RV24", VIDEO_WIDTH, VIDEO_HEIGHT, VIDEO_WIDTH * 24 / 8); // pitch = width * BitsPerPixel / 8
int ii = 0;
int key = 0;
while(key != 27)
{
ii++;
if (ii > 5)
{
libvlc_media_player_play(mp);
}
float fps = libvlc_media_player_get_fps(mp);
//printf("fps:%f\r\n",fps);
key = waitKey(100); // wait 100ms for Esc key
}
libvlc_media_player_stop(mp);
return 0;
}
But it has nearly the same error: some pixels are lost and the images have distortion.
[h264 @ 02a3b660] Frame num gap 6 4
[h264 @ 02a42220] no picture oooo
How can I fix this problem? Is it rooted in my code, the libraries I'm using, or my camera?
I'm working on a project like yours, and this error occurs because OpenCV can't deal with H264. I suggest you use the VLC library as mentioned here, and since you use VS2012 you can use Vlc.DotNet in your project. Other options are FFmpeg and AForge, which are compatible with H264.
I'm struggling with converting a QVideoFrame (Qt) into an AVFrame in order to encode video coming from a webcam.
Documentation for QVideoFrame: http://qt-project.org/doc/qt-5/qvideoframe.html
I understand the basics of image formats, strides and such, but I'm missing something. This is what I've got so far, adapted from a libav example (https://libav.org/doxygen/release/0.8/libavformat_2output-example_8c-example.html):
static void fill_yuv_image(const QVideoFrame &frame, AVFrame *pict, int frame_index, int width, int height)
{
pict->pts = frame.startTime() * 1000000.0; // time_base is 1/1000000
pict->width = frame.width();
pict->height = frame.height();
pict->format = STREAM_PIX_FMT; //todo: get this from the frame
pict->data[0] = (uint8_t*)frame.bits(); // only plane 0 is filled in...
pict->linesize[0] = frame.bytesPerLine(); // ...but a planar format needs data[0..2]/linesize[0..2]
}
After which I'm attempting to encode:
AVPacket pkt;
int got_packet_ptr = 0;
int success = avcodec_encode_video2(c, &pkt, picture, &got_packet_ptr);
/* if zero size, it means the image was buffered */
if (success == 0) {
/* write the compressed frame in the media file */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
}
It returns an error about the stride not matching. Any help would be greatly appreciated. Thanks!
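No packed-RGB stride from a QVideoFrame will line up with a planar YUV AVFrame, so one plausible direction is a libswscale conversion instead of aliasing data[0]. A sketch, untested; it assumes the frame is mapped readable, that the source really is packed 32-bit RGB, and that pict's planes were allocated for YUV420P (e.g. with avpicture_alloc()); on libav 0.8 the pixel format constants lack the AV_ prefix:
#include <libswscale/swscale.h>
// Convert the packed RGB source into the encoder's planar YUV420P frame.
struct SwsContext *sws = sws_getContext(
frame.width(), frame.height(), AV_PIX_FMT_RGB32, // assumed source layout
frame.width(), frame.height(), AV_PIX_FMT_YUV420P, // STREAM_PIX_FMT in the example
SWS_BILINEAR, NULL, NULL, NULL);
const uint8_t *src_data[4] = { frame.bits(), NULL, NULL, NULL };
int src_linesize[4] = { frame.bytesPerLine(), 0, 0, 0 };
sws_scale(sws, src_data, src_linesize, 0, frame.height(),
pict->data, pict->linesize); // fills all three planes with matching strides
sws_freeContext(sws);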
I want to write a cross-platform application using OpenCV for video capture. In all the examples I've found, frames from the camera are processed by calling a grab function and then waiting for a while, whereas I want to process every frame in sequence. I want to define my own callback function that is executed every time a new frame is ready to be processed (like in DirectShow for Windows, where you define your own filter and insert it into the graph for such purposes).
So the question is: how can I do this?
According to the code below, all callbacks would have to follow this definition:
IplImage* custom_callback(IplImage* frame);
This signature means the callback is going to be executed for each frame retrieved by the system. In my example, make_it_gray() allocates a new image to save the result of the grayscale conversion and returns it. This means you must free this frame later in your code. I added comments in the code about it.
Note that if your callback demands a lot of processing, the system might skip a few frames from the camera. Consider the suggestions Paul R and diverscuba23 made.
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
typedef IplImage* (*callback_prototype)(IplImage*);
/*
* make_it_gray: our custom callback to convert a colored frame to its grayscale version.
* Remember that you must deallocate the returned IplImage* yourself after calling this function.
*/
IplImage* make_it_gray(IplImage* frame)
{
// Allocate space for a new image
IplImage* gray_frame = 0;
gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
if (!gray_frame)
{
fprintf(stderr, "!!! cvCreateImage failed!\n" );
return NULL;
}
cvCvtColor(frame, gray_frame, CV_BGR2GRAY); // camera frames come in BGR order
return gray_frame;
}
/*
* process_video: retrieves frames from camera and executes a callback to do individual frame processing.
* Keep in mind that if your callback takes too much time to execute, you might loose a few frames from
* the camera.
*/
void process_video(callback_prototype custom_cb)
{
// Initialize camera
CvCapture *capture = 0;
capture = cvCaptureFromCAM(-1);
if (!capture)
{
fprintf(stderr, "!!! Cannot open initialize webcam!\n" );
return;
}
// Create a window for the video
cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
IplImage* frame = 0;
char key = 0;
while (key != 27) // ESC
{
frame = cvQueryFrame(capture);
if(!frame)
{
fprintf( stderr, "!!! cvQueryFrame failed!\n" );
break;
}
// Execute callback on each frame
IplImage* processed_frame = (*custom_cb)(frame);
// Display processed frame
cvShowImage("result", processed_frame);
// Release resources
cvReleaseImage(&processed_frame);
// Exit when user press ESC
key = cvWaitKey(10);
}
// Free memory
cvDestroyWindow("result");
cvReleaseCapture(&capture);
}
int main( int argc, char **argv )
{
process_video(make_it_gray);
return 0;
}
EDIT:
I changed the code above so it prints the current framerate and performs a manual grayscale conversion. These are small tweaks to the code, made for educational purposes so one knows how to perform operations at the pixel level.
#include <stdio.h>
#include <time.h>
#include <math.h> // for floor()
#include "cv.h"
#include "highgui.h"
typedef IplImage* (*callback_prototype)(IplImage*);
/*
* make_it_gray: our custom callback to convert a colored frame to its grayscale version.
* Remember that you must deallocate the returned IplImage* yourself after calling this function.
*/
IplImage* make_it_gray(IplImage* frame)
{
// New IplImage* to store the processed image
IplImage* gray_frame = 0;
// Manual grayscale conversion: ugly, but shows how to access each channel of the pixels individually
gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
if (!gray_frame)
{
fprintf(stderr, "!!! cvCreateImage failed!\n" );
return NULL;
}
for (int i = 0; i < frame->width * frame->height * frame->nChannels; i += frame->nChannels)
{
// imageData is signed char: cast to uchar before averaging, or bright pixels overflow
uchar avg = ((uchar)frame->imageData[i] + (uchar)frame->imageData[i+1] + (uchar)frame->imageData[i+2]) / 3;
gray_frame->imageData[i] = avg; //B
gray_frame->imageData[i+1] = avg; //G
gray_frame->imageData[i+2] = avg; //R
}
return gray_frame;
}
/*
* process_video: retrieves frames from camera and executes a callback to do individual frame processing.
* Keep in mind that if your callback takes too much time to execute, you might loose a few frames from
* the camera.
*/
void process_video(callback_prototype custom_cb)
{
// Initialize camera
CvCapture *capture = 0;
capture = cvCaptureFromCAM(-1);
if (!capture)
{
fprintf(stderr, "!!! Cannot open initialize webcam!\n" );
return;
}
// Create a window for the video
cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
double elapsed = 0;
int last_time = 0;
int num_frames = 0;
IplImage* frame = 0;
char key = 0;
while (key != 27) // ESC
{
frame = cvQueryFrame(capture);
if(!frame)
{
fprintf( stderr, "!!! cvQueryFrame failed!\n" );
break;
}
// Calculating framerate
num_frames++;
elapsed = clock() - last_time;
int fps = 0;
if (elapsed > 1)
{
fps = floor(num_frames / (float)(1 + (float)elapsed / (float)CLOCKS_PER_SEC));
num_frames = 0;
last_time = clock() + 1 * CLOCKS_PER_SEC;
printf("FPS: %d\n", fps);
}
// Execute callback on each frame
IplImage* processed_frame = (*custom_cb)(frame);
// Display processed frame
cvShowImage("result", processed_frame);
// Release resources
cvReleaseImage(&processed_frame);
// Exit when user press ESC
key = cvWaitKey(10);
}
// Free memory
cvDestroyWindow("result");
cvReleaseCapture(&capture);
}
int main( int argc, char **argv )
{
process_video(make_it_gray);
return 0;
}
A quick thought would be to have two threads: the first thread is responsible for grabbing the frames and notifying the second thread when they are available (placing them in a processing queue), and the second thread does all your processing in an event-loop manner.
See boost::thread and boost::signals2, as those two together should provide most of the framework (except for the queue) for what I described above.
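For illustration, a minimal sketch of that producer/consumer layout, using std::thread and std::condition_variable in place of the Boost pieces (the queue is hand-rolled here, and cv::VideoCapture stands in for the capture source):
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <opencv2/opencv.hpp>
std::queue<cv::Mat> frames; // the processing queue shared by both threads
std::mutex mtx;
std::condition_variable cond;
bool finished = false;
void grab_loop(cv::VideoCapture& cap) // thread 1: grab frames and notify
{
cv::Mat frame;
while (cap.read(frame)) {
{
std::lock_guard<std::mutex> lock(mtx);
frames.push(frame.clone()); // clone: cap.read() reuses its internal buffer
}
cond.notify_one();
}
{ std::lock_guard<std::mutex> lock(mtx); finished = true; }
cond.notify_one();
}
void process_loop() // thread 2: event-loop-style processing
{
for (;;) {
std::unique_lock<std::mutex> lock(mtx);
cond.wait(lock, [] { return !frames.empty() || finished; });
if (frames.empty()) break; // producer finished and queue drained
cv::Mat frame = frames.front();
frames.pop();
lock.unlock();
// ... per-frame processing goes here ...
}
}
int main()
{
cv::VideoCapture cap(0);
if (!cap.isOpened()) return -1;
std::thread producer(grab_loop, std::ref(cap));
std::thread consumer(process_loop);
producer.join();
consumer.join();
return 0;
}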