libav live transcode to SFML SoundStream, garbled and noise - c++

I'm so close to having this working, but playing with the output sample format or codec context doesn't seem to solve it, and I don't know where to go from here.
#include <iostream>
#include <SFML/Audio.hpp>
#include "MyAudioStream.h"
extern "C"
{
#include <libavutil/opt.h>
#include <libavutil/avutil.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/audio_fifo.h>
#include <libswresample/swresample.h>
}
void setupInput(AVFormatContext *input_format_context, AVCodecContext **input_codec_context, const char *streamURL)
{
    // Open the stream URL, print ICY metadata if present, and prepare a
    // decoder context for the first stream.  Every libav call is now checked
    // so a bad URL or unsupported codec is reported instead of crashing later.
    // av_find_input_format("mp3");
    if (avformat_open_input(&input_format_context, streamURL, NULL, NULL) < 0)
    {
        std::cerr << "Could not open input: " << streamURL << std::endl;
        return;
    }
    if (avformat_find_stream_info(input_format_context, NULL) < 0)
    {
        std::cerr << "Could not read stream info" << std::endl;
        return;
    }
    AVDictionary *metadata = input_format_context->metadata;
    AVDictionaryEntry *name = av_dict_get(metadata, "icy-name", NULL, 0);
    if (name != NULL)
    {
        std::cout << name->value << std::endl;
    }
    AVDictionaryEntry *title = av_dict_get(metadata, "StreamTitle", NULL, 0);
    if (title != NULL)
    {
        std::cout << title->value << std::endl;
    }
    // NOTE(review): assumes stream 0 is the audio stream — true for a plain
    // icecast MP3 radio source, but av_find_best_stream() would be more robust.
    AVStream *stream = input_format_context->streams[0];
    AVCodecParameters *codec_params = stream->codecpar;
    AVCodec *codec = avcodec_find_decoder(codec_params->codec_id);
    if (codec == NULL)
    {
        std::cerr << "No decoder found for stream" << std::endl;
        return;
    }
    *input_codec_context = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(*input_codec_context, codec_params);
    if (avcodec_open2(*input_codec_context, codec, NULL) < 0)
    {
        std::cerr << "Could not open decoder" << std::endl;
    }
}
void setupOutput(AVCodecContext *input_codec_context, AVCodecContext **output_codec_context)
{
    // Describe the PCM format handed to SFML: interleaved signed 16-bit
    // little-endian, stereo, at the input's native sample rate.
    AVCodec *output_codec = avcodec_find_encoder(AV_CODEC_ID_PCM_S16LE);
    AVCodecContext *ctx = avcodec_alloc_context3(output_codec);
    ctx->channels = 2;
    ctx->channel_layout = av_get_default_channel_layout(2);
    ctx->sample_rate = input_codec_context->sample_rate;
    // For pcm_s16le the first (and only) supported format is AV_SAMPLE_FMT_S16.
    ctx->sample_fmt = output_codec->sample_fmts[0];
    avcodec_open2(ctx, output_codec, NULL);
    *output_codec_context = ctx;
}
void setupResampler(AVCodecContext *input_codec_context, AVCodecContext *output_codec_context, SwrContext **resample_context)
{
    // Configure the pre-allocated SwrContext to convert decoded audio into
    // the output (SFML-facing) layout / sample format / rate, then init it.
    const AVCodecContext *src = input_codec_context;
    const AVCodecContext *dst = output_codec_context;
    *resample_context = swr_alloc_set_opts(
        *resample_context,
        dst->channel_layout, dst->sample_fmt, dst->sample_rate,
        src->channel_layout, src->sample_fmt, src->sample_rate,
        0, NULL);
    swr_init(*resample_context);
}
MyAudioStream::MyAudioStream()
{
    // Pre-allocate the resampler and demuxer contexts; both are actually
    // configured later, in load().
    resample_context = swr_alloc();
    input_format_context = avformat_alloc_context();
}
MyAudioStream::~MyAudioStream()
{
    // Release everything allocated by the constructor and load().
    // avformat_close_input() both closes and frees the format context (and
    // NULLs the pointer), so the old extra avformat_free_context() call was
    // redundant and has been dropped.
    avformat_close_input(&input_format_context);
    // These were previously leaked: decoder/encoder contexts and resampler.
    avcodec_free_context(&input_codec_context);
    avcodec_free_context(&output_codec_context);
    swr_free(&resample_context);
}
// Wire up the whole pipeline for one stream URL: demuxer + decoder, the
// PCM output description, and the resampler between them; finally tell
// sf::SoundStream (via initialize()) the channel count and sample rate it
// should expect from onGetData().
void MyAudioStream::load(const char *streamURL)
{
setupInput(input_format_context, &input_codec_context, streamURL);
setupOutput(input_codec_context, &output_codec_context);
setupResampler(input_codec_context, output_codec_context, &resample_context);
// initialize() is inherited from sf::SoundStream.
initialize(output_codec_context->channels, output_codec_context->sample_rate);
}
bool MyAudioStream::onGetData(Chunk &data)
{
    // Decode one audio frame, resample it to interleaved S16 stereo and hand
    // it to SFML.  Returns false (stop playback) on EOF or a decode error.
    AVFrame *input_frame = av_frame_alloc();
    AVPacket *input_packet = av_packet_alloc();
    input_packet->data = NULL;
    input_packet->size = 0;
    // Keep feeding packets until the decoder emits a frame (it may need more
    // than one packet right after start-up, which returns EAGAIN).
    int err;
    do
    {
        if (av_read_frame(input_format_context, input_packet) < 0)
        {
            av_packet_free(&input_packet);
            av_frame_free(&input_frame);
            return false;
        }
        avcodec_send_packet(input_codec_context, input_packet);
        err = avcodec_receive_frame(input_codec_context, input_frame);
        av_packet_unref(input_packet);
    } while (err == AVERROR(EAGAIN));
    if (err < 0)
    {
        av_packet_free(&input_packet);
        av_frame_free(&input_frame);
        return false;
    }
    // av_samples_alloc() allocates the output buffer itself, so the old
    // calloc() here was an immediate leak and has been removed.
    uint8_t *converted_input_samples = NULL;
    av_samples_alloc(&converted_input_samples, NULL, output_codec_context->channels, input_frame->nb_samples, output_codec_context->sample_fmt, 0);
    int out_samples = swr_convert(resample_context, &converted_input_samples, input_frame->nb_samples, (const uint8_t **)input_frame->extended_data, input_frame->nb_samples);
    // BUG FIX (the garbling reported in the question): swr_convert() counts
    // samples PER CHANNEL, while SFML's Chunk::sampleCount is the TOTAL
    // number of sf::Int16 values across all channels — multiply by channels.
    data.sampleCount = out_samples * output_codec_context->channels;
    data.samples = (sf::Int16 *)converted_input_samples;
    // NOTE(review): converted_input_samples is still leaked once per chunk.
    // SFML needs the buffer alive until the NEXT onGetData() call, so it
    // should be kept in a member and freed on the following call/destructor.
    av_packet_free(&input_packet);
    av_frame_free(&input_frame);
    return true;
}
// Seeking is intentionally unsupported: this is a live radio stream.
void MyAudioStream::onSeek(sf::Time timeOffset)
{
// no op
}
// Live stream never loops; -1 tells SFML there is no loop point.
sf::Int64 MyAudioStream::onLoop()
{
// no loop
return -1;
}
Called with
#include <iostream>
#include "./MyAudioStream.h"
extern "C"
{
#include <libavutil/opt.h>
#include <libavutil/avutil.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
// Shoutcast/Icecast MP3 radio stream used for testing.
const char *streamURL = "http://s5radio.ponyvillelive.com:8026/stream.mp3";
int main(int, char **)
{
MyAudioStream myStream;
myStream.load(streamURL);
std::cout << "Hello, world!" << std::endl;
myStream.play();
while (myStream.getStatus() == MyAudioStream::Playing)
{
sf::sleep(sf::seconds(0.1f));
}
return 0;
}

I solved it. The count returned by swr_get_out_samples is per channel, it seems. I multiplied by 2 (the number of channels) when setting sf::SoundStream::Chunk::sampleCount, as in:
data.sampleCount = out_samples * 2;
and that works.

Related

config.h: No such file or directory in ssh_client from libssh's example

When I compile the ssh_client code from the examples folder of the libssh source directory (I have written about the build process of this library in this link: libssh's functions couldn't be found on Qt):
#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/time.h>
#ifdef HAVE_TERMIOS_H
#include <termios.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_PTY_H
#include <pty.h>
#endif
#include <sys/ioctl.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <libssh/callbacks.h>
#include <libssh/libssh.h>
#include <libssh/sftp.h>
#include "examples_common.h"
/* Maximum number of command arguments collected into cmds[]. */
#define MAXCMD 10
/* Command-line state shared by opts() and the session helpers below. */
static char *host = NULL;
static char *user = NULL;
static char *cmds[MAXCMD];
static char *config_file = NULL;
static struct termios terminal; /* original tty settings, restored on exit */
static char *pcap_file = NULL;
static char *proxycommand;
/* Authentication callback registered with libssh: simply delegate the
 * interactive password prompting to libssh's ssh_getpass() helper. */
static int auth_callback(const char *prompt,
                         char *buf,
                         size_t len,
                         int echo,
                         int verify,
                         void *userdata)
{
    (void) userdata;
    return ssh_getpass(prompt, buf, len, echo, verify);
}
/* Session callbacks: only authentication prompting is overridden. */
struct ssh_callbacks_struct cb = {
.auth_function = auth_callback,
.userdata = NULL,
};
/* Append a copy of cmd to the first free slot of cmds[]; silently drop the
 * command when all MAXCMD slots are already taken. */
static void add_cmd(char *cmd)
{
    int slot = 0;
    while (slot < MAXCMD && cmds[slot] != NULL) {
        slot++;
    }
    if (slot < MAXCMD) {
        cmds[slot] = strdup(cmd);
    }
}
/* Print the usage text for the sample client and exit(0). */
static void usage(void)
{
fprintf(stderr,
"Usage : ssh [options] [login#]hostname\n"
"sample client - libssh-%s\n"
"Options :\n"
" -l user : log in as user\n"
" -p port : connect to port\n"
" -d : use DSS to verify host public key\n"
" -r : use RSA to verify host public key\n"
" -F file : parse configuration file instead of default one\n"
#ifdef WITH_PCAP
" -P file : create a pcap debugging file\n"
#endif
#ifndef _WIN32
" -T proxycommand : command to execute as a socket proxy\n"
#endif
"\n",
ssh_version(0));
exit(0);
}
/* Parse the client's own options (-P pcap file, -F config file, -T proxy
 * command); the first non-option argument is the host, the rest are the
 * command words executed via batch_shell().  Exits via usage() when no
 * host is given. */
static int opts(int argc, char **argv)
{
int i;
while((i = getopt(argc,argv,"T:P:F:")) != -1) {
switch(i){
case 'P':
pcap_file = optarg;
break;
case 'F':
config_file = optarg;
break;
#ifndef _WIN32
case 'T':
proxycommand = optarg;
break;
#endif
default:
fprintf(stderr, "Unknown option %c\n", optopt);
usage();
}
}
/* First positional argument is the target host. */
if (optind < argc) {
host = argv[optind++];
}
/* Remaining positional arguments become the remote command. */
while(optind < argc) {
add_cmd(argv[optind++]);
}
if (host == NULL) {
usage();
}
return 0;
}
#ifndef HAVE_CFMAKERAW
/* Fallback for platforms whose libc lacks cfmakeraw(): put the terminal in
 * raw mode — no echo, no canonical line editing, no signal characters,
 * no output post-processing, 8-bit characters, parity off. */
static void cfmakeraw(struct termios *termios_p)
{
termios_p->c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP|INLCR|IGNCR|ICRNL|IXON);
termios_p->c_oflag &= ~OPOST;
termios_p->c_lflag &= ~(ECHO|ECHONL|ICANON|ISIG|IEXTEN);
termios_p->c_cflag &= ~(CSIZE|PARENB);
termios_p->c_cflag |= CS8;
}
#endif
/* Restore the terminal settings that shell() saved into `terminal`.
 * The int parameter only exists so this can double as a signal handler. */
static void do_cleanup(int i)
{
    (void) i;
    tcsetattr(0, TCSANOW, &terminal);
}
/* SIGTERM handler: put the tty back in order, then terminate successfully. */
static void do_exit(int i)
{
    (void) i;
    do_cleanup(0);
    exit(0);
}
/* Channel resized by the SIGWINCH handler path (sizechanged()). */
static ssh_channel chan;
/* Set by sigwindowchanged(); polled from select_loop(). */
static int signal_delayed = 0;
/* SIGWINCH handler: only set a flag; the real resize work happens later on
 * the main loop (doing it here would not be async-signal-safe). */
static void sigwindowchanged(int i)
{
(void) i;
signal_delayed = 1;
}
/* (Re-)arm the SIGWINCH handler and clear the pending-resize flag. */
static void setsignal(void)
{
signal(SIGWINCH, sigwindowchanged);
signal_delayed = 0;
}
/* Propagate the local terminal's new size to the remote pty, then re-arm
 * the SIGWINCH handler. */
static void sizechanged(void)
{
struct winsize win = {
.ws_row = 0,
};
ioctl(1, TIOCGWINSZ, &win);
ssh_channel_change_pty_size(chan,win.ws_col, win.ws_row);
setsignal();
}
/* Pump data between the local fds (stdin/stdout/stderr) and the remote
 * channel using libssh connectors on a single event loop, until the
 * channel closes or polling fails.
 * NOTE(review): ssh_event_new()/ssh_connector_new() results are not
 * NULL-checked — acceptable for example code, worth hardening otherwise. */
static void select_loop(ssh_session session,ssh_channel channel)
{
ssh_connector connector_in, connector_out, connector_err;
int rc;
ssh_event event = ssh_event_new();
/* stdin */
connector_in = ssh_connector_new(session);
ssh_connector_set_out_channel(connector_in, channel, SSH_CONNECTOR_STDINOUT);
ssh_connector_set_in_fd(connector_in, 0);
ssh_event_add_connector(event, connector_in);
/* stdout */
connector_out = ssh_connector_new(session);
ssh_connector_set_out_fd(connector_out, 1);
ssh_connector_set_in_channel(connector_out, channel, SSH_CONNECTOR_STDINOUT);
ssh_event_add_connector(event, connector_out);
/* stderr */
connector_err = ssh_connector_new(session);
ssh_connector_set_out_fd(connector_err, 2);
ssh_connector_set_in_channel(connector_err, channel, SSH_CONNECTOR_STDERR);
ssh_event_add_connector(event, connector_err);
while (ssh_channel_is_open(channel)) {
/* Apply any window resize that arrived via SIGWINCH. */
if (signal_delayed) {
sizechanged();
}
rc = ssh_event_dopoll(event, 60000);
if (rc == SSH_ERROR) {
fprintf(stderr, "Error in ssh_event_dopoll()\n");
break;
}
}
ssh_event_remove_connector(event, connector_in);
ssh_event_remove_connector(event, connector_out);
ssh_event_remove_connector(event, connector_err);
ssh_connector_free(connector_in);
ssh_connector_free(connector_out);
ssh_connector_free(connector_err);
ssh_event_free(event);
}
/* Open an interactive remote shell.  When stdin is a tty: save the local
 * terminal settings (restored by do_cleanup()), request a remote pty sized
 * like the local window, switch the local tty to raw mode, and watch for
 * SIGWINCH.  Data is then pumped by select_loop(). */
static void shell(ssh_session session)
{
ssh_channel channel;
struct termios terminal_local;
int interactive=isatty(0);
channel = ssh_channel_new(session);
if (channel == NULL) {
return;
}
if (interactive) {
/* Keep a copy of the tty state in the global for do_cleanup(). */
tcgetattr(0, &terminal_local);
memcpy(&terminal, &terminal_local, sizeof(struct termios));
}
if (ssh_channel_open_session(channel)) {
printf("Error opening channel : %s\n", ssh_get_error(session));
ssh_channel_free(channel);
return;
}
chan = channel;
if (interactive) {
ssh_channel_request_pty(channel);
sizechanged();
}
if (ssh_channel_request_shell(channel)) {
printf("Requesting shell : %s\n", ssh_get_error(session));
ssh_channel_free(channel);
return;
}
if (interactive) {
/* Raw mode: forward every keystroke to the remote side unmodified. */
cfmakeraw(&terminal_local);
tcsetattr(0, TCSANOW, &terminal_local);
setsignal();
}
signal(SIGTERM, do_cleanup);
select_loop(session, channel);
if (interactive) {
do_cleanup(0);
}
ssh_channel_free(channel);
}
/* Join the collected command words into one space-separated line and run it
 * on the remote side through a single exec channel. */
static void batch_shell(ssh_session session)
{
    ssh_channel channel;
    char buffer[1024];
    size_t i;
    size_t used = 0;
    /* BUG FIX: the old code kept an int offset `s` and passed
     * sizeof(buffer) - s to snprintf.  snprintf returns the WOULD-BE length,
     * so once the commands overflowed the buffer, `s` exceeded its size and
     * sizeof(buffer) - s wrapped around as a huge size_t, letting snprintf
     * write past the end.  Stop appending once the buffer is full instead. */
    buffer[0] = '\0';
    for (i = 0; i < MAXCMD && cmds[i]; ++i) {
        if (used < sizeof(buffer)) {
            int written = snprintf(buffer + used, sizeof(buffer) - used, "%s ", cmds[i]);
            if (written > 0) {
                used += (size_t)written;
            }
        }
        free(cmds[i]);
        cmds[i] = NULL;
    }
    channel = ssh_channel_new(session);
    if (channel == NULL) {
        return;
    }
    /* Check the open (the old code ignored its result). */
    if (ssh_channel_open_session(channel)) {
        printf("Error opening channel : %s\n", ssh_get_error(session));
        ssh_channel_free(channel);
        return;
    }
    if (ssh_channel_request_exec(channel, buffer)) {
        printf("Error executing '%s' : %s\n", buffer, ssh_get_error(session));
        ssh_channel_free(channel);
        return;
    }
    select_loop(session, channel);
    ssh_channel_free(channel);
}
/* Configure and run one SSH session: apply user/host/proxy options, load
 * the ssh config, connect, verify the host key, authenticate, and finally
 * run either an interactive shell (no commands given) or the batched
 * command line.  Returns 0 on success, -1 on any failure. */
static int client(ssh_session session)
{
int auth = 0;
char *banner;
int state;
if (user) {
if (ssh_options_set(session, SSH_OPTIONS_USER, user) < 0) {
return -1;
}
}
if (ssh_options_set(session, SSH_OPTIONS_HOST, host) < 0) {
return -1;
}
if (proxycommand != NULL) {
if (ssh_options_set(session, SSH_OPTIONS_PROXYCOMMAND, proxycommand)) {
return -1;
}
}
/* Parse configuration file if specified: The command-line options will
 * overwrite items loaded from configuration file */
if (config_file != NULL) {
ssh_options_parse_config(session, config_file);
} else {
ssh_options_parse_config(session, NULL);
}
if (ssh_connect(session)) {
fprintf(stderr, "Connection failed : %s\n", ssh_get_error(session));
return -1;
}
state = verify_knownhost(session);
if (state != 0) {
return -1;
}
/* "none" auth primes the server's list of allowed methods. */
ssh_userauth_none(session, NULL);
banner = ssh_get_issue_banner(session);
if (banner) {
printf("%s\n", banner);
free(banner);
}
auth = authenticate_console(session);
if (auth != SSH_AUTH_SUCCESS) {
return -1;
}
if (cmds[0] == NULL) {
shell(session);
} else {
batch_shell(session);
}
return 0;
}
/* Active pcap capture object, when the -P option was given. */
static ssh_pcap_file pcap;
/* Attach a pcap capture file to the session, but only when the user asked
 * for one with -P.  On open failure the capture is dropped and the session
 * proceeds without it. */
static void set_pcap(ssh_session session)
{
    if (pcap_file == NULL) {
        return;
    }
    pcap = ssh_pcap_file_new();
    if (pcap == NULL) {
        return;
    }
    if (ssh_pcap_file_open(pcap, pcap_file) != SSH_ERROR) {
        ssh_set_pcap_file(session, pcap);
        return;
    }
    printf("Error opening pcap file\n");
    ssh_pcap_file_free(pcap);
    pcap = NULL;
}
/* Free the capture object (if any) and forget the pointer. */
static void cleanup_pcap(void)
{
    if (pcap) {
        ssh_pcap_file_free(pcap);
    }
    pcap = NULL;
}
/* Entry point: create the session, register the auth callback, let libssh
 * consume its own command-line options first, then parse ours, run the
 * client, and tear everything down. */
int main(int argc, char **argv)
{
ssh_session session;
session = ssh_new();
ssh_callbacks_init(&cb);
ssh_set_callbacks(session,&cb);
/* libssh strips the options it understands from argc/argv. */
if (ssh_options_getopt(session, &argc, argv)) {
fprintf(stderr,
"Error parsing command line: %s\n",
ssh_get_error(session));
usage();
}
opts(argc, argv);
signal(SIGTERM, do_exit);
set_pcap(session);
client(session);
ssh_disconnect(session);
ssh_free(session);
cleanup_pcap();
ssh_finalize();
return 0;
}
I got the following error:
/home/heydari.f/projects/ssh_client/ssh_client/main.c:100: error: config.h: No such file or directory
#include "config.h"
^~~~~~~~~~
Is there anything to do with configure or what?
I use Qt and my os is ubuntu 18.04.
Config headers such as config.h normally aren't needed to compile a program against a library. They are generated to compile the library itself, not the programs that link against it. If that weren't the case, there would be a lot of trouble, as lots of software uses them and there would be many collisions between them.
Being an example, it may be that it uses the config.h, but in that case I'm pretty sure you should compile with the system libssh uses to compile. (You may need to specify an option to compile examples when calling configure or specify something in DefineOptions.cmake or something in the same line.)
If you copied the sources (as it seems as the error states projects/ssh_client/) to build with Qt, you probably can remove that include unless it is a config from Qt itself.
Also, if you are compiling with Qt you surely need to install the lib and follow #Dmitry advice about -I, -L and -l flags to compiler.

How to use the libuv to accept the tcp connection with multi-thread?

I wrote a C++ demo of a TCP server with libuv. When I checked the CPU usage, I found the demo runs as a single thread; how can I implement it with multiple threads?
Currently, the demo can handle 100,000+ TCP requests per second, but it can only use 1 CPU.
Code:
#include <iostream>
#include <atomic>
#include "uv.h"
#include <thread>
#include <mutex>
#include <map>
using namespace std;
// Shared event loop and the address the server binds to.
auto loop = uv_default_loop();
struct sockaddr_in addr;
// A write request together with the buffer it owns; both are released in
// free_write_req() once the write completes.
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
// Arguments for deferring uv_read_start() onto a worker thread (used only by
// the currently-disabled acceptClientRead()/uv_queue_work path).
typedef struct {
uv_stream_t* client;
uv_alloc_cb alloc_cb;
uv_read_cb read_cb;
} begin_read_req;
// Hand libuv a freshly malloc'd read buffer of the suggested size;
// echo_read() frees it after every read completes.
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
    char *storage = static_cast<char *>(malloc(suggested_size));
    buf->base = storage;
    buf->len = suggested_size;
}
// A write_req_t owns both itself and its payload buffer — release both.
void free_write_req(uv_write_t *req) {
    write_req_t *wreq = reinterpret_cast<write_req_t *>(req);
    free(wreq->buf.base);
    free(wreq);
}
// Write-completion callback: log a failure, then free the request and its
// buffer in every case.
void echo_write(uv_write_t *req, int status) {
    if (status != 0) {
        fprintf(stderr, "Write error %s\n", uv_strerror(status));
    }
    free_write_req(req);
}
// Read callback: reply "+OK\r\n" to every successful read; close the stream
// on error or EOF; always release the read buffer alloc_buffer() provided.
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
    if (nread > 0) {
        auto req = (write_req_t*)malloc(sizeof(write_req_t));
        auto *aaa = (char*)malloc(5);
        aaa[0] = '+';
        aaa[1] = 'O';
        aaa[2] = 'K';
        aaa[3] = '\r';
        aaa[4] = '\n';
        req->buf = uv_buf_init(aaa, 5);
        // BUG FIX: if uv_write() fails, echo_write() never runs, so the
        // request and its payload used to leak; release them here instead.
        if (uv_write((uv_write_t*)req, client, &req->buf, 1, echo_write) != 0) {
            free_write_req((uv_write_t*)req);
        }
    }
    if (nread < 0) {
        if (nread != UV_EOF)
            fprintf(stderr, "Read error %s\n", uv_err_name(static_cast<unsigned int>(nread)));
        uv_close((uv_handle_t*)client, nullptr);
    }
    free(buf->base);
}
// Worker-thread entry point (currently unused — see the commented-out
// uv_queue_work call in on_new_connection): starts reading from the client
// described by the begin_read_req stored in req->data.
void acceptClientRead(uv_work_t *req) {
begin_read_req *data = (begin_read_req *)req->data;
uv_read_start(data->client, data->alloc_cb, data->read_cb);
}
// Accept a pending connection and start echoing on the loop thread.
void on_new_connection(uv_stream_t *server, int status) {
    if (status < 0) {
        cout << "New connection error:" << uv_strerror(status);
        return;
    }
    uv_tcp_t *client = (uv_tcp_t *)malloc(sizeof(uv_tcp_t));
    uv_tcp_init(loop, client);
    // BUG FIX: a uv_work_t and a begin_read_req used to be malloc'd here for
    // the (commented-out) uv_queue_work path and were leaked on EVERY
    // connection; allocate them only when that path is re-enabled.
    if (uv_accept(server, (uv_stream_t *)client) == 0) {
        uv_read_start((uv_stream_t *)client, alloc_buffer, echo_read);
        // uv_queue_work(workloop[0], req, acceptClientRead, nullptr);
    }
    else {
        // BUG FIX: uv_close() with no callback leaked the malloc'd handle;
        // free it once libuv has finished closing it.
        uv_close((uv_handle_t *)client, [](uv_handle_t *h) { free(h); });
    }
}
// Debug timer tick (not armed anywhere in this demo): prints the id of the
// thread running the loop, to show which thread callbacks execute on.
void timer_callback(uv_timer_t* handle) {
cout << std::this_thread::get_id() << "---------" << "hello" << endl;
}
// Bind 0.0.0.0:8790 with a listen backlog of 511 and run the default loop
// forever; all accept/read/write callbacks run on this single thread.
int main() {
uv_tcp_t server{};
uv_tcp_init(loop, &server);
uv_ip4_addr("0.0.0.0", 8790, &addr);
uv_tcp_bind(&server, (const struct sockaddr *) &addr, 0);
uv_listen((uv_stream_t *)&server, 511, on_new_connection);
uv_run(loop, UV_RUN_DEFAULT);
return 0;
}
Of course, I can make the write step asynchronous in the method "echo_read", but I didn't do anything before the write, can I make the demo multi-thread in another way to improve the throughput?

Visual C++ Error: Debug Assertion Failed

Hello, I compiled my C++ program, and when I start the .exe I get an error.
Image from the error
This is my source(main.cpp):
#include <iostream>
#include <string>
#include <Windows.h>
#include <dos.h>
#include <stdio.h>
#include <fstream>
#include <ctime>
// using
using namespace std;
bool fexists(const char *filename);
int main() {
try {
HANDLE h;
string cClipboard = "";
CreateDirectory("C:\\Program Files\\Clipboard Logger", NULL);
if (!fexists("C:\\Program Files\\Clipboard Logger\\log.txt")) {
FILE *fp;
fp = fopen("C:\\Program Files\\Clipboard Logger\\log.txt", "w");
fclose(fp);
}
while (true) {
if (!OpenClipboard(0)) {
Sleep(2000);
continue;
}
h = GetClipboardData(CF_TEXT);
CloseClipboard();
if ((char *)h == cClipboard) {
Sleep(2000);
continue;
}
cClipboard = (char *)h;
time_t t = time(0);
struct tm * now = localtime(&t);
FILE *fp;
fp = fopen("C:\\Program Files\\Clipboard Logger\\log.txt", "a");
int day = now->tm_mday;
int month = now->tm_mon + 1;
int year = now->tm_year + 1900;
int sec = now->tm_sec;
int min = now->tm_min;
int hour = now->tm_hour;
char logLine[sizeof((char *)h) + 64];
sprintf(logLine, "%d.%d.%d %d.%d.%d %s\n", hour, min, sec, day, month, year, (char *)h);
fprintf(fp, (char *)logLine);
fclose(fp);
cout << (char *)logLine << endl;
Sleep(2000);
}
getchar();
return 0;
} catch (...) {
}
}
// True when the file can be opened for reading (i.e. it exists and is
// accessible to this process).
bool fexists(const char *filename) {
    std::ifstream probe(filename);
    return static_cast<bool>(probe);
}
I am new to C++ and I don't know how to fix it, because when I debug the program everything works fine, but the standalone .exe doesn't work.

FFmpeg C++ api decode h264 error

I'm trying to use the C++ API of FFMpeg (version 20150526) under Windows using the prebuilt binaries to decode an h264 video file (*.ts).
I've written a very simple code that automatically detects the required codec from the file itself (and it is AV_CODEC_ID_H264, as expected).
Then I re-open the video file in read-binary mode and I read a fixed-size buffer of bytes from it and provide the read bytes to the decoder within a while-loop until the end of file. However when I call the function avcodec_decode_video2 a large amount of errors happen like the following ones:
[h264 # 008df020] top block unavailable for requested intro mode at 34 0
[h264 # 008df020] error while decoding MB 34 0, bytestream 3152
[h264 # 008df020] decode_slice_header error
Sometimes the function avcodec_decode_video2 sets the value of got_picture_ptr to 1 and hence I expect to find a good frame. Instead, though all the computations are successful, when I view the decoded frame (using OpenCV only for visualization purposes) I see a gray one with some artifacts.
If I employ the same code to decode an *.avi file it works fine.
Reading the examples of FFMpeg I did not find a solution to my problem. I've also implemented the solution proposed in the simlar question FFmpeg c++ H264 decoding error but it did not work.
Does anyone know where the error is?
Thank you in advance for any reply!
The code is the following [EDIT: code updated including the parser management]:
#include <iostream>
#include <iomanip>
#include <string>
#include <sstream>
#include <opencv2/opencv.hpp>
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libpostproc/postprocess.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
} // end extern "C".
#endif // __cplusplus
#define INBUF_SIZE 4096
// ORIGINAL (broken) version from the question: reads myvideo.ts with a raw
// fread() loop, pushes the bytes through an H.264 parser into
// avcodec_decode_video2(), and shows each decoded frame with OpenCV.
// The root cause of the slice errors (confirmed in the accepted answer) is
// that the TS container is never demuxed — raw file bytes are not a valid
// H.264 elementary stream.  The fixed version follows below.
void main()
{
AVCodec* l_pCodec;
AVCodecContext* l_pAVCodecContext;
SwsContext* l_pSWSContext;
AVFormatContext* l_pAVFormatContext;
AVFrame* l_pAVFrame;
AVFrame* l_pAVFrameBGR;
AVPacket l_AVPacket;
AVPacket l_AVPacket_out;
AVStream* l_pStream;
AVCodecParserContext* l_pParser;
FILE* l_pFile_in;
FILE* l_pFile_out;
std::string l_sFile;
int l_iResult;
int l_iFrameCount;
int l_iGotFrame;
int l_iBufLength;
int l_iParsedBytes;
int l_iPts;
int l_iDts;
int l_iPos;
int l_iSize;
int l_iDecodedBytes;
uint8_t l_auiInBuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
uint8_t* l_pData;
cv::Mat l_cvmImage;
l_pCodec = NULL;
l_pAVCodecContext = NULL;
l_pSWSContext = NULL;
l_pAVFormatContext = NULL;
l_pAVFrame = NULL;
l_pAVFrameBGR = NULL;
l_pParser = NULL;
l_pStream = NULL;
l_pFile_in = NULL;
l_pFile_out = NULL;
l_iPts = 0;
l_iDts = 0;
l_iPos = 0;
l_pData = NULL;
l_sFile = "myvideo.ts";
// Register all codecs/formats (pre-4.0 libav API).
avdevice_register_all();
avfilter_register_all();
avcodec_register_all();
av_register_all();
avformat_network_init();
l_pAVFormatContext = avformat_alloc_context();
l_iResult = avformat_open_input(&l_pAVFormatContext,
l_sFile.c_str(),
NULL,
NULL);
if (l_iResult >= 0)
{
l_iResult = avformat_find_stream_info(l_pAVFormatContext, NULL);
if (l_iResult >= 0)
{
// Pick the decoder for the (last) video stream found.
for (int i=0; i<l_pAVFormatContext->nb_streams; i++)
{
if (l_pAVFormatContext->streams[i]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO)
{
l_pCodec = avcodec_find_decoder(
l_pAVFormatContext->streams[i]->codec->codec_id);
l_pStream = l_pAVFormatContext->streams[i];
}
}
}
}
av_init_packet(&l_AVPacket);
av_init_packet(&l_AVPacket_out);
memset(l_auiInBuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
if (l_pCodec)
{
// NOTE(review): a fresh context is allocated here instead of using the
// stream's own codec context, so SPS/PPS extradata from the container is
// lost — one more reason the decode produces artifacts.
l_pAVCodecContext = avcodec_alloc_context3(l_pCodec);
l_pParser = av_parser_init(l_pAVCodecContext->codec_id);
if (l_pParser)
{
av_register_codec_parser(l_pParser->parser);
}
if (l_pAVCodecContext)
{
if (l_pCodec->capabilities & CODEC_CAP_TRUNCATED)
{
l_pAVCodecContext->flags |= CODEC_FLAG_TRUNCATED;
}
l_iResult = avcodec_open2(l_pAVCodecContext, l_pCodec, NULL);
if (l_iResult >= 0)
{
// Re-open the same file for the raw byte-level read loop.
l_pFile_in = fopen(l_sFile.c_str(), "rb");
if (l_pFile_in)
{
l_pAVFrame = av_frame_alloc();
l_pAVFrameBGR = av_frame_alloc();
if (l_pAVFrame)
{
l_iFrameCount = 0;
avcodec_get_frame_defaults(l_pAVFrame);
// Outer loop: read fixed-size chunks until EOF.
while (1)
{
l_iBufLength = fread(l_auiInBuf,
1,
INBUF_SIZE,
l_pFile_in);
if (l_iBufLength == 0)
{
break;
}
else
{
l_pData = l_auiInBuf;
l_iSize = l_iBufLength;
// Inner loop: let the parser split the chunk into packets.
while (l_iSize > 0)
{
if (l_pParser)
{
l_iParsedBytes = av_parser_parse2(
l_pParser,
l_pAVCodecContext,
&l_AVPacket_out.data,
&l_AVPacket_out.size,
l_pData,
l_iSize,
l_AVPacket.pts,
l_AVPacket.dts,
AV_NOPTS_VALUE);
if (l_iParsedBytes <= 0)
{
break;
}
l_AVPacket.pts = l_AVPacket.dts = AV_NOPTS_VALUE;
l_AVPacket.pos = -1;
}
else
{
l_AVPacket_out.data = l_pData;
l_AVPacket_out.size = l_iSize;
}
l_iDecodedBytes =
avcodec_decode_video2(
l_pAVCodecContext,
l_pAVFrame,
&l_iGotFrame,
&l_AVPacket_out);
if (l_iDecodedBytes >= 0)
{
if (l_iGotFrame)
{
// NOTE(review): sws context and the BGR picture are
// allocated again for EVERY frame and never freed —
// a steady leak even once decoding works.
l_pSWSContext = sws_getContext(
l_pAVCodecContext->width,
l_pAVCodecContext->height,
l_pAVCodecContext->pix_fmt,
l_pAVCodecContext->width,
l_pAVCodecContext->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
if (l_pSWSContext)
{
l_iResult = avpicture_alloc(
reinterpret_cast<AVPicture*>(l_pAVFrameBGR),
AV_PIX_FMT_BGR24,
l_pAVFrame->width,
l_pAVFrame->height);
l_iResult = sws_scale(
l_pSWSContext,
l_pAVFrame->data,
l_pAVFrame->linesize,
0,
l_pAVCodecContext->height,
l_pAVFrameBGR->data,
l_pAVFrameBGR->linesize);
if (l_iResult > 0)
{
// Wrap the BGR plane in a cv::Mat (no copy) for display.
l_cvmImage = cv::Mat(
l_pAVFrame->height,
l_pAVFrame->width,
CV_8UC3,
l_pAVFrameBGR->data[0],
l_pAVFrameBGR->linesize[0]);
if (l_cvmImage.empty() == false)
{
cv::imshow("image", l_cvmImage);
cv::waitKey(10);
}
}
}
l_iFrameCount++;
}
}
else
{
break;
}
// Advance past the bytes the parser consumed.
l_pData += l_iParsedBytes;
l_iSize -= l_iParsedBytes;
}
}
} // end while(1).
}
fclose(l_pFile_in);
}
}
}
}
}
EDIT: The following is the final code that solves my problem, thanks to the suggestions of Ronald.
#include <iostream>
#include <iomanip>
#include <string>
#include <sstream>
#include <opencv2/opencv.hpp>
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libpostproc/postprocess.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
} // end extern "C".
#endif // __cplusplus
// FIXED version (following Ronald's answer): let libavformat demux the TS
// file with av_read_frame(), decode only packets of the video stream, and
// convert each decoded frame to BGR once for display with OpenCV.
void main()
{
AVCodec* l_pCodec;
AVCodecContext* l_pAVCodecContext;
SwsContext* l_pSWSContext;
AVFormatContext* l_pAVFormatContext;
AVFrame* l_pAVFrame;
AVFrame* l_pAVFrameBGR;
AVPacket l_AVPacket;
std::string l_sFile;
uint8_t* l_puiBuffer;
int l_iResult;
int l_iFrameCount;
int l_iGotFrame;
int l_iDecodedBytes;
int l_iVideoStreamIdx;
int l_iNumBytes;
cv::Mat l_cvmImage;
l_pCodec = NULL;
l_pAVCodecContext = NULL;
l_pSWSContext = NULL;
l_pAVFormatContext = NULL;
l_pAVFrame = NULL;
l_pAVFrameBGR = NULL;
l_puiBuffer = NULL;
l_sFile = "myvideo.ts";
av_register_all();
l_iResult = avformat_open_input(&l_pAVFormatContext,
l_sFile.c_str(),
NULL,
NULL);
if (l_iResult >= 0)
{
l_iResult = avformat_find_stream_info(l_pAVFormatContext, NULL);
if (l_iResult >= 0)
{
// Find the first video stream and reuse ITS codec context, so the
// decoder gets the container's extradata (SPS/PPS) automatically.
for (int i=0; i<l_pAVFormatContext->nb_streams; i++)
{
if (l_pAVFormatContext->streams[i]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO)
{
l_iVideoStreamIdx = i;
l_pAVCodecContext =
l_pAVFormatContext->streams[l_iVideoStreamIdx]->codec;
if (l_pAVCodecContext)
{
l_pCodec = avcodec_find_decoder(l_pAVCodecContext->codec_id);
}
break;
}
}
}
}
if (l_pCodec && l_pAVCodecContext)
{
l_iResult = avcodec_open2(l_pAVCodecContext, l_pCodec, NULL);
if (l_iResult >= 0)
{
l_pAVFrame = av_frame_alloc();
l_pAVFrameBGR = av_frame_alloc();
// One BGR buffer allocated up front and reused for every frame.
l_iNumBytes = avpicture_get_size(PIX_FMT_BGR24,
l_pAVCodecContext->width,
l_pAVCodecContext->height);
l_puiBuffer = (uint8_t *)av_malloc(l_iNumBytes*sizeof(uint8_t));
// NOTE(review): the size above is computed for PIX_FMT_BGR24 but the
// fill below says PIX_FMT_RGB24 — same byte layout/size for these two
// formats, but the constants should match for clarity.
avpicture_fill((AVPicture *)l_pAVFrameBGR,
l_puiBuffer,
PIX_FMT_RGB24,
l_pAVCodecContext->width,
l_pAVCodecContext->height);
// Single sws context reused for the whole run (unlike version 1).
l_pSWSContext = sws_getContext(
l_pAVCodecContext->width,
l_pAVCodecContext->height,
l_pAVCodecContext->pix_fmt,
l_pAVCodecContext->width,
l_pAVCodecContext->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
// Demux: av_read_frame() yields complete, correctly-split packets.
while (av_read_frame(l_pAVFormatContext, &l_AVPacket) >= 0)
{
if (l_AVPacket.stream_index == l_iVideoStreamIdx)
{
l_iDecodedBytes = avcodec_decode_video2(
l_pAVCodecContext,
l_pAVFrame,
&l_iGotFrame,
&l_AVPacket);
if (l_iGotFrame)
{
if (l_pSWSContext)
{
l_iResult = sws_scale(
l_pSWSContext,
l_pAVFrame->data,
l_pAVFrame->linesize,
0,
l_pAVCodecContext->height,
l_pAVFrameBGR->data,
l_pAVFrameBGR->linesize);
if (l_iResult > 0)
{
l_cvmImage = cv::Mat(
l_pAVFrame->height,
l_pAVFrame->width,
CV_8UC3,
l_pAVFrameBGR->data[0],
l_pAVFrameBGR->linesize[0]);
if (l_cvmImage.empty() == false)
{
cv::imshow("image", l_cvmImage);
cv::waitKey(1);
}
}
}
l_iFrameCount++;
}
}
// NOTE(review): l_AVPacket is never av_free_packet()'d here, so each
// demuxed packet's data leaks.
}
}
}
}
You're never using the l_pParser object, or in other words, you're not using a H264 parser, you're just sending raw file data into the decoder without proper NAL packetization. Please read the frame parsing API docs to figure out how to use the parser.

What to do when last_pts > current_pts using ffmpeg libs (C++)

Im having some hard time figuring out where to find about this..
Im building a simple recorder to learn about this video compression universe and Im facing some weird behaviors..
Before all I need to explain the scenario...
Its very simple... everytime I call av_read_frame( input_context, input_packet ) I save the pts into the last_pts variable...
So...
Whats bothering me is the fact that about 10% of my calls to av_read_frame I get
input_packet.pts > last_pts
Resulting in a error message from the encoder when I try to do it...
Having it in mind I decided to just drop those frames when it happens....
I think it is not a good idea to just drop frames because if I get them, its needed somehow...
So... what to do when last_pts > current_pts ?
I will paste my test code that Im using capturing the video from webcam and saving to mp4 file with libx264 encoder
#include <QCoreApplication>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavdevice/avdevice.h>
}
#include <QTime>
#include <QThread>
#include <QDebug>
// Enables the verbose packet logging below and AV_LOG_DEBUG output.
#define SM_DEBUG
// Frame pacing: cap capture at 30 fps and stop recording after 5 seconds.
static const double max_fps = 30;
static const double min_loop_duration = 1000 / max_fps;
static const double max_duration = 5; // in seconds
static void sleep_if_needed(const int &elapsed) {
int sleep_duration = min_loop_duration - elapsed;
if (sleep_duration > 0) {
QThread::msleep(sleep_duration);
}
}
#ifdef SM_DEBUG
// Dump a packet's size, pts/dts and their times in seconds to the debug log;
// is_input selects the ">>" (capture side) or "<<" (encode side) prefix.
static void log_packet(const AVPacket *pkt,
const AVRational &time_base,
int is_input=0)
{
qDebug() << ((is_input) ? QString(">>") : QString("<<")) << "Size:" << QString::number(pkt->size) <<
"pts:" << QString::number(pkt->pts) <<
"pts_time:" << QString::number(av_q2d(time_base) * pkt->pts) <<
"dts:" << QString::number(pkt->dts) <<
"dts_time:" << QString::number(av_q2d(time_base) * pkt->dts);
}
#endif
// Capture ~5 s of video from /dev/video0 (v4l2), scale to 640x480, encode
// with libx264 and mux to an MP4 file, pacing the loop to at most 30 fps.
// This is the question's code: packets whose pts went backwards (B-frame
// decode order, per the answer) are dropped.
int main()
{
int input_w, input_h, output_w = 640, output_h = 480;
av_register_all();
avdevice_register_all();
avcodec_register_all();
#ifdef SM_DEBUG
av_log_set_level(AV_LOG_DEBUG);
#else
av_log_set_level(AV_LOG_ERROR);
#endif
AVFormatContext *ic;
AVFormatContext *oc;
AVInputFormat *ifmt;
AVDictionary *opts = 0;
AVCodecContext* dec_ctx;
AVCodecContext* enc_ctx;
AVCodec *dec;
AVCodec *enc;
AVStream* ist;
AVStream* ost;
// Input: webcam via the v4l2 device demuxer, non-blocking reads.
ifmt = av_find_input_format("v4l2");
av_dict_set(&opts, "tune", "zerolatency", AV_DICT_APPEND);
ic = avformat_alloc_context();
ic->flags |= AVFMT_FLAG_NONBLOCK;
avformat_open_input(&ic, "/dev/video0", ifmt, &opts);
avformat_find_stream_info(ic, NULL);
av_dump_format(ic, 0, ic->filename, 0);
AVFrame *frame;
AVFrame *tmp_frame;
ist = ic->streams[0];
dec_ctx = ist->codec;
input_w = dec_ctx->width;
input_h = dec_ctx->height;
dec_ctx->flags |= CODEC_FLAG_LOW_DELAY;
dec = avcodec_find_decoder(dec_ctx->codec_id);
av_format_set_video_codec(ic, dec);
avcodec_open2(dec_ctx, dec, NULL);
// output
avformat_alloc_output_context2(&oc, NULL, "MP4", "/home/poste9/grava.mp4");
enc = avcodec_find_encoder(AV_CODEC_ID_H264);
ost = avformat_new_stream(oc, enc);
enc_ctx = ost->codec;
enc_ctx->codec_id = AV_CODEC_ID_H264;
enc_ctx->width = output_w;
enc_ctx->height = output_h;
// Output stream reuses the capture time base so pts can be copied over.
ost->time_base.num = ist->time_base.num;
ost->time_base.den = ist->time_base.den;
enc_ctx->time_base = ost->time_base;
enc_ctx->gop_size = 250;
enc_ctx->keyint_min = 25;
enc_ctx->qmax = 51;
enc_ctx->qmin = 30;
enc_ctx->pix_fmt = AV_PIX_FMT_YUV422P;
// NOTE(review): max_b_frames = 6 is what makes the encoder emit packets in
// decode order and triggers the pts-went-backwards confusion.
enc_ctx->max_b_frames = 6;
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
enc_ctx->flags |= CODEC_FLAG_LOW_DELAY;
avcodec_open2(enc_ctx, enc, NULL);
avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
&oc->interrupt_callback, NULL);
av_dump_format(oc, 0, oc->filename, 1);
avformat_write_header(oc, NULL);
// Scaler: camera format/size -> encoder format/size.
struct SwsContext *sws_ctx;
sws_ctx = sws_getContext(input_w, input_h,
dec_ctx->pix_fmt,
output_w, output_h, enc_ctx->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
frame = av_frame_alloc();
tmp_frame = av_frame_alloc();
frame->format = enc_ctx->pix_fmt;
frame->width = output_w;
frame->height = output_h;
frame->pts = AV_NOPTS_VALUE;
av_frame_get_buffer(frame, 32);
av_frame_make_writable(frame);
int got_picture=0;
int got_packet=0;
double recording_duration = 0;
QTime timer;
AVPacket pkt_out;
av_init_packet(&pkt_out);
timer.start();
bool started_recording = false;
int64_t start_time = 0;
int64_t last_pts = INT64_MIN;
while(1) {
timer.restart();
AVPacket pkt_in;
av_read_frame(ic, &pkt_in);
// Non-blocking capture: an empty packet means no frame ready yet.
if (pkt_in.size == 0) {
sleep_if_needed(timer.elapsed());
continue;
}
avcodec_decode_video2(dec_ctx, tmp_frame, &got_picture, &pkt_in);
#ifdef SM_DEBUG
log_packet(&pkt_in, ist->time_base, 1);
#endif
if (!started_recording) {
start_time = pkt_in.dts;
started_recording = true;
}
// Drop packets whose pts went backwards (see the answer: these are
// B frames arriving in decode order, so dropping loses real frames).
// NOTE(review): this path leaks pkt_in — no av_free_packet() before
// the continue.
if (pkt_in.pts < last_pts) {
sleep_if_needed(timer.elapsed());
continue;
}
last_pts = pkt_in.pts;
// NOTE(review): per the answer, the pts of the DECODED frame
// (tmp_frame) should be used here, not the input packet's dts.
frame->pts = (pkt_in.dts - start_time);
if (!got_picture) {
av_free_packet(&pkt_in);
sleep_if_needed(timer.elapsed());
continue;
} else {
sws_scale(sws_ctx, tmp_frame->data, tmp_frame->linesize,
0, input_h, frame->data, frame->linesize);
av_free_packet(&pkt_in);
}
av_init_packet(&pkt_out);
avcodec_encode_video2(enc_ctx, &pkt_out, frame, &got_packet);
if (got_packet) {
// Clamp dts to pts so the muxer never sees dts > pts.
if (pkt_out.pts < pkt_out.dts) {
pkt_out.dts = pkt_out.pts;
}
pkt_out.stream_index = 0;
recording_duration = pkt_out.pts * av_q2d(ost->time_base);
#ifdef SM_DEBUG
log_packet(&pkt_out, ost->time_base, 0);
#endif
av_interleaved_write_frame(oc, &pkt_out);
av_free_packet(&pkt_out);
}
if (recording_duration >= max_duration) {
break;
} else {
sleep_if_needed(timer.elapsed());
}
}
// Finalize the MP4 and release everything.
av_write_trailer(oc);
av_dict_free(&opts);
av_frame_free(&frame);
av_frame_free(&tmp_frame);
sws_freeContext(sws_ctx);
avcodec_close(dec_ctx);
avcodec_close(enc_ctx);
avio_close(oc->pb);
avformat_free_context(oc);
avformat_close_input(&ic);
return 0;
}
These frames are B frames. B frames are saved to the stream in decoding order, not presentation order. If you look at the DTS it will probably look OK. It is the job of the decoder to reorder frames into presentation order after they are decoded.
EDIT: To fix your code, use the PTS from the decoded frame, not from the packet.