Converting uint8_t data to AVFrame with FFmpeg - c++

I am currently working in C++ with the Autodesk 3DStudio Max 2014 SDK (toolset 100) and the FFmpeg library in Visual Studio 2015, trying to convert a DIB (Device Independent Bitmap) to a uint8_t pointer array and then convert that data to an AVFrame.
I don't get any errors, but my video is still black and has no metadata
(no time display, etc.).
I did approximately the same thing in a Visual Studio console application that converts a JPEG image sequence from disk, and that works fine.
(The only difference is that instead of converting JPEGs to AVFrames with the FFmpeg library, I try to convert raw data to an AVFrame.)
So I think the problem is either in the DIB-to-uint8_t conversion or in the uint8_t-to-AVFrame conversion.
(The latter is more plausible, because I used the SFML library to display a window with my RGB uint8_t* data for debugging, and that works fine.)
I first initialize the FFmpeg library. This function is called once at the beginning:
int Converter::Initialize(AVCodecID codec_id, int width, int height, int fps, const char *filename)
{
avcodec_register_all();
av_register_all();
AVCodec *codec;
inputFrame = NULL;
codecContext = NULL;
pkt = NULL;
file = NULL;
outputFilename = new char[strlen(filename) + 1](); // +1 for the null terminator
strcpy(outputFilename, filename);
int ret;
//Initializing AVCodecContext and getting PixelFormat supported by encoder
codec = avcodec_find_encoder(codec_id);
if (!codec)
return 1;
AVPixelFormat pixFormat = codec->pix_fmts[0];
codecContext = avcodec_alloc_context3(codec);
if (!codecContext)
return 1;
codecContext->bit_rate = 400000;
codecContext->width = width;
codecContext->height = height;
codecContext->time_base.num = 1;
codecContext->time_base.den = fps;
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->pix_fmt = pixFormat;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(codecContext->priv_data, "preset", "slow", 0);
//Actually opening the encoder
if (avcodec_open2(codecContext, codec, NULL) < 0)
return 1;
file = fopen(outputFilename, "wb");
if (!file)
return 1;
inputFrame = av_frame_alloc();
inputFrame->format = codecContext->pix_fmt;
inputFrame->width = codecContext->width;
inputFrame->height = codecContext->height;
ret = av_image_alloc(inputFrame->data, inputFrame->linesize, codecContext->width, codecContext->height, codecContext->pix_fmt, 32);
if (ret < 0)
return 1;
return 0;
}
Then, for each frame, I get the DIB and convert it to a uint8_t* with this function:
uint8_t* Util::ToUint8_t(RGBQUAD *data, int width, int height)
{
uint8_t* buf = (uint8_t*)data;
int imageSize = width * height;
size_t rgbquad_size = sizeof(RGBQUAD);
size_t total_bytes = imageSize * rgbquad_size;
uint8_t * pCopyBuffer = new uint8_t[total_bytes];
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
int index = (x + width * y) * rgbquad_size;
int invertIndex = (x + width* (height - y - 1)) * rgbquad_size;
//BGRA to RGBA
pCopyBuffer[index] = buf[invertIndex + 2];
pCopyBuffer[index + 1] = buf[invertIndex + 1];
pCopyBuffer[index + 2] = buf[invertIndex];
pCopyBuffer[index + 3] = 0xFF;
}
}
return pCopyBuffer;
}
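As an aside, the manual BGRA-to-RGBA copy and vertical flip may not be needed at all: sws_scale accepts AV_PIX_FMT_BGRA directly as a source and can flip a bottom-up DIB with a negative stride. A minimal sketch of that idea (DibToYuv is a hypothetical name, and it assumes a 32-bit bottom-up DIB):
static void DibToYuv(uint8_t *dibPixels, AVCodecContext *c, AVFrame *frame)
{
    // DIB pixels are BGRA in memory and stored bottom-up, so point the
    // source at the last row and walk upward with a negative stride.
    struct SwsContext *swsCtx = sws_getContext(
        c->width, c->height, AV_PIX_FMT_BGRA,
        c->width, c->height, AV_PIX_FMT_YUV420P,
        SWS_BICUBIC, NULL, NULL, NULL);
    const uint8_t *srcSlice[1] = { dibPixels + 4 * c->width * (c->height - 1) };
    const int srcStride[1]     = { -4 * c->width };
    sws_scale(swsCtx, srcSlice, srcStride, 0, c->height,
              frame->data, frame->linesize);
    sws_freeContext(swsCtx);
}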
void GetDIBBuffer(Interface* ip, BITMAPINFO *bmi, uint8_t** outBuffer)
{
int size;
ViewExp& view = ip->GetActiveViewExp();
view.getGW()->getDIB(NULL, &size);
bmi = (BITMAPINFO *)malloc(size);
BITMAPINFOHEADER *bmih = (BITMAPINFOHEADER *)bmi;
view.getGW()->getDIB(bmi, &size);
uint8_t * pCopyBuffer = Util::ToUint8_t(bmi->bmiColors, bmih->biWidth, bmih->biHeight);
*outBuffer = pCopyBuffer;
}
This function is used to get the DIB:
void GetViewportDIB(Interface* ip, BITMAPINFO *bmi, BITMAPINFOHEADER *bmih, BitmapInfo biFile, Bitmap *map)
{
int size;
if (!biFile.Name()[0])
return;
ViewExp& view = ip->GetActiveViewExp();
view.getGW()->getDIB(NULL, &size);
bmi = (BITMAPINFO *)malloc(size);
bmih = (BITMAPINFOHEADER *)bmi;
view.getGW()->getDIB(bmi, &size);
biFile.SetWidth((WORD)bmih->biWidth);
biFile.SetHeight((WORD)bmih->biHeight);
biFile.SetType(BMM_TRUE_32);
map = TheManager->Create(&biFile);
map->OpenOutput(&biFile);
map->FromDib(bmi);
map->Write(&biFile);
map->Close(&biFile);
}
And after that comes the conversion to AVFrame and the video encoding. The EncodeFromMem function is called for each frame:
int Converter::EncodeFromMem(const char *outputDir, int frameNumber, uint8_t* data)
{
int ret;
inputFrame->pts = frameNumber;
EncodeFrame(data, codecContext, inputFrame, &pkt, file);
return 0;
}
static void RgbToYuv(uint8_t *rgb, AVCodecContext *c, AVFrame *frame)
{
struct SwsContext *swsCtx = NULL;
const int in_linesize[1] = { 3 * c->width };// RGB stride
swsCtx = sws_getCachedContext(swsCtx, c->width, c->height, AV_PIX_FMT_RGB24, c->width, c->height, AV_PIX_FMT_YUV420P, 0, 0, 0, 0);
sws_scale(swsCtx, (const uint8_t * const *)&rgb, in_linesize, 0, c->height, frame->data, frame->linesize);
}
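Note that ToUint8_t above produces four bytes per pixel (RGBA), while this RgbToYuv declares a three-byte RGB24 stride; that mismatch alone will skew the picture. A sketch of a matching setup, assuming the buffer really is the RGBA output of ToUint8_t (RgbaToYuv is an illustrative name):
static void RgbaToYuv(uint8_t *rgba, AVCodecContext *c, AVFrame *frame)
{
    const int in_linesize[1] = { 4 * c->width }; // 4 bytes per RGBA pixel
    struct SwsContext *swsCtx = sws_getCachedContext(NULL,
        c->width, c->height, AV_PIX_FMT_RGBA,
        c->width, c->height, AV_PIX_FMT_YUV420P, 0, 0, 0, 0);
    sws_scale(swsCtx, (const uint8_t * const *)&rgba, in_linesize, 0,
              c->height, frame->data, frame->linesize);
}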
static void EncodeFrame(uint8_t *rgb, AVCodecContext *c, AVFrame *frame, AVPacket **pkt, FILE *file)
{
int ret, got_output;
RgbToYuv(rgb, c, frame);
*pkt = av_packet_alloc();
av_init_packet(*pkt);
(*pkt)->data = NULL;
(*pkt)->size = 0;
ret = avcodec_encode_video2(c, *pkt, frame, &got_output);
if (ret < 0)
{
fprintf(stderr, "Error encoding frame/n");
exit(1);
}
if (got_output)
{
fwrite((*pkt)->data, 1, (*pkt)->size, file);
av_packet_unref(*pkt);
}
}
Finally, I have a function that writes the delayed packets and frees the memory. This function is called once at the end of the time range:
int Converter::Finalize()
{
int ret, got_output;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
/* get the delayed frames */
do
{
fflush(stdout);
ret = avcodec_encode_video2(codecContext, pkt, NULL, &got_output);
if (ret < 0)
{
fprintf(stderr, "Error encoding frame\n");
return 1;
}
if (got_output)
{
fwrite(pkt->data, 1, pkt->size, file);
av_packet_unref(pkt);
}
} while (got_output);
fwrite(endcode, 1, sizeof(endcode), file);
fclose(file);
avcodec_close(codecContext);
av_free(codecContext);
av_freep(&inputFrame->data[0]); // free the av_image_alloc buffer before freeing the frame; calling this after av_frame_free crashes
av_frame_free(&inputFrame);
delete[] outputFilename; // allocated with new[]
outputFilename = 0;
return 0;
}
EDIT:
I modified my RgbToYuv function and created another one to convert the YUV frame back to an RGB one.
This does not really solve the problem, but it may narrow the problem down to the YUV-to-RGB conversion.
This is the result of the conversion from YUV to RGB:
YuvToRgb result: https://img42.com/kHqpt+
static void YuvToRgb(AVCodecContext *c, AVFrame *frame)
{
struct SwsContext *img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P, c->width, c->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
AVFrame * rgbPictInfo = av_frame_alloc();
avpicture_fill((AVPicture*)rgbPictInfo, *(frame)->data, AV_PIX_FMT_RGB24, c->width, c->height);
sws_scale(img_convert_ctx, frame->data, frame->linesize, 0, c->height, rgbPictInfo->data, rgbPictInfo->linesize);
Util::DebugWindow(c->width, c->height, rgbPictInfo->data[0]);
}
static void RgbToYuv(uint8_t *rgb, AVCodecContext *c, AVFrame *frame)
{
AVFrame * rgbPictInfo = av_frame_alloc();
avpicture_fill((AVPicture*)rgbPictInfo, rgb, AV_PIX_FMT_RGBA, c->width, c->height);
struct SwsContext *swsCtx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGBA, c->width, c->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
avpicture_fill((AVPicture*)frame, rgb, AV_PIX_FMT_YUV420P, c->width, c->height);
sws_scale(swsCtx, rgbPictInfo->data, rgbPictInfo->linesize, 0, c->height, frame->data, frame->linesize);
YuvToRgb(c, frame);
}
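For reference, the second avpicture_fill in this edited RgbToYuv repoints frame->data at the rgb buffer itself, so sws_scale then writes its YUV output over its own source. A sketch of the same function without that call, assuming frame keeps the planes allocated by av_image_alloc in Initialize:
static void RgbToYuv(uint8_t *rgb, AVCodecContext *c, AVFrame *frame)
{
    AVFrame *rgbPictInfo = av_frame_alloc();
    // Wrap the caller's RGBA buffer; avpicture_fill makes no copy.
    avpicture_fill((AVPicture*)rgbPictInfo, rgb, AV_PIX_FMT_RGBA, c->width, c->height);
    struct SwsContext *swsCtx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGBA,
                                               c->width, c->height, AV_PIX_FMT_YUV420P,
                                               SWS_BICUBIC, NULL, NULL, NULL);
    // frame->data/linesize were set up by av_image_alloc; leave them alone here.
    sws_scale(swsCtx, rgbPictInfo->data, rgbPictInfo->linesize, 0, c->height,
              frame->data, frame->linesize);
    sws_freeContext(swsCtx);
    av_frame_free(&rgbPictInfo);
}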

Related

FFMPEG convert NV12 format to NV12 with the same height and width

I want to use FFmpeg 4.2.2 to convert an input NV12 frame to an output NV12 frame with the same height and width. I used sws_scale for the conversion, but the output frame's colors are all green.
P.S. It may seem unnecessary to use swscale to get a frame with the same width, height, and format, but it is necessary in my project for dealing with other frames.
I have successfully converted the input NV12 format to an output NV12 format with a different height and width, and the output frame's colors were right. But I FAILED to convert NV12 to NV12 with the same height and width. It is so weird; I can't tell why :(
I want to know what the reason is and what I should do.
The following is my code. swsCtx4 is used for converting the NV12 format to the output NV12 format; the others are used for testing other format conversions.
Thank you for your help~
//the main code is
AVFrame* frame_nv12 = av_frame_alloc();
frame_nv12->width = in_width;
frame_nv12->height = in_height;
frame_nv12->format = AV_PIX_FMT_NV12;
uint8_t* frame_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width, in_height , 1));
av_image_fill_arrays(frame_nv12->data, frame_nv12->linesize, frame_buffer_nv12, AV_PIX_FMT_NV12, in_width, in_height, 1);
AVFrame* frame2_nv12 = av_frame_alloc();
frame2_nv12->width = in_width1;
frame2_nv12->height = in_height1;
frame2_nv12->format = AV_PIX_FMT_NV12;
uint8_t* frame2_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width1, in_height1, 1));
av_image_fill_arrays(frame2_nv12->data, frame2_nv12->linesize, frame2_buffer_nv12, AV_PIX_FMT_NV12, in_width1, in_height1, 1);
SwsContext* swsCtx4 = nullptr;
swsCtx4 = sws_getContext(in_width, in_height, AV_PIX_FMT_NV12, in_width1, in_height1, AV_PIX_FMT_NV12,
SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
printf("swsCtx4\n");
ret = sws_scale(swsCtx4, frame_nv12->data, frame_nv12->linesize, 0, frame_nv12->height, frame2_nv12->data, frame2_nv12->linesize);
if (ret < 0) {
printf("sws_4scale failed\n");
}
//the complete code
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
#include <seeker/loggerApi.h>
#include "seeker/common.h"
#include <iostream>
//Fix: set pts to 0 and dts to 0
#define FILE_SRC "testPicFilter.yuv" //source file
#define FILE_DES "test11.yuv" //destination file
int count = 0;
int main(int argc, char* argv[])
{
av_register_all();
int ret = 0;
//std::this_thread::sleep_for(std::chrono::milliseconds(5000));
int count1 = 1;
int piccount;
int align = 1;
/*open the input YUV file*/
FILE* fp_in = fopen(FILE_SRC, "rb+");
if (fp_in == NULL)
{
printf("文件打开失败\n");
return 0;
}
int in_width = 640;
int in_height = 360;
int in_width1 = 640;
int in_height1 = 360;
/*the processed output file*/
FILE* fp_out = fopen(FILE_DES, "wb+");
if (fp_out == NULL)
{
printf("文件创建失败\n");
return 0;
}
char buff[50];
AVFrame* frame_in = av_frame_alloc();
unsigned char* frame_buffer_in;
frame_buffer_in = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
/*set the image plane pointers and memory alignment for the image*/
av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in, AV_PIX_FMT_YUV420P, in_width, in_height, 1);
frame_in->width = in_width;
frame_in->height = in_height;
frame_in->format = AV_PIX_FMT_YUV420P;
//convert the input YUV to frame_nv12
AVFrame* frame_nv12 = av_frame_alloc();
frame_nv12->width = in_width;
frame_nv12->height = in_height;
frame_nv12->format = AV_PIX_FMT_NV12;
uint8_t* frame_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width, in_height , 1));
av_image_fill_arrays(frame_nv12->data, frame_nv12->linesize, frame_buffer_nv12, AV_PIX_FMT_NV12, in_width, in_height, 1);
AVFrame* frame2_nv12 = av_frame_alloc();
frame2_nv12->width = in_width1;
frame2_nv12->height = in_height1;
frame2_nv12->format = AV_PIX_FMT_NV12;
uint8_t* frame2_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width1, in_height1, 1));
av_image_fill_arrays(frame2_nv12->data, frame2_nv12->linesize, frame2_buffer_nv12, AV_PIX_FMT_NV12, in_width1, in_height1, 1);
//convert the input RGB to YUV
AVFrame* frame_yuv = av_frame_alloc();
frame_yuv->width = in_width;
frame_yuv->height = in_height;
frame_yuv->format = AV_PIX_FMT_YUV420P;
uint8_t* frame_buffer_yuv = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
av_image_fill_arrays(frame_yuv->data, frame_yuv->linesize, frame_buffer_yuv,
AV_PIX_FMT_YUV420P, in_width, in_height, 1);
SwsContext* swsCtx = nullptr;
swsCtx = sws_getContext(in_width, in_height, AV_PIX_FMT_YUV420P, in_width, in_height, AV_PIX_FMT_NV12,
SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
printf("swsCtx\n");
SwsContext* swsCtx4 = nullptr;
swsCtx4 = sws_getContext(in_width, in_height, AV_PIX_FMT_NV12, in_width1, in_height1, AV_PIX_FMT_NV12,
SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
printf("swsCtx4\n");
SwsContext* swsCtx2 = nullptr;
swsCtx2 = sws_getContext(in_width1, in_height1, AV_PIX_FMT_NV12, in_width, in_height, AV_PIX_FMT_YUV420P,
SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
printf("swsCtx2\n");
while (1)
{
count++;
if (fread(frame_buffer_in, 1, in_width * in_height * 3 / 2, fp_in) != in_width * in_height * 3 / 2)
{
break;
}
frame_in->data[0] = frame_buffer_in;
frame_in->data[1] = frame_buffer_in + in_width * in_height;
frame_in->data[2] = frame_buffer_in + in_width * in_height * 5 / 4;
//convert to NV12 format
int ret = sws_scale(swsCtx, frame_in->data, frame_in->linesize, 0, frame_in->height, frame_nv12->data, frame_nv12->linesize);
if (ret < 0) {
printf("sws_scale swsCtx failed\n");
}
ret = sws_scale(swsCtx4, frame_nv12->data, frame_nv12->linesize, 0, frame_nv12->height, frame2_nv12->data, frame2_nv12->linesize);
if (ret < 0) {
printf("sws_scale swsCtx4 failed\n");
}
if (ret > 0) {
int ret2 = sws_scale(swsCtx2, frame2_nv12->data, frame2_nv12->linesize, 0, frame2_nv12->height, frame_yuv->data, frame_yuv->linesize);
if (ret2 < 0) {
printf("sws_scale swsCtx2 failed\n");
}
I_LOG("frame_yuv:{},{}", frame_yuv->width, frame_yuv->height);
//I_LOG("frame_yuv:{}", frame_yuv->format);
if (frame_yuv->format == AV_PIX_FMT_YUV420P)
{
for (int i = 0; i < frame_yuv->height; i++)
{
fwrite(frame_yuv->data[0] + frame_yuv->linesize[0] * i, 1, frame_yuv->width, fp_out);
}
for (int i = 0; i < frame_yuv->height / 2; i++)
{
fwrite(frame_yuv->data[1] + frame_yuv->linesize[1] * i, 1, frame_yuv->width / 2, fp_out);
}
for (int i = 0; i < frame_yuv->height / 2; i++)
{
fwrite(frame_yuv->data[2] + frame_yuv->linesize[2] * i, 1, frame_yuv->width / 2, fp_out);
}
printf("yuv to file\n");
}
}
}
fclose(fp_in);
fclose(fp_out);
av_frame_free(&frame_in);
av_frame_free(&frame_nv12);
av_frame_free(&frame_yuv);
sws_freeContext(swsCtx);
sws_freeContext(swsCtx2);
sws_freeContext(swsCtx4);
//std::this_thread::sleep_for(std::chrono::milliseconds(8000));
return 0;
}
You found a bug in the FFmpeg library. Report it! (See this.)
As a remedy, I suggest you call av_frame_copy() when the format, width, and height of the source frame are the same as those of the destination frame.
As a side note, you can see the problematic code at line #1817.
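A sketch of that remedy, reusing the frames from the code above:
// Same format, width and height: bypass swscale and copy the planes directly.
if (frame_nv12->format == frame2_nv12->format &&
    frame_nv12->width  == frame2_nv12->width  &&
    frame_nv12->height == frame2_nv12->height)
{
    ret = av_frame_copy(frame2_nv12, frame_nv12);
    if (ret < 0)
        printf("av_frame_copy failed\n");
}
else
{
    ret = sws_scale(swsCtx4, frame_nv12->data, frame_nv12->linesize, 0,
                    frame_nv12->height, frame2_nv12->data, frame2_nv12->linesize);
}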

Error submitting the frame for encoding when submitting NV12 texture

I'm trying to encode a D3D11 NV12 texture with the QSV encoder but am getting [h264_qsv @ 00000244ce6f50c0] Error submitting the frame for encoding.
Main:
int width = 1920;
int height = 1080;
FILE* outfile;
fopen_s(&outfile, "D:\\Sources\\D3D11QSV\\x64\\Debug\\outfile.264", "wb");
const AVCodec* codec = avcodec_find_encoder_by_name("h264_qsv");
AVCodecContext* ctx = avcodec_alloc_context3(codec);
ctx->width = width;
ctx->height = height;
ctx->time_base = AVRational{ 1, 60 };
ctx->framerate = AVRational{ 60, 1 };
ctx->slices = 1;
ctx->sw_pix_fmt = AV_PIX_FMT_NV12;
ctx->pix_fmt = AV_PIX_FMT_NV12;
ctx->bit_rate = 400000;
ctx->gop_size = 10;
ctx->max_b_frames = 1;
auto status = avcodec_open2(ctx, codec, NULL);
if (status < 0) {
std::cout << "Open codec error!\n";
}
AVFrame* sw_frame = av_frame_alloc();
sw_frame->format = ctx->sw_pix_fmt;
sw_frame->width = ctx->width;
sw_frame->height = ctx->height;
status = av_frame_get_buffer(sw_frame, 0);
fill_frame(sw_frame, ctx);
Filling the frame:
auto ret = 0;
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data\n");
exit(1);
}
int i, y, x, c = 0;
for (i = 0; i < 60; i++) {
fflush(stdout);
ret = av_frame_make_writable(frame);
auto texture = create_texture();
auto desc = (AVD3D11FrameDescriptor*)frame->buf[0]->data;
desc->texture = (ID3D11Texture2D*)texture;
desc->index = 0;
frame->data[0] = (std::uint8_t*)texture;
frame->data[1] = 0;
frame->linesize[0] = width * 4;
frame->pts = i;
encode(frame, ctx);
}
Creating Texture:
D3D11_TEXTURE2D_DESC const desc = CD3D11_TEXTURE2D_DESC(
DXGI_FORMAT_NV12, // HoloLens PV camera format, common for video sources
width, // Width of the video frames
height, // Height of the video frames
1, // Number of textures in the array
1, // Number of miplevels in each texture
D3D11_BIND_SHADER_RESOURCE, // We read from this texture in the shader
D3D11_USAGE_DYNAMIC, // Because we'll be copying from CPU memory
D3D11_CPU_ACCESS_WRITE // We only need to write into the texture
);
ID3D11Device* pd3dDevice = create_d3d11_device();
ID3D11Texture2D* pTexture = NULL;
HRESULT err = pd3dDevice->CreateTexture2D(&desc, nullptr, &pTexture);
if (SUCCEEDED(err)) {
D3D11_SHADER_RESOURCE_VIEW_DESC SRVDesc = CD3D11_SHADER_RESOURCE_VIEW_DESC(
pTexture,
D3D11_SRV_DIMENSION_TEXTURE2D,
DXGI_FORMAT_R8_UNORM
);
ID3D11ShaderResourceView* texSRV = NULL;
err = pd3dDevice->CreateShaderResourceView(pTexture,
&SRVDesc, &texSRV);
D3D11_SHADER_RESOURCE_VIEW_DESC const chrominancePlaneDesc = CD3D11_SHADER_RESOURCE_VIEW_DESC(
pTexture,
D3D11_SRV_DIMENSION_TEXTURE2D,
DXGI_FORMAT_R8G8_UNORM
);
ID3D11ShaderResourceView* m_chrominanceView = NULL;
err = pd3dDevice->CreateShaderResourceView(pTexture,
&chrominancePlaneDesc, &m_chrominanceView);
}
if (FAILED(err))
{
fprintf(stderr, "Error creating texture\n");
exit(1);
}
return pTexture;
Creating D3D11 device:
ID3D11Device* dev11 = NULL;
ID3D11DeviceContext* devcon11 = NULL;
D3D_FEATURE_LEVEL featureLevels[]{
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1
};
int err = D3D11CreateDevice(
nullptr,
D3D_DRIVER_TYPE_HARDWARE,
nullptr,
D3D11_CREATE_DEVICE_VIDEO_SUPPORT,
featureLevels, sizeof(featureLevels) / sizeof(D3D_FEATURE_LEVEL),
D3D11_SDK_VERSION,
&dev11,
nullptr,
&devcon11);
return dev11;
Encoding:
auto status = 0;
status = avcodec_send_frame(ctx, frame); //error happening here
AVPacket* pkt;
pkt = av_packet_alloc();
if (status < 0) {
fprintf(stderr, "Error sending a frame for encoding\n");
exit(1);
}
while (status >= 0) {
status = avcodec_receive_packet(ctx, pkt);
if (status == AVERROR(EAGAIN) || status == AVERROR_EOF)
return;
else if (status < 0) {
fprintf(stderr, "Error during encoding\n");
exit(1);
}
printf("Write packet \n", pkt->pts, pkt->size);
fwrite(pkt->data, 1, pkt->size, outfile);
av_packet_unref(pkt);
}
Everything runs well until encoding the frame. I have tried sending dummy NV12 data (not a D3D11 texture) and it works fine.
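One thing the code above never sets up is a hardware frames context. To hand real D3D11 textures to h264_qsv, the codec context and the frames normally need hw_frames_ctx, and the frame format is then the hardware pixel format rather than AV_PIX_FMT_NV12. A rough sketch of that setup, with error handling elided (details vary across FFmpeg versions, so treat this as an outline rather than a drop-in fix):
AVBufferRef *hw_device_ctx = NULL;
av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);

AVBufferRef *hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
AVHWFramesContext *frames_ctx = (AVHWFramesContext *)hw_frames_ref->data;
frames_ctx->format    = AV_PIX_FMT_QSV;  // what the encoder is handed
frames_ctx->sw_format = AV_PIX_FMT_NV12; // layout of the underlying surfaces
frames_ctx->width     = width;
frames_ctx->height    = height;
frames_ctx->initial_pool_size = 20;
av_hwframe_ctx_init(hw_frames_ref);

ctx->pix_fmt       = AV_PIX_FMT_QSV;     // instead of AV_PIX_FMT_NV12
ctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
// After avcodec_open2, frames come from av_hwframe_get_buffer(hw_frames_ref,
// frame, 0), or CPU data is moved in with av_hwframe_transfer_data(), rather
// than patching frame->data and frame->buf by hand.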

ffmpeg AVFrame to opencv Mat conversion

I am currently working on a project that decodes received frames using FFmpeg. After decoding, I want to convert the AVFrame to an OpenCV Mat so that I can display it with the imshow function.
What I have is the byte stream; I read it into a buffer and decode it to an AVFrame:
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
framergb = avcodec_alloc_frame();
if (!framergb) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
bytes=avpicture_get_size(PIX_FMT_RGB24, CAMER_WIDTH, CAMER_HEIGHT);
buffer=(uint8_t *)av_malloc(bytes*sizeof(uint8_t));
avpicture_fill((AVPicture *)framergb, buffer, PIX_FMT_RGB24,
CAMER_WIDTH, CAMER_HEIGHT);
frame_count = 0;
for(;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
break;
avpkt.data = inbuf;
while (avpkt.size > 0)
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
exit(1);
}
avpkt.data = NULL;
avpkt.size = 0;
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
and the decode_write_frame defined like this:
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
int len, got_frame;
char buf[1024];
struct SwsContext *convert_ctx;
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
return len;
}
if (got_frame) {
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
fflush(stdout);
int w = avctx->width;
int h = avctx->height;
convert_ctx = sws_getContext(w, h, avctx->pix_fmt,
w, h, PIX_FMT_RGB24, SWS_BICUBIC,
NULL, NULL, NULL);
if(convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
sws_scale(convert_ctx, frame->data,
frame->linesize, 0,
h,
framergb->data, framergb->linesize);
/* the picture is allocated by the decoder, no need to free it */
snprintf(buf, sizeof(buf), outfilename, *frame_count);
bmp_save(framergb->data[0], framergb->linesize[0],
avctx->width, avctx->height, buf);
(*frame_count)++;
}
if (pkt->data) {
pkt->size -= len;
pkt->data += len;
}
return 0;
}
Here bmp_save() is defined by the original code author to convert the AVFrame to a BMP picture. I want to modify this part so that the AVFrame is converted to an OpenCV Mat instead. How should I do this conversion?
Thanks in advance.
Using the appropriate Mat constructor, replace the bmp_save line with:
Mat mat(avctx->height, avctx->width, CV_8UC3, framergb->data[0], framergb->linesize[0]);
imshow("frame", mat);
waitKey(10);
Also replace the PIX_FMT_RGB24 flag in sws_getContext with PIX_FMT_BGR24, because OpenCV uses the BGR format internally.
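Note that this constructor only wraps the existing memory, so if the decoder may overwrite framergb before the Mat is consumed, a deep copy is one extra call. A small usage sketch under that assumption:
Mat mat(avctx->height, avctx->width, CV_8UC3, framergb->data[0], framergb->linesize[0]);
Mat owned = mat.clone(); // detach from the AVFrame buffer before it is reused
imshow("frame", owned);
waitKey(10);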
Thank you for your answer. I also solved it this way:
say AVFrame *frame is the original FFmpeg frame ready to be converted,
Mat m;
AVFrame dst;
int w = frame->width;
int h = frame->height;
m = cv::Mat(h, w, CV_8UC3);
dst.data[0] = (uint8_t *)m.data;
avpicture_fill( (AVPicture *)&dst, dst.data[0], PIX_FMT_BGR24, w, h);
enum PixelFormat src_pixfmt = (enum PixelFormat)frame->format;
enum PixelFormat dst_pixfmt = PIX_FMT_BGR24;
convert_ctx = sws_getContext(w, h, src_pixfmt, w, h, dst_pixfmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if(convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
sws_scale(convert_ctx, frame->data, frame->linesize, 0, h,
dst.data, dst.linesize);
imshow("MyVideo", m);
waitKey(30);
Worked nicely!

FFMPEG Encoding Issues

I am having issues encoding screen captures into an H.264 file for viewing. The program below is cobbled together from examples here and here. The first example uses an older version of the FFmpeg API, so I tried to update that example for use in my program. The file is created and has something written to it, but when I view the file, the encoded images are all distorted. I am able to run the video encoding example from the FFmpeg API successfully. This is my first time posting, so if I missed anything please let me know.
I appreciate any assistance that is given.
My program:
#include <Windows.h>
#include <string>
#include <sstream>
#include <time.h>
#include <iostream>
#include <dwmapi.h>
extern "C"{
#include <stdint.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
}
using namespace std;
void ScreenShot(const char* BmpName, uint8_t *frame)
{
HWND DesktopHwnd = GetDesktopWindow();
RECT DesktopParams;
HDC DevC = GetDC(DesktopHwnd);
GetWindowRect(DesktopHwnd,&DesktopParams);
DWORD Width = DesktopParams.right - DesktopParams.left;
DWORD Height = DesktopParams.bottom - DesktopParams.top;
DWORD FileSize = sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER)+(sizeof(RGBTRIPLE)+1*(Width*Height*4));
char *BmpFileData = (char*)GlobalAlloc(0x0040,FileSize);
PBITMAPFILEHEADER BFileHeader = (PBITMAPFILEHEADER)BmpFileData;
PBITMAPINFOHEADER BInfoHeader = (PBITMAPINFOHEADER)&BmpFileData[sizeof(BITMAPFILEHEADER)];
BFileHeader->bfType = 0x4D42; // BM
BFileHeader->bfSize = sizeof(BITMAPFILEHEADER);
BFileHeader->bfOffBits = sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER);
BInfoHeader->biSize = sizeof(BITMAPINFOHEADER);
BInfoHeader->biPlanes = 1;
BInfoHeader->biBitCount = 32;
BInfoHeader->biCompression = BI_RGB;
BInfoHeader->biHeight = Height;
BInfoHeader->biWidth = Width;
RGBTRIPLE *Image = (RGBTRIPLE*)&BmpFileData[sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER)];
RGBTRIPLE color;
//pPixels = (RGBQUAD **)new RGBQUAD[sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)];
int start = clock();
HDC CaptureDC = CreateCompatibleDC(DevC);
HBITMAP CaptureBitmap = CreateCompatibleBitmap(DevC,Width,Height);
SelectObject(CaptureDC,CaptureBitmap);
BitBlt(CaptureDC,0,0,Width,Height,DevC,0,0,SRCCOPY|CAPTUREBLT);
GetDIBits(CaptureDC,CaptureBitmap,0,Height,frame,(LPBITMAPINFO)BInfoHeader, DIB_RGB_COLORS);
int end = clock();
cout << "it took " << end - start << " to capture a frame" << endl;
DWORD Junk;
HANDLE FH = CreateFileA(BmpName,GENERIC_WRITE,FILE_SHARE_WRITE,0,CREATE_ALWAYS,0,0);
WriteFile(FH,BmpFileData,FileSize,&Junk,0);
CloseHandle(FH);
GlobalFree(BmpFileData);
}
void video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y, got_output;
FILE *f;
AVFrame *frame;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
printf("Encode video file %s\n", filename);
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
cin.get();
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
cin.get();
exit(1);
}
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* frames per second */
c->time_base.num=1;
c->time_base.den = 25;
c->gop_size = 10; /* emit one intra frame every ten frames */
c->max_b_frames=1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if(codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* the image can be allocated by any means and av_image_alloc() is
just the most convenient way if av_malloc() is to be used */
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
/* encode 1 second of video */
for(i=0;i<250;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
fflush(stdout);
/* prepare a dummy image */
/* Y */
for(y=0;y<c->height;y++) {
for(x=0;x<c->width;x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for(y=0;y<c->height/2;y++) {
for(x=0;x<c->width/2;x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
frame->pts = i;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
printf("\n");
}
void write_video_frame()
{
}
int lineSizeOfFrame(int width)
{
return (width*24 + 31)/32 * 4; // bytes per row, rounded up to a multiple of 4
}
int getScreenshotWithCursor(uint8_t* frame)
{
int successful = 0;
HDC screen, bitmapDC;
HBITMAP screen_bitmap;
screen = GetDC(NULL);
RECT DesktopParams;
HWND desktop = GetDesktopWindow();
GetWindowRect(desktop, &DesktopParams);
int width = DesktopParams.right;
int height = DesktopParams.bottom;
bitmapDC = CreateCompatibleDC(screen);
screen_bitmap = CreateCompatibleBitmap(screen, width, height);
SelectObject(bitmapDC, screen_bitmap);
if (BitBlt(bitmapDC, 0, 0, width, height, screen, 0, 0, SRCCOPY))
{
int pos_x, pos_y;
HICON hcur;
ICONINFO icon_info;
CURSORINFO cursor_info;
cursor_info.cbSize = sizeof(CURSORINFO);
if (GetCursorInfo(&cursor_info))
{
if (cursor_info.flags == CURSOR_SHOWING)
{
hcur = CopyIcon(cursor_info.hCursor);
if (GetIconInfo(hcur, &icon_info))
{
pos_x = cursor_info.ptScreenPos.x - icon_info.xHotspot;
pos_y = cursor_info.ptScreenPos.y - icon_info.yHotspot;
DrawIcon(bitmapDC, pos_x, pos_y, hcur);
if (icon_info.hbmColor) DeleteObject(icon_info.hbmColor);
if (icon_info.hbmMask) DeleteObject(icon_info.hbmMask);
}
}
}
int header_size = sizeof(BITMAPINFOHEADER) + 256*sizeof(RGBQUAD);
size_t line_size = lineSizeOfFrame(width);
PBITMAPINFO lpbi = (PBITMAPINFO) malloc(header_size);
lpbi->bmiHeader.biSize = header_size;
lpbi->bmiHeader.biWidth = width;
lpbi->bmiHeader.biHeight = height;
lpbi->bmiHeader.biPlanes = 1;
lpbi->bmiHeader.biBitCount = 24;
lpbi->bmiHeader.biCompression = BI_RGB;
lpbi->bmiHeader.biSizeImage = height*line_size;
lpbi->bmiHeader.biXPelsPerMeter = 0;
lpbi->bmiHeader.biYPelsPerMeter = 0;
lpbi->bmiHeader.biClrUsed = 0;
lpbi->bmiHeader.biClrImportant = 0;
if (GetDIBits(bitmapDC, screen_bitmap, 0, height, (LPVOID)frame, lpbi, DIB_RGB_COLORS))
{
int i;
uint8_t *buf_begin = frame;
uint8_t *buf_end = frame + line_size*(lpbi->bmiHeader.biHeight - 1);
void *temp = malloc(line_size);
for (i = 0; i < lpbi->bmiHeader.biHeight / 2; ++i)
{
memcpy(temp, buf_begin, line_size);
memcpy(buf_begin, buf_end, line_size);
memcpy(buf_end, temp, line_size);
buf_begin += line_size;
buf_end -= line_size;
}
cout << *buf_begin << endl;
free(temp);
successful = 1;
}
free(lpbi);
}
DeleteObject(screen_bitmap);
DeleteDC(bitmapDC);
ReleaseDC(NULL, screen);
return successful;
}
int main()
{
RECT DesktopParams;
HWND desktop = GetDesktopWindow();
GetWindowRect(desktop, &DesktopParams);
int width = DesktopParams.right;
int height = DesktopParams.bottom;
uint8_t *frame = (uint8_t *)malloc(width * height);
AVCodec *codec;
AVCodecContext *codecContext = NULL;
AVPacket packet;
FILE *f;
AVFrame *pictureYUV = NULL;
AVFrame *pictureRGB;
avcodec_register_all();
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if(!codec)
{
cout << "codec not found!" << endl;
cin.get();
return 1;
}
else
{
cout << "codec h265 found!" << endl;
}
codecContext = avcodec_alloc_context3(codec);
codecContext->bit_rate = width * height * 4;
codecContext->width = width;
codecContext->height = height;
codecContext->time_base.num = 1;
codecContext->time_base.den = 250;
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->keyint_min = 1;
codecContext->i_quant_factor = (float)0.71; // qscale factor between P and I frames
codecContext->b_frame_strategy = 20; ///// find out exactly what this does
codecContext->qcompress = (float)0.6; ///// find out exactly what this does
codecContext->qmin = 20; // minimum quantizer
codecContext->qmax = 51; // maximum quantizer
codecContext->max_qdiff = 4; // maximum quantizer difference between frames
codecContext->refs = 4; // number of reference frames
codecContext->trellis = 1;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
codecContext->codec_id = AV_CODEC_ID_H264;
codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
if(avcodec_open2(codecContext, codec, NULL) < 0)
{
cout << "couldn't open codec" << endl;
cout << stderr << endl;
cin.get();
return 1;
}
else
{
cout << "opened h265 codec!" << endl;
cin.get();
}
f = fopen("test.h264", "wb");
if(!f)
{
cout << "Unable to open file" << endl;
return 1;
}
struct SwsContext *img_convert_ctx = sws_getContext(codecContext->width, codecContext->height, PIX_FMT_RGB32, codecContext->width,
codecContext->height, codecContext->pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
int got_output = 0, i = 0;
uint8_t encode[] = { 0, 0, 1, 0xb7 };
try
{
for(i = 0; i < codecContext->time_base.den; i++)
{
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
pictureRGB = av_frame_alloc();
pictureYUV = av_frame_alloc();
getScreenshotWithCursor(frame);
//ScreenShot("example.bmp", frame);
int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, codecContext->width, codecContext->height); // allocating outbuffer
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes*sizeof(uint8_t));
pictureRGB = av_frame_alloc();
pictureYUV = av_frame_alloc();
avpicture_fill((AVPicture*)pictureRGB, frame, PIX_FMT_RGB32, codecContext->width, codecContext->height); // fill image with input screenshot
avpicture_fill((AVPicture*)pictureYUV, outbuffer, PIX_FMT_YUV420P, codecContext->width, codecContext->height);
av_image_alloc(pictureYUV->data, pictureYUV->linesize, codecContext->width, codecContext->height, codecContext->pix_fmt, 32);
sws_scale(img_convert_ctx, pictureRGB->data, pictureRGB->linesize, 0, codecContext->height, pictureYUV->data, pictureYUV->linesize);
pictureYUV->pts = i;
avcodec_encode_video2(codecContext, &packet, pictureYUV, &got_output);
if(got_output)
{
printf("Write frame %3d (size=%5d)\n", i, packet.size);
fwrite(packet.data, 1, packet.size, f);
av_free_packet(&packet);
}
//av_frame_free(&pictureRGB);
//av_frame_free(&pictureYUV);
}
for(got_output = 1; got_output; i++)
{
fflush(stdout);
avcodec_encode_video2(codecContext, &packet, NULL, &got_output);
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, packet.size);
fwrite(packet.data, 1, packet.size, f);
av_free_packet(&packet);
}
}
}
catch(std::exception ex)
{
cout << ex.what() << endl;
}
avcodec_close(codecContext);
av_free(codecContext);
av_freep(&pictureYUV->data[0]);
//av_frame_free(&picture);
fwrite(encode, 1, sizeof(encode), f);
fclose(f);
cin.get();
return 0;
}
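Two details in the program above are worth flagging: the capture buffer is malloc'd as width * height bytes even though each pixel occupies 3 or 4 bytes, and the 24-bit GetDIBits path is paired with a PIX_FMT_RGB32 source format in sws_getContext. A sketch of a consistent 32-bit path (the buffer size, the BGRA source format, and biBitCount = 32 are the assumptions here):
// Room for 32-bit pixels, matching biBitCount = 32 in the BITMAPINFOHEADER.
uint8_t *frame = (uint8_t *)malloc((size_t)width * height * 4);

// DIB data is BGRA in memory, so convert from AV_PIX_FMT_BGRA.
struct SwsContext *img_convert_ctx = sws_getContext(
    width, height, AV_PIX_FMT_BGRA,
    width, height, AV_PIX_FMT_YUV420P,
    SWS_BILINEAR, NULL, NULL, NULL);
const uint8_t *src[1]  = { frame };
const int srcStride[1] = { 4 * width };
sws_scale(img_convert_ctx, src, srcStride, 0, height,
          pictureYUV->data, pictureYUV->linesize);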

ffmpeg sws_scale got distorted image from YUV420P to RGB24

I get a distorted image when I try to convert YUV420P to RGB24 using sws_scale.
Code:
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame\n");
return ret;
}
if (*got_frame)
{
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
cached ? "(cached)" : "",
video_frame_count++, frame->coded_picture_number,
"#"/*av_ts2timestr(frame->pts, &video_dec_ctx->time_base)*/);
/* copy decoded frame to destination buffer:
* this is required since rawvideo expects non aligned data */
av_image_copy(video_dst_data, video_dst_linesize,
(const uint8_t **)(frame->data), frame->linesize,
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
/* write to rawvideo file */
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
AVPicture pic;
avpicture_alloc( &pic, AV_PIX_FMT_RGB24, frame->width, frame->height);
SwsContext *ctxt = sws_getContext(frame->width, frame->height, static_cast<AVPixelFormat>(frame->format),
frame->width, frame->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
if ( NULL == ctxt )
{
//Log("failed to get sws context");
}
if ( 0 < sws_scale(ctxt, frame->data, frame->linesize, 0, frame->height, pic.data, pic.linesize))
{
char szPic[256] = { 0 };
sprintf( szPic, "decoded/%d.bmp", video_frame_count );
FILE *pf = fopen(szPic, "wb"); // binary mode, or Windows will mangle the BMP
if ( NULL != pf )
{
BITMAPFILEHEADER bmpFileHeader = {0};
bmpFileHeader.bfReserved1 = 0;
bmpFileHeader.bfReserved2 = 0;
bmpFileHeader.bfType = 0x4D42;
bmpFileHeader.bfSize = sizeof(bmpFileHeader) + sizeof(BITMAPINFOHEADER) + pic.linesize[0] * frame->height;
bmpFileHeader.bfOffBits = sizeof(bmpFileHeader) + sizeof(BITMAPINFOHEADER);
BITMAPINFOHEADER bmiHeader = { 0 };
bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmiHeader.biWidth = frame->width;
bmiHeader.biHeight = 0 - frame->height;
bmiHeader.biPlanes = 1;
bmiHeader.biBitCount = 24;
bmiHeader.biCompression = BI_RGB;
bmiHeader.biSizeImage = pic.linesize[0] * frame->height;
bmiHeader.biXPelsPerMeter = 0;
bmiHeader.biYPelsPerMeter = 0;
bmiHeader.biClrUsed = 0;
bmiHeader.biClrImportant = 0;
fwrite( &bmpFileHeader, 1, sizeof(bmpFileHeader), pf );
fwrite( &bmiHeader, 1, sizeof(bmiHeader), pf );
fwrite( pic.data[0], 1, pic.linesize[0] * frame->height, pf );
fclose( pf );
}
}
// pic.data[0] now contains the image data in RGB format (3 bytes)
// and pic.linesize[0] is the pitch of the data (ie. size of a row in memory, which can be larger than width*sizeof(pixel))
avpicture_free(&pic);
sws_freeContext(ctxt);
}
The above only decodes the frame, converts it to RGB24, and then writes a bitmap.
The original video frame looks correct, but the converted image comes out distorted (the two screenshots attached here are not reproduced).
Is some code missing, or is some of it wrong?
Thanks in advance.
fwrite( pic.data[0], 1, pic.linesize[0] * frame->height, pf );
For an image of e.g. 1280x720, the linesize is typically larger, e.g. 1312, so you'll be writing more data than the image size if you write linesize*height bytes. You want to write, row by row, width*3 bytes (RGB24 is 3 bytes per pixel), advancing by linesize bytes each time:
uint8_t *ptr = pic.data[0];
for (int y = 0; y < frame->height; y++) {
    fwrite(ptr, 1, frame->width * 3, pf); // one row of packed RGB24 pixels
    ptr += pic.linesize[0];
}
And then it should work correctly.
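An equivalent fix is to let libavutil strip the padding by packing the picture into a tight buffer first; this sketch ignores BMP row alignment exactly the way the loop above does:
int size = av_image_get_buffer_size(AV_PIX_FMT_RGB24, frame->width, frame->height, 1);
uint8_t *tmp = (uint8_t *)av_malloc(size);
// Copies the planes into one contiguous buffer, dropping per-row padding.
av_image_copy_to_buffer(tmp, size, (const uint8_t * const *)pic.data,
                        pic.linesize, AV_PIX_FMT_RGB24,
                        frame->width, frame->height, 1);
fwrite(tmp, 1, size, pf);
av_free(tmp);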
Maybe this code can help you; it works well.
int got_frame = 0;
auto len = avcodec_decode_video2(m_avCodecContext
, m_avFrame
, &got_frame
, &avpkt);
if (len < 0)
{
return;
}
if (got_frame /*&& !silentMode*/)
{
//if (videoRenderer != nullptr)
{
if (frameSize == NULL)
{
return;
}
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_w, dst_h;
int ret = 0;
if (1)// avcodec_alloc_frame()
{
auto stride = m_avFrame->linesize;
auto scan0 = m_avFrame->data;
SwsContext *scaleContext = sws_getContext(m_avCodecContext->width
, m_avCodecContext->height
, m_avCodecContext->pix_fmt
, m_avCodecContext->width
, m_avCodecContext->height
, PixelFormat::PIX_FMT_BGR24
, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (scaleContext == NULL)
{
//TODO: log error
return;
}
try
{
//*vb->signal = 1;
ret = avpicture_alloc(&m_dst_picture
, PixelFormat::PIX_FMT_BGR24
, m_avCodecContext->width
, m_avCodecContext->height);
// AVFrame *picture_RGB;
// uint8_t *bufferRGB;
// picture_RGB = avcodec_alloc_frame();
// bufferRGB = (uint8_t*)malloc(720*576*(24/8)/*avpicture_get_size(PIX_FMT_RGB24, 720, 576)*/);
// avpicture_fill((AVPicture *)picture_RGB, bufferRGB, PIX_FMT_RGB24, 720, 576);
if (ret < 0)
{
return;
}
int retScale = sws_scale(scaleContext
, scan0
, stride
, 0
, m_avCodecContext->height
, m_dst_picture.data //picture_RGB->data
, m_dst_picture.linesize //picture_RGB->linesize
);
if (1)
{
HWND hwnd = m_pParent->GetSafeHwnd();
SetFocus(hwnd);
CRect rc;
m_pParent->GetClientRect(rc);
CDC *cdc = m_pParent->GetDC();
char* bitmap = (char*)m_dst_picture.data[0];
// static unsigned int i = 0;
// bmp_save(bitmap, m_avCodecContext->width, m_avCodecContext->height, i++);
BITMAPINFO bmpinfo;
bmpinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmpinfo.bmiHeader.biWidth = m_avCodecContext->width;
bmpinfo.bmiHeader.biHeight = -m_avCodecContext->height;
bmpinfo.bmiHeader.biPlanes = 1;
bmpinfo.bmiHeader.biBitCount = 24;
bmpinfo.bmiHeader.biCompression = BI_RGB;
bmpinfo.bmiHeader.biSizeImage =
m_avCodecContext->width * m_avCodecContext->height * (24 / 8);
bmpinfo.bmiHeader.biXPelsPerMeter = 100;
bmpinfo.bmiHeader.biYPelsPerMeter = 100;
bmpinfo.bmiHeader.biClrUsed = 0;
bmpinfo.bmiHeader.biClrImportant = 0;
HBITMAP hBitmap = CreateDIBitmap(cdc->GetSafeHdc(), &bmpinfo.bmiHeader, CBM_INIT, bitmap, &bmpinfo/*bi*/, DIB_RGB_COLORS);
DrawBitmap(cdc, hBitmap, m_pParent);
::DeleteObject(hBitmap);
::DeleteObject(cdc->GetSafeHdc());
}
avpicture_free(&m_dst_picture);
sws_freeContext(scaleContext);
}
catch (int e)
{
sws_freeContext(scaleContext);
}
}
}
}
void DrawBitmap(CDC *pDC, HBITMAP hbitmap,CWnd *wnd)
{
CBitmap *pBitmap = CBitmap::FromHandle(hbitmap);
BITMAP bm;
pBitmap -> GetBitmap(&bm);
CDC MemDC;
MemDC.CreateCompatibleDC(pDC);
HGDIOBJ gob= MemDC.SelectObject(pBitmap);
CRect rc;
wnd->GetClientRect(rc);
pDC->SetStretchBltMode( COLORONCOLOR);
pDC->StretchBlt(0, 0,rc.Width(),rc.Height() , &MemDC, 0, 0, bm.bmWidth, bm.bmHeight, SRCCOPY);
MemDC.SelectObject(gob);
DeleteObject(pBitmap);
DeleteObject(MemDC);
DeleteObject(&bm);
ReleaseDC(wnd->GetSafeHwnd(), MemDC);
}
void initDecoder()
{
m_avCodecContext = avcodec_alloc_context();
if (!m_avCodecContext)
{
//failed to allocate codec context
Cleanup();
return;
}
m_avCodecContext->flags = 0;
uint8_t startCode[] = { 0x00, 0x00, 0x01 };
//////////////////////////////////////////////////////////////////////////
//I thought for play live video you can comment these lines.
if (m_sProps != NULL)
{
// USES_CONVERSION;
// ::MessageBox(NULL, A2T(sprops), TEXT("sprops"), MB_OK);
unsigned spropCount;
SPropRecord* spropRecords = parseSPropParameterSets(m_sProps, spropCount);
try
{
for (unsigned i = 0; i < spropCount; ++i)
{
AddExtraData(startCode, sizeof(startCode));
AddExtraData(spropRecords[i].sPropBytes, spropRecords[i].sPropLength);
}
}
catch (void*)
{
//extradata exceeds size limit
delete[] spropRecords;
Cleanup();
return;
}
delete[] spropRecords;
m_avCodecContext->extradata = extraDataBuffer;
m_avCodecContext->extradata_size = extraDataSize;
}
AddExtraData(startCode, sizeof(startCode));
bInitEx = true;
av_register_all();
avcodec_register_all();
m_codecId = CODEC_ID_H264;
m_avCodec = avcodec_find_decoder(m_codecId);
if (m_avCodec == NULL)
{
return;
}
if (avcodec_open(m_avCodecContext, m_avCodec) < 0)
{
//failed to open codec
Cleanup();
return;
}
if (m_avCodecContext->codec_id == CODEC_ID_H264)
{
m_avCodecContext->flags2 |= CODEC_FLAG2_CHUNKS;
//avCodecContext->flags2 |= CODEC_FLAG2_SHOW_ALL;
}
m_avFrame = avcodec_alloc_frame();
if (!m_avFrame)
{
//failed to allocate frame
Cleanup();
return;
}
}