
FFmpeg Study: Extracting the H.264 and YUV Data from a Video

2016-08-19 11:23
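The program below opens an input video (Titanic.ts), writes every video packet to film.h264 as a raw H.264 elementary stream, and at the same time decodes each packet, converts the frame to YUV420P, and appends the planes to film.yuv.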
#include <stdio.h>

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
}

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avutil.lib")

int main(int argc, char* argv[])
{
    AVFormatContext    *pFormatCtx = NULL;
    AVCodecContext     *pCodecCtx = NULL;
    AVCodec            *pCodec = NULL;
    AVFrame            *pFrame = NULL, *pFrameYUV = NULL;
    unsigned char      *out_buffer = NULL;
    AVPacket           packet;
    struct SwsContext  *img_convert_ctx = NULL;
    int                y_size;
    int                got_picture;
    int                i, videoIndex;
    int                frame_cnt = 1;

    char filepath[] = "Titanic.ts";

    FILE *fp_yuv = fopen("film.yuv", "wb+");
    FILE *fp_h264 = fopen("film.h264", "wb+");
    if (fp_yuv == NULL || fp_h264 == NULL)
    {
        printf("Failed to open an output file.\n");
        return -1;
    }

    av_register_all();

    // Open the input and read the stream information
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
        printf("Couldn't open an input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0){
        printf("Couldn't find stream information.\n");
        return -1;
    }

    // Find the first video stream
    videoIndex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
            videoIndex = i;
            break;
        }

    if (videoIndex == -1){
        printf("Couldn't find a video stream.\n");
        return -1;
    }

    // Find and open the decoder for the video stream
    pCodecCtx = pFormatCtx->streams[videoIndex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL){
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
        printf("Could not open codec.\n");
        return -1;
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    if (pFrame == NULL || pFrameYUV == NULL)
    {
        printf("Memory allocation error.\n");
        return -1;
    }

    // Buffer for the converted YUV420P picture, wired into pFrameYUV's plane pointers
    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
        AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

    // Conversion context: the decoder's native pixel format -> YUV420P, same dimensions
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        if (packet.stream_index == videoIndex)
        {
            // Extract the H.264 data: write the compressed packet out untouched
            fwrite(packet.data, 1, packet.size, fp_h264);

            if (avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet) < 0)
            {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture)
            {
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize);

                // Extract the YUV data: write the three planes back to back
                y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     // Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); // U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); // V

                printf("Decoded frame %d\n", frame_cnt);
                frame_cnt++;
            }
        }
        av_free_packet(&packet);
    }

    fclose(fp_yuv);
    fclose(fp_h264);
    sws_freeContext(img_convert_ctx);
    av_free(out_buffer);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
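Both output files can be checked with ffplay. The H.264 elementary stream extracted from a TS file carries its parameter sets in-band, so it plays directly; the YUV file is headerless, so the pixel format and frame size must be supplied on the command line (640x272 below is only a placeholder, substitute the video's real dimensions):

ffplay film.h264
ffplay -f rawvideo -pixel_format yuv420p -video_size 640x272 film.yuv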



Comparing the sizes of film.h264 and film.yuv shows a compression ratio of more than 100:1.
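As a rough sanity check (assuming, purely for illustration, a 640×272 video at 25 fps; the actual dimensions of Titanic.ts may differ): one YUV420P frame occupies width × height × 3/2 = 640 × 272 × 1.5 = 261,120 bytes, so one second of raw video is about 6.5 MB (roughly 52 Mbit/s), whereas an H.264 stream at that resolution is typically only a few hundred kbit/s, which already gives a ratio well over 100:1.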

When the av_read_frame() loop exits, the decoder may in fact still be holding a few buffered frames, so they have to be drained with a "flush decoder" step. In short, flushing means calling avcodec_decode_video2() to obtain the remaining AVFrames without passing any more input AVPackets to the decoder. Reference code (placed after the read loop, before the cleanup):
// FIX: flush the frames remaining in the decoder
int ret;
packet.data = NULL;  // an empty packet switches the decoder into draining mode
packet.size = 0;
while (1) {
    ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
    if (ret < 0)
        break;
    if (!got_picture)
        break;
    sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
    // write the YUV planes here, exactly as in the main loop...
}
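Note that draining here relies on the empty packet (data = NULL, size = 0) set up before the loop. As an aside, FFmpeg 3.1 and later deprecate avcodec_decode_video2() in favour of the avcodec_send_packet()/avcodec_receive_frame() pair, where draining is requested by sending a NULL packet. Below is a minimal sketch of the equivalent decode-and-drain helper, assuming the same pCodecCtx and pFrame as in the program above; the name decode_and_write is made up for illustration:

// Sketch of the post-3.1 send/receive decode API (not part of the original program).
// Assumes pCodecCtx, pFrame, img_convert_ctx, pFrameYUV and fp_yuv are set up as above.
static int decode_and_write(AVCodecContext *ctx, AVFrame *frame, const AVPacket *pkt)
{
    int ret = avcodec_send_packet(ctx, pkt);  // pkt == NULL starts draining
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;  // decoder needs more input, or is fully drained
        if (ret < 0)
            return ret;  // genuine decoding error
        // ...convert with sws_scale() and write the YUV planes, as in the main loop...
    }
}

With this helper, the main loop would call decode_and_write(pCodecCtx, pFrame, &packet) for every video packet, and after av_read_frame() stops, one final decode_and_write(pCodecCtx, pFrame, NULL) replaces the flush loop above.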