运用ffmpeg SDK解264码流(来源FFmpeg工程组)
2011-12-25 22:38
288 查看
Link:http://www.ffmpeg.com.cn/index.php/%E8%BF%90%E7%94%A8SDK%E8%A7%A3264%E7%A0%81%E6%B5%81
方法二(比较麻烦一点):
fastreaming的方法
有关该问题的讨论帖可参考ffmpeg工程组论坛中的相关讨论:
运用SDK解264码流 方法一:
最好参考ffmpeg自带的两个例子:output_example.c和apiexample.c文件;抑或直接看ffmpeg和ffplay的例程也可,如果你需要重量级地使用ffmpeg的话
方法二(比较麻烦一点):
这是一个网上的例程http://www.inb.uni-luebeck.de/~boehme/avcodec_sample.cpp为方便全部贴出如下:
/*
 * Decode an H.264 elementary stream (test.264) with the FFmpeg SDK and dump
 * the decoded frames as tightly packed planar YUV420P into out.yuv.
 *
 * Based on http://www.inb.uni-luebeck.de/~boehme/avcodec_sample.cpp, with the
 * fixes recommended in the accompanying article applied:
 *   - av_read_frame() is used instead of av_read_packet();
 *   - the CODEC_FLAG_TRUNCATED block is removed (it must NOT be set when
 *     reading with av_read_frame(), or frames are lost/corrupted);
 *   - the fopen() error message now reports the file that actually failed.
 *
 * NOTE(review): this targets an old FFmpeg API (avcodec_decode_video,
 * img_convert, av_open_input_file, ...) — it will not build against modern
 * FFmpeg without porting.
 */
#include "avcodec.h"
#include "avformat.h"
#include <stdlib.h>
#include <time.h>
#include <stdio.h>

/* Minimal bool for pre-C99 toolchains (as in the original sample). */
enum bool { false = 0, true };
typedef enum bool bool;

/*
 * Feed demuxed packets of stream `videoStream` to the decoder until one
 * complete frame is available in *pFrame.
 *
 * Returns true when a frame was decoded, false on end of stream or on a
 * decoding error.  State is kept in function statics, so this can only
 * drive a single stream per process (a limitation of the original sample).
 */
static bool GetNextFrame(AVFormatContext *pFormatCtx, AVCodecContext *pCodecCtx,
                         int videoStream, AVFrame *pFrame)
{
    static AVPacket packet;
    static int bytesRemaining = 0;
    static uint8_t *rawData;
    static bool fFirstTime = true;
    int bytesDecoded;
    int frameFinished;

    /* First call: mark packet.data so we know it need not be freed. */
    if (fFirstTime) {
        fFirstTime = false;
        packet.data = NULL;
    }

    /* Decode packets until we have decoded a complete frame. */
    while (true) {
        /* Work on the current packet until all of it has been consumed. */
        while (bytesRemaining > 0) {
            bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame,
                                                &frameFinished,
                                                rawData, bytesRemaining);
            if (bytesDecoded < 0) {
                fprintf(stderr, "Error while decoding frame\n");
                return false;
            }
            bytesRemaining -= bytesDecoded;
            rawData += bytesDecoded;
            /* Did we finish the current frame? Then we can return. */
            if (frameFinished)
                return true;
        }

        /* Read the next packet, skipping packets of other streams. */
        do {
            if (packet.data != NULL)
                av_free_packet(&packet);
            /* FIX: av_read_frame() (not av_read_packet()) — it delivers
             * whole frames, which the rest of this loop relies on. */
            if (av_read_frame(pFormatCtx, &packet) < 0)
                goto loop_exit;
        } while (packet.stream_index != videoStream);

        bytesRemaining = packet.size;
        rawData = packet.data;
    }

loop_exit:
    /* End of stream: decode the remainder to flush out the last frame. */
    bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                        rawData, bytesRemaining);
    if (packet.data != NULL)
        av_free_packet(&packet);
    return frameFinished != 0;
}

int main()
{
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameYUV;
    clock_t t;
    double fps;
    int y_size, i_frame = 0;
    int numBytes;
    uint8_t *buffer;
    char *infile = "test.264";
    char *outfile = "out.yuv";
    FILE *fp = fopen(outfile, "wb");

    if (fp == NULL) {
        /* FIX: it is the OUTPUT file that failed to open, not the input. */
        fprintf(stderr, "\nCan't open file %s!", outfile);
        return -1;
    }

    /* Register all formats and codecs. */
    av_register_all();

    /* Open the video file and read the stream information. */
    if (av_open_input_file(&pFormatCtx, infile, NULL, 0, NULL) != 0)
        return -1; /* couldn't open file */
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1; /* couldn't find stream information */

    /* Dump information about the file onto standard error. */
    dump_format(pFormatCtx, 0, infile, false);

    t = clock();

    /* Find the first video stream. */
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return -1; /* didn't find a video stream */

    /* Codec context of the video stream, and its decoder. */
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
        return -1; /* codec not found */

    /* FIX: the original sample set CODEC_FLAG_TRUNCATED here when the codec
     * advertised CODEC_CAP_TRUNCATED.  That flag must NOT be used together
     * with av_read_frame(): packets already carry whole frames, and setting
     * it causes severe frame loss and corrupted pictures. */

    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; /* could not open codec */

    /* Allocate the decode frame and the packed-YUV output frame. */
    pFrame = avcodec_alloc_frame();
    pFrameYUV = avcodec_alloc_frame();
    if (pFrame == NULL || pFrameYUV == NULL)
        return -1;

    /* Determine required buffer size and allocate the pixel buffer. */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P,
                                  pCodecCtx->width, pCodecCtx->height);
    buffer = malloc(numBytes);
    if (buffer == NULL)
        return -1; /* FIX: the original never checked the allocation */

    /* Attach the buffer's planes to pFrameYUV. */
    avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV420P,
                   pCodecCtx->width, pCodecCtx->height);

    /* Read and decode frames, converting each to packed YUV420P. */
    while (GetNextFrame(pFormatCtx, pCodecCtx, videoStream, pFrame)) {
        /* Repack from the decoder's padded layout into contiguous planes. */
        img_convert((AVPicture *)pFrameYUV, PIX_FMT_YUV420P,
                    (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                    pCodecCtx->width, pCodecCtx->height);
        i_frame++;
        y_size = pCodecCtx->width * pCodecCtx->height;
#if 1
        if (i_frame == 1) { /* only output one time */
            printf("\n:lolpFrame->linesize[0]=%d, pFrame->linesize[1]=%d, pFrame->linesize[2]=%d!\n",
                   pFrame->linesize[0], pFrame->linesize[1], pFrame->linesize[2]);
            printf("\n:lolpFrameYUV->linesize[0]=%d, pFrameYUV->linesize[1]=%d, pFrameYUV->linesize[2]=%d!",
                   pFrameYUV->linesize[0], pFrameYUV->linesize[1], pFrameYUV->linesize[2]);
        }
#endif
        /* YUV420P: full-resolution Y plane, quarter-size U and V planes. */
        fwrite(pFrameYUV->data[0], 1, y_size, fp);
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp);
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp);
    }

    fclose(fp);

    /* Report the achieved decode rate. */
    t = clock() - t;
    fps = (double)t / CLOCKS_PER_SEC;
    fps = i_frame / fps;
    printf("\n==>Decode rate %.4f fps!\n", fps);

    /* Release everything in reverse order of acquisition. */
    free(buffer);
    av_free(pFrameYUV);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);
    return 0;
}
将以上例程作如下修改: 1.将GetNextFrame里面的函数av_read_packet改成av_read_frame; 2.解码后一帧YUV的保存,可以按fastreaming的方法(具体方法如下),另外重新定义一YUV420P格式,调用img_convert 将解码后的帧转换到新定义的帧里面去。后者可能稍微耗时一些,但这样接口更清晰一些,便于封装,可直接用于显示等。 3.该程序对解码后的最后一帧,需在循环体while后面,才能再次写解码帧的数据。否则你将会看到解码少1帧。可以参考apiexample里面。 4.如果要使用以下这一段的话 if(g_ffmpeg_pCodec->capabilities&CODEC_CAP_TRUNCATED) g_ffmpeg_pCodecCtx->flags|= CODEC_FLAG_TRUNCATED; 这一段在使用av_read_frame的时候是一定要去掉的,否则严重丢失数据。我用av_read_frame的时候没有去掉那两句。结果380多帧的一个视频文件只解出190多帧。而且中间有很多帧都是花的。类似劣质VCD被卡的那种画面。去掉以后一切OK。
fastreaming的方法
source :352x288 internal: (16+352+16) x288 FFmpeg adds 16 pixels at four edges of a frame just for enhancing MC/ME result: you get: linesize[0] = (16+352+16) = 384 linesize[1] = linesize[0]/2; linesize[2] = linesize[0]/2 but data[0],data[1],data[2] is just the address of valid YUV pixels, but the valid data length is 352,176,176 separately Now I think you can figure out the layout of a YUV frame which is generated by FFmpeg decoder Please ref the following function to dump yuv data int g_yuv_index = 1; void smartAV_dump_yuv(char *file_name,AVPicture *pic,int width,int height) { FILE *fp =0; char filename[128],index_name[32]; int i,j,shift; uint8_t *yuv_factor; strcpy(filename,file_name); sprintf(index_name,"new_yuv_dump_%d.yuv",g_yuv_index); strcat(filename,index_name); fp = fopen(filename,"wb"); if(fp) { for(i = 0; i < 3; i++) { shift = (i == 0 ? 0:1); yuv_factor = pic->data; for(j = 0; j < (height>>shift); j++) { fwrite(yuv_factor,(width>>shift),1,fp); yuv_factor += pic->linesize; } } fclose(fp); g_yuv_index++; } } ource :352x288 internal: (16+352+16) x(16+288+16) FFmpeg adds 16 pixels at four edges of a frame just for enhancing MC/ME result: you get: linesize[0] = (16+352+16) = 384 linesize[1] = linesize[0]/2; linesize[2] = linesize[0]/2 but data[0],data[1],data[2] is just the address of valid YUV pixels, but the valid data length is 352,176,176 per line separately and there is 288 lines valid for data[0], data[1]: 144,data[2] :144 Now I think you can figure out the layout of a YUV frame which is generated by FFmpeg decoder Please ref the following function to dump yuv data int g_yuv_index = 1; void smartAV_dump_yuv(char *file_name,AVPicture *pic,int width,int height) { FILE *fp =0; char filename[128],index_name[32]; int i,j,shift; uint8_t *yuv_factor; strcpy(filename,file_name); sprintf(index_name,"new_yuv_dump_%d.yuv",g_yuv_index); strcat(filename,index_name); fp = fopen(filename,"wb"); if(fp) { for(i = 0; i < 3; i++) { shift = (i == 0 ? 
0:1); yuv_factor = pic->data; for(j = 0; j < (height>>shift); j++) { fwrite(yuv_factor,(width>>shift),1,fp); yuv_factor += pic->linesize; } } fclose(fp); g_yuv_index++; } }
有关该问题的讨论帖可参考ffmpeg工程组论坛中的相关讨论:
有关运用SDK解264码流的讨论
相关文章推荐
- 运用ffmpeg SDK解264码流(来源FFmpeg工程组)
- 【FFMpeg视频开发与应用基础】三、调用FFmpeg SDK对H.264格式的视频压缩码流进行解码
- 基于ffmpeg摄像头采集编码成264码流
- ffmpeg,264编码,得到码流有延迟。实时编码
- iOS工程中创建并使用SDK
- ffmpeg 转换VC工具已经可以生成工程文件
- VS2005工程由Pocket PC 2003 SDK转为WINCE6.0 SDK的问题
- FFmpeg源代码简单分析:结构体成员管理系统-AVClass
- 护照扫描仪出入境海关运用SDK
- 轻松使用 ffmpeg sdk 实现各种格式的rgb以及yuv raw
- Cannot run program "/android-sdk-linux/aapt":(xxx工程目录下) error=2, 没有那个文件或目录
- ffmpeg转码器移植VC的工程:ffmpeg for MFC
- 深入浅出FFMPEG(二) SDK version 0.11
- ffmpeg使用二:录屏数据直接264编码
- ffmpeg3.2.2sdk demo vs2015测试工程
- IOS支付宝SDK集成到工程中(小白适用大神勿扰)
- ffmpeg SDK开发手册
- FFMPEG 提取码流的基本信息
- 如何使用SDK在Ubuntu设备(包括仿真器和桌面)上运用应用程序
- Java利用JNI调用FFMpeg对h264码流进行解码