您的位置:首页 > 移动开发 > Android开发

ffmpeg入门系列教程(新API)示例 02

2017-04-06 21:25 896 查看

ffmpeg入门系列教程(新API)示例 02

环境搭建传送门:http://blog.csdn.net/DaveBobo/article/details/51123890

教程原文:http://dranger.com/ffmpeg/tutorial02.html

工具:

VS 2013 社区版

FFmpeg version: 20170321-db7a05d(3.2.4以上)

SDL2(需要在官网下载库和头文件并引入到工程)

新API实现教程示例 02

linux下编译命令(注意:需先删除 VS 专用的 #include "stdafx.h";若保留 extern "C" 包裹,则必须用 g++ 编译,纯 gcc 编译前还需去掉 extern "C" 包裹):

gcc -o tutorial02 test2.c -lavformat -lavcodec -lswscale -lavutil -lSDL2 -lz -lm


以下代码在 http://dranger.com/ffmpeg/ 提供的示例代码基础上修改而来:使用最新的 ffmpeg API 替换了已废弃的 API,并已通过编译验证,可直接使用。

// testffmpeg.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
// tutorial01.c
// Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
// With updates from https://github.com/chelyaev/ffmpeg-tutorial // Updates tested on:
// LAVC 54.59.100, LAVF 54.29.104, LSWS 2.1.101
// on GCC 4.7.2 in Debian February 2015

// A small sample program that shows how to use libavformat and libavcodec to
// read video from a file.
//
// Use
//
// gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lswscale -lz
//
// to build (assuming libavformat and libavcodec are correctly installed
// your system).
//
// Run using
//
// tutorial01 myvideofile.mpg
//
// to write the first five frames from "myvideofile.mpg" to disk in PPM
// format.
extern "C"{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <stdio.h>
#include "include\libsdl\SDL.h"
#include "include\libsdl\SDL_thread.h"
}

int main(int argc, char *argv[]) {
// Initalizing these to NULL prevents segfaults!
AVFormatContext   *pFormatCtx = NULL;
int               i, videoStream;
AVCodecParameters    *pCodecpar = NULL;
AVCodecContext    *pCodecCtx = NULL;
AVCodec           *pCodec = NULL;
AVFrame           *pFrame = NULL;
AVFrame           *pFrameYUV = NULL;
AVPacket          packet;
int               frameFinished;
int               numBytes;
uint8_t           *buffer = NULL;
struct SwsContext *sws_ctx = NULL;

SDL_Rect rect;
SDL_Event event;

// Register all formats and codecs
av_register_all();
//debug程序需要将test.flv放在对应的project目录下,跟引用的ffmpeg的dll库同一目录
char filepath[] = "test.flv";
// Open video file
if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0)
return -1; // Couldn't open file
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
return -1; // Couldn't find stream information

// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, filepath, 0);

// Find the first video stream
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
if (videoStream == -1)
return -1; // Didn't find a video stream

// Get a pointer to the codec context for the video stream
pCodecpar = pFormatCtx->streams[videoStream]->codecpar;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pFormatCtx->streams[videoStream]->codecpar->codec_id);
if (pCodec == NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}

// Copy context
pCodecCtx = avcodec_alloc_context3(pCodec);
if (avcodec_parameters_to_context(pCodecCtx, pCodecpar) != 0) {
fprintf(stderr, "Couldn't copy codec context");
return -1; // Error copying codec context
}

// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
return -1; // Could not open codec

// Allocate video frame
pFrame = av_frame_alloc();

// Allocate an AVFrame structure
pFrameYUV = av_frame_alloc();
if (pFrameYUV == NULL)
return -1;

// Determine required buffer size and allocate buffer
numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
pCodecCtx->height, 1);
buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

// Assign appropriate parts of buffer to image planes in pFrameYUV
// Note that pFrameYUV is an AVFrame, but AVFrame is a superset
// of AVPicture
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, buffer, AV_PIX_FMT_YUV420P,
pCodecCtx->width, pCodecCtx->height, 1);

// initialize SWS context for software scaling
sws_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL);

SDL_Window *sdlWindow = SDL_CreateWindow("test2",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
pCodecCtx->width, pCodecCtx->height,
SDL_WINDOW_RESIZABLE | SDL_WINDOW_OPENGL);
SDL_Renderer *sdlRenderer = SDL_CreateRenderer(sdlWindow, -1, 0);
SDL_Texture* sdlTexture = SDL_CreateTexture(
sdlRenderer,
SDL_PIXELFORMAT_IYUV,
SDL_TEXTUREACCESS_STREAMING,
pCodecCtx->width,
pCodecCtx->height);

// Read frames and display it
i = 0;
while (av_read_frame(pFormatCtx, &packet) >= 0) {
// Is this a packet from the video stream?
if (packet.stream_index == videoStream) {
// Decode video frame
avcodec_send_packet(pCodecCtx, &packet);
if (avcodec_receive_frame(pCodecCtx, pFrame) != 0)
continue;

// Convert the image from its native format to YUV
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);

// display the frame to screen
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;

SDL_UpdateTexture(sdlTexture, &rect,
pFrameYUV->data[0],
pFrameYUV->linesize[0]);
SDL_RenderClear(sdlRenderer);
SDL_RenderCopy(sdlRenderer, sdlTexture, &rect, &rect);
SDL_RenderPresent(sdlRenderer);
SDL_Delay(20);
}

// Free the packet that was allocated by av_read_frame
av_packet_unref(&packet);

SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
SDL_Quit();
exit(0);
break;
default:
break;
}
}

SDL_DestroyTexture(sdlTexture);
// Free the RGB image
av_free(buffer);
av_frame_free(&pFrameYUV);

// Free the YUV frame
av_frame_free(&pFrame);

// Close the codecs
avcodec_close(pCodecCtx);

// Close the video file
avformat_close_input(&pFormatCtx);

return 0;
}
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息