
Android (2.3+) Source Code Analysis: RTSP in MediaPlayer

2013-05-17 22:19
This is a newcomer's post and I am just getting started in many areas, so please bear with any mistakes and feel free to point them out.

In an earlier post I briefly analyzed MediaPlayer. Recently I started digging into this area again, so I am writing down my understanding of the code here for quick reference later.

When playing an ordinary file, the url passed in is a local absolute path. For streaming media, what gets passed in is a network address, for example a stream whose url starts with "http://" or one using the "rtsp://" protocol.

Let's start from the finishSetDataSource_l function in AwesomePlayer.cpp (this roughly corresponds to the preparation work that begins after mediaplayer calls prepare, such as getting the audio and video streams ready):

status_t AwesomePlayer::finishSetDataSource_l() {
    sp<DataSource> dataSource;

    bool isM3u8 = false;
    String8 surfix;
    surfix.setTo(mUri.string() + (mUri.size() - 5));
    isM3u8 = !strncasecmp(".m3u8", surfix, 5);
    if (!strncasecmp("http://", mUri.string(), 7) && (!isM3u8)) {
        mConnectingDataSource = new NuHTTPDataSource;

        mLock.unlock();
        status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders);
        ...
    } else if (!strncasecmp("rtsp://", mUri.string(), 7)) {
        if (mLooper == NULL) {
            mLooper = new ALooper;
            mLooper->setName("rtsp");
            mLooper->start();
        }
        mRTSPController = new ARTSPController(mLooper);
        status_t err = mRTSPController->connect(mUri.string());

        LOGI("ARTSPController::connect returned %d", err);

        if (err != OK) {
            mRTSPController.clear();
            return err;
        }

        sp<MediaExtractor> extractor = mRTSPController.get();
        return setDataSource_l(extractor);
    }

First AwesomePlayer parses the url and, based on it, creates a different media extractor (MediaExtractor). For a local file this amounts to opening, say, an mp4 and parsing which streams it contains (audio, video, subtitles, etc.); for RTSP it naturally means establishing a connection to the network address the user supplied and getting ready to receive media data at any time.

The key part is ARTSPController's connect function:

status_t ARTSPController::connect(const char *url) {
    Mutex::Autolock autoLock(mLock);

    if (mState != DISCONNECTED) {
        return ERROR_ALREADY_CONNECTED;
    }

    sp<AMessage> msg = new AMessage(kWhatConnectDone, mReflector->id());

    mHandler = new MyHandler(url, mLooper);

    mState = CONNECTING;

    mHandler->connect(msg);

    while (mState == CONNECTING) {
        mCondition.wait(mLock);
    }

    if (mState != CONNECTED) {
        mHandler.clear();
    }

    return mConnectionResult;
}

Notice that the function contains:

while (mState == CONNECTING) {
    mCondition.wait(mLock);
}

The function only returns once the state is no longer CONNECTING, i.e. after it has become CONNECTED (or the connection has failed).
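This is the classic blocking handshake built from a mutex and a condition variable: connect blocks on the condition until another thread changes the state and signals it. A minimal self-contained sketch of the same pattern in standard C++ (Controller and connectAsync are names I made up for illustration; this is not the stagefright code):

#include <condition_variable>
#include <mutex>
#include <thread>

struct Controller {
    std::mutex lock;
    std::condition_variable cond;
    enum State { DISCONNECTED, CONNECTING, CONNECTED } state = DISCONNECTED;

    // Blocks the caller until the worker thread moves us out of CONNECTING,
    // mirroring ARTSPController::connect waiting on mCondition.
    bool connect() {
        std::unique_lock<std::mutex> guard(lock);
        state = CONNECTING;
        std::thread([this] { connectAsync(); }).detach();   // hypothetical async work
        cond.wait(guard, [this] { return state != CONNECTING; });
        return state == CONNECTED;
    }

    // Runs on another thread; plays the role of the kWhatConnectDone path.
    void connectAsync() {
        std::lock_guard<std::mutex> guard(lock);
        state = CONNECTED;            // or DISCONNECTED on failure
        cond.notify_one();            // the equivalent of mCondition.signal()
    }
};

Keep this shape in mind; the rest of the article is essentially the story of who eventually flips the state to CONNECTED and calls signal.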

We need to understand how this state gets changed.

First, an sp<AMessage> msg = new AMessage(kWhatConnectDone, mReflector->id()) is created; the message type is kWhatConnectDone and its target id is mReflector->id().

This id is crucial: it determines which object will eventually receive the message. Let's look at where mReflector comes from:

ARTSPController::ARTSPController(const sp<ALooper> &looper)
    : mState(DISCONNECTED),
      mLooper(looper),
      mSeekDoneCb(NULL),
      mSeekDoneCookie(NULL),
      mLastSeekCompletedTimeUs(-1) {
    mReflector = new AHandlerReflector<ARTSPController>(this);
    looper->registerHandler(mReflector);
}

It is created in ARTSPController's constructor and registered with the looper. If you are not familiar with the looper, have a look at its source; briefly, it is a looping message queue that keeps a list of handlers (each handler has a unique id, the id we mentioned above), and when a message is dispatched the looper uses that id to find the corresponding handler.
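To make the dispatch-by-id idea and the reply-message idiom concrete, here is a heavily simplified, self-contained sketch of such a looper in standard C++ (Looper, Message and Handler here are stand-ins I wrote for illustration, not the real ALooper/AMessage/AHandler API):

#include <condition_variable>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

struct Message;

struct Handler {
    virtual void onMessageReceived(const std::shared_ptr<Message> &msg) = 0;
    virtual ~Handler() {}
};

struct Message {
    uint32_t what;                     // e.g. 'conn' or kWhatConnectDone
    int targetId;                      // id of the handler that should receive it
    std::shared_ptr<Message> reply;    // optional reply, like setMessage("reply", ...)
};

class Looper {
public:
    // Each registered handler gets a unique id -- this is the id() carried by messages.
    int registerHandler(const std::shared_ptr<Handler> &h) {
        handlers_[++lastId_] = h;
        return lastId_;
    }

    void post(const std::shared_ptr<Message> &msg) {
        { std::lock_guard<std::mutex> g(m_); q_.push(msg); }
        cv_.notify_one();
    }

    void start() {
        thread_ = std::thread([this] { loop(); });   // runs forever in this sketch
    }

private:
    void loop() {
        for (;;) {
            std::shared_ptr<Message> msg;
            {
                std::unique_lock<std::mutex> g(m_);
                cv_.wait(g, [this] { return !q_.empty(); });
                msg = q_.front();
                q_.pop();
            }
            auto it = handlers_.find(msg->targetId);
            if (it != handlers_.end()) {
                it->second->onMessageReceived(msg);   // dispatch by id
            }
        }
    }

    std::map<int, std::shared_ptr<Handler>> handlers_;
    std::queue<std::shared_ptr<Message>> q_;
    std::mutex m_;
    std::condition_variable cv_;
    int lastId_ = 0;
    std::thread thread_;
};

A handler registered here gets a unique id back; posting a message that carries that id, and optionally a second message to be posted back as a reply once the work is done, is exactly the pattern ARTSPConnection and MyHandler use throughout the code below.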

Next, let's look at MyHandler's connect function:

void connect(const sp<AMessage> &doneMsg) {
    mDoneMsg = doneMsg;

    mLooper->registerHandler(this);
    mLooper->registerHandler(mConn);
    (1 ? mNetLooper : mLooper)->registerHandler(mRTPConn);

    sp<AMessage> notify = new AMessage('biny', id());
    mConn->observeBinaryData(notify);

    sp<AMessage> reply = new AMessage('conn', id());
    mConn->connect(mOriginalSessionURL.c_str(), reply);
}

The argument passed in is exactly the sp<AMessage> msg = new AMessage(kWhatConnectDone, mReflector->id()) from above,

which means this message will be triggered whenever the connect action completes.

There are two important parts here.

sp<AMessage> notify = new AMessage('biny', id());
mConn->observeBinaryData(notify);

void ARTSPConnection::observeBinaryData(const sp<AMessage> &reply) {
    sp<AMessage> msg = new AMessage(kWhatObserveBinaryData, id());
    msg->setMessage("reply", reply);
    msg->post();
}

Judging from the id, this message is posted to ARTSPConnection itself. Note the setMessage parameter named "reply": it is literally a reply, meaning that once this message has been handled, the handler is expected to answer by posting it, and the reply here is

AMessage('biny', id());

void ARTSPConnection::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatConnect:
            onConnect(msg);
            break;

        case kWhatDisconnect:
            onDisconnect(msg);
            break;

        case kWhatCompleteConnection:
            onCompleteConnection(msg);
            break;

        case kWhatSendRequest:
            onSendRequest(msg);
            break;

        case kWhatReceiveResponse:
            onReceiveResponse();
            break;

        case kWhatObserveBinaryData:
        {
            CHECK(msg->findMessage("reply", &mObserveBinaryMessage));
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

In ARTSPConnection::onMessageReceived we find kWhatObserveBinaryData, where the reply value is saved into mObserveBinaryMessage; that message will be used later by receiveRTSPReponse. Now let's look at the next important part:

sp<AMessage> reply = new AMessage('conn', id());

mConn->connect(mOriginalSessionURL.c_str(), reply);

This is where the connection actually happens.
First, the reply is bound to MyHandler's id, which means we will eventually come back to case 'conn': in MyHandler's onMessageReceived.

void ARTSPConnection::connect(const char *url, const sp<AMessage> &reply) {
    sp<AMessage> msg = new AMessage(kWhatConnect, id());
    msg->setString("url", url);
    msg->setMessage("reply", reply);
    msg->post();
}

This posts a kWhatConnect message to itself; when it is received in onMessageReceived, the following gets called:

void ARTSPConnection::onConnect(const sp<AMessage> &msg) {
    ++mConnectionID;

    if (mState != DISCONNECTED) {
        close(mSocket);
        mSocket = -1;

        flushPendingRequests();
    }

    mState = CONNECTING;

    AString url;
    CHECK(msg->findString("url", &url));

    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));

    AString host, path;
    unsigned port;
    if (!ParseURL(url.c_str(), &host, &port, &path, &mUser, &mPass)
            || (mUser.size() > 0 && mPass.size() == 0)) {
        // If we have a user name but no password we have to give up
        // right here, since we currently have no way of asking the user
        // for this information.

        LOGE("Malformed rtsp url %s", url.c_str());

        reply->setInt32("result", ERROR_MALFORMED);
        reply->post();

        mState = DISCONNECTED;
        return;
    }

    if (mUser.size() > 0) {
        LOGV("user = '%s', pass = '%s'", mUser.c_str(), mPass.c_str());
    }

    struct hostent *ent = gethostbyname(host.c_str());
    if (ent == NULL) {
        LOGE("Unknown host %s", host.c_str());

        reply->setInt32("result", -ENOENT);
        reply->post();

        mState = DISCONNECTED;
        return;
    }

    mSocket = socket(AF_INET, SOCK_STREAM, 0);

    MakeSocketBlocking(mSocket, false);

    struct sockaddr_in remote;
    memset(remote.sin_zero, 0, sizeof(remote.sin_zero));
    remote.sin_family = AF_INET;
    remote.sin_addr.s_addr = *(in_addr_t *)ent->h_addr;
    remote.sin_port = htons(port);

    int err = ::connect(
            mSocket, (const struct sockaddr *)&remote, sizeof(remote));

    reply->setInt32("server-ip", ntohl(remote.sin_addr.s_addr));

    if (err < 0) {
        if (errno == EINPROGRESS) {
            sp<AMessage> msg = new AMessage(kWhatCompleteConnection, id());
            msg->setMessage("reply", reply);
            msg->setInt32("connection-id", mConnectionID);
            msg->post();
            return;
        }

        reply->setInt32("result", -errno);
        mState = DISCONNECTED;

        close(mSocket);
        mSocket = -1;
    } else {
        reply->setInt32("result", OK);
        mState = CONNECTED;
        mNextCSeq = 1;

        postReceiveReponseEvent();
    }

    reply->post();
}

In onConnect, the url is first parsed by ParseURL to obtain the host, port (and, if present, user and password).
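As an aside, splitting an "rtsp://host:port/path" string into its parts needs nothing beyond standard C++. The sketch below only illustrates the idea (it ignores the user:pass@ form and IPv6, and parseRtspUrl is a made-up helper, not the actual ParseURL):

#include <cstdio>
#include <cstdlib>
#include <string>

// Hypothetical helper: splits "rtsp://host[:port]/path" into host, port and path.
static bool parseRtspUrl(const std::string &url,
                         std::string *host, unsigned *port, std::string *path) {
    const std::string prefix = "rtsp://";
    if (url.compare(0, prefix.size(), prefix) != 0) return false;

    std::string rest = url.substr(prefix.size());
    size_t slash = rest.find('/');
    std::string hostport = rest.substr(0, slash);
    *path = (slash == std::string::npos) ? std::string("/") : rest.substr(slash);

    size_t colon = hostport.find(':');
    *port = 554;                                      // default RTSP port
    if (colon != std::string::npos) {
        *port = (unsigned)std::strtoul(hostport.c_str() + colon + 1, NULL, 10);
        hostport.resize(colon);
    }
    *host = hostport;
    return !host->empty();
}

int main() {
    std::string host, path;
    unsigned port;
    if (parseRtspUrl("rtsp://example.com:8554/stream1", &host, &port, &path)) {
        printf("host=%s port=%u path=%s\n", host.c_str(), port, path.c_str());
    }
    return 0;
}

With host and port in hand, onConnect then opens the TCP connection: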

int err = ::connect(
mSocket, (const struct sockaddr *)&remote, sizeof(remote));

Once the connection succeeds, ARTSPConnection's state becomes mState = CONNECTED.
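Note that MakeSocketBlocking(mSocket, false) has put the socket into non-blocking mode, so ::connect may legitimately return -1 with errno == EINPROGRESS, and the connection is then completed later via kWhatCompleteConnection. A minimal POSIX sketch of that non-blocking connect pattern, just to illustrate what happens conceptually (startConnect and finishConnect are made-up helpers, not the stagefright code):

#include <arpa/inet.h>
#include <cerrno>
#include <fcntl.h>
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

// Start a non-blocking connect; returns the socket, or -1 on immediate failure.
static int startConnect(const sockaddr_in &remote) {
    int s = socket(AF_INET, SOCK_STREAM, 0);
    fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK);   // like MakeSocketBlocking(s, false)

    int err = ::connect(s, (const sockaddr *)&remote, sizeof(remote));
    if (err < 0 && errno != EINPROGRESS) {                   // real failure
        close(s);
        return -1;
    }
    return s;   // either connected, or still connecting (EINPROGRESS)
}

// Later: wait until the socket becomes writable, then read SO_ERROR to see
// whether the connect actually succeeded. Conceptually this is the job the
// kWhatCompleteConnection message ends up doing.
static bool finishConnect(int s, long timeoutUs) {
    fd_set ws;
    FD_ZERO(&ws);
    FD_SET(s, &ws);

    timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = timeoutUs;
    if (select(s + 1, NULL, &ws, NULL, &tv) <= 0) return false;

    int soErr = 0;
    socklen_t len = sizeof(soErr);
    getsockopt(s, SOL_SOCKET, SO_ERROR, &soErr, &len);
    return soErr == 0;
}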

It then calls the postReceiveReponseEvent function:

void ARTSPConnection::postReceiveReponseEvent() {
    if (mReceiveResponseEventPending) {
        return;
    }

    sp<AMessage> msg = new AMessage(kWhatReceiveResponse, id());
    msg->post();

    mReceiveResponseEventPending = true;
}

This posts a kWhatReceiveResponse message to itself,

which eventually ends up in:

void ARTSPConnection::onReceiveResponse() {
    mReceiveResponseEventPending = false;

    if (mState != CONNECTED) {
        return;
    }

    struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = kSelectTimeoutUs;

    fd_set rs;
    FD_ZERO(&rs);
    FD_SET(mSocket, &rs);

    int res = select(mSocket + 1, &rs, NULL, NULL, &tv);
    CHECK_GE(res, 0);

    if (res == 1) {
        MakeSocketBlocking(mSocket, true);

        bool success = receiveRTSPReponse();

        MakeSocketBlocking(mSocket, false);

        if (!success) {
            // Something horrible, irreparable has happened.
            flushPendingRequests();
            return;
        }
    }

    postReceiveReponseEvent();
}

Here select is used to monitor the local mSocket, i.e. to check whether there is readable data on the socket, in other words whether the remote side has sent anything.

When data arrives, res == 1,

and receiveRTSPReponse is called to receive the data that was sent.

You may remember this function from earlier; search for kWhatObserveBinaryData and you will see where it was set up.
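For context, an RTSP response has the same shape as an HTTP response: a status line, header lines, a blank line, then an optional body. A tiny illustrative parser in plain C++ (parseRtspResponse is a made-up helper, not the actual receiveRTSPReponse, which reads from the socket incrementally):

#include <map>
#include <sstream>
#include <string>

// Hypothetical helper: parses "RTSP/1.0 200 OK\r\nHeader: value\r\n\r\nbody".
static bool parseRtspResponse(const std::string &raw,
                              int *statusCode,
                              std::map<std::string, std::string> *headers,
                              std::string *body) {
    std::istringstream in(raw);
    std::string version, reason;
    if (!(in >> version >> *statusCode)) return false;   // "RTSP/1.0 200"
    std::getline(in, reason);                             // rest of the status line

    std::string line;
    while (std::getline(in, line) && line != "\r" && !line.empty()) {
        size_t colon = line.find(':');
        if (colon == std::string::npos) continue;
        std::string key = line.substr(0, colon);
        std::string value = line.substr(colon + 1);
        while (!value.empty() && value.front() == ' ') value.erase(0, 1);  // trim space
        if (!value.empty() && value.back() == '\r') value.pop_back();      // trim '\r'
        (*headers)[key] = value;
    }

    std::getline(in, *body, '\0');                        // whatever remains is the body
    return true;
}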

After postReceiveReponseEvent, reply->post() is called; note that this reply is the AMessage('conn', id()) that MyHandler sent out earlier.

At this point the code comes back to case 'conn': in MyHandler's onMessageReceived.

Here we find:

sp<AMessage> reply = new AMessage('desc', id());
mConn->sendRequest(request.c_str(), reply);

The principle is the same as before: a request is sent, again with a reply message attached.

Anyone who has worked with SIP will know SDP, the protocol the two sides use to negotiate media information before the session is established.
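In RTSP this negotiation happens through the DESCRIBE request: the client asks for a description of the presentation and the server answers with an SDP document. Purely as an illustration (the server name, CSeq values and header set below are made up, not taken from the code), the exchange on the wire looks roughly like this:

DESCRIBE rtsp://example.com:8554/stream1 RTSP/1.0
CSeq: 2
Accept: application/sdp

RTSP/1.0 200 OK
CSeq: 2
Content-Base: rtsp://example.com:8554/stream1/
Content-Type: application/sdp
Content-Length: ...

(the SDP session description follows as the body; see the sample a bit further down)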

I won't repeat the whole mechanism; go straight to case 'desc':. Here the response code returned by the peer is checked. Just like HTTP, a 200 response means the request succeeded.
Let's look at what the 200 branch does:

if (response->mStatusCode != 200) {
    result = UNKNOWN_ERROR;
} else {
    mSessionDesc = new ASessionDescription;

    mSessionDesc->setTo(
            response->mContent->data(),
            response->mContent->size());

    if (!mSessionDesc->isValid()) {
        LOGE("Failed to parse session description.");
        result = ERROR_MALFORMED;
    } else {
        ssize_t i = response->mHeaders.indexOfKey("content-base");
        if (i >= 0) {
            mBaseURL = response->mHeaders.valueAt(i);
        } else {
            i = response->mHeaders.indexOfKey("content-location");
            if (i >= 0) {
                mBaseURL = response->mHeaders.valueAt(i);
            } else {
                mBaseURL = mSessionURL;
            }
        }

        if (!mBaseURL.startsWith("rtsp://")) {
            // Some misbehaving servers specify a relative
            // URL in one of the locations above, combine
            // it with the absolute session URL to get
            // something usable...

            LOGW("Server specified a non-absolute base URL"
                 ", combining it with the session URL to "
                 "get something usable...");

            AString tmp;
            CHECK(MakeURL(
                        mSessionURL.c_str(),
                        mBaseURL.c_str(),
                        &tmp));

            mBaseURL = tmp;
        }

        CHECK_GT(mSessionDesc->countTracks(), 1u);
        setupTrack(1);
    }
}

The first thing it does is store the negotiated sdp information in an ASessionDescription.

Then comes setupTrack(1). At this point we don't yet know how many tracks there are; track 1 is simply set up first.
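Before reading setupTrack, it helps to see what the SDP that drives it typically contains. The sample below is made up purely for illustration; the a=control attributes are what setupTrack looks up via findAttribute(index, "a=control", ...) to build each track's SETUP url. Note that in ASessionDescription the session-level section occupies index 0, so the first m= line is track 1, which is why setupTrack(1) is the starting point.

v=0
o=- 0 0 IN IP4 127.0.0.1
s=Example Session
c=IN IP4 0.0.0.0
t=0 0
m=audio 0 RTP/AVP 96
a=rtpmap:96 mpeg4-generic/44100/2
a=control:trackID=1
m=video 0 RTP/AVP 97
a=rtpmap:97 H264/90000
a=control:trackID=2

With that in mind, here is setupTrack: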


void setupTrack(size_t index) {
    sp<APacketSource> source =
        new APacketSource(mSessionDesc, index);

    if (source->initCheck() != OK) {
        LOGW("Unsupported format. Ignoring track #%d.", index);

        sp<AMessage> reply = new AMessage('setu', id());
        reply->setSize("index", index);
        reply->setInt32("result", ERROR_UNSUPPORTED);
        reply->post();
        return;
    }

    AString url;
    CHECK(mSessionDesc->findAttribute(index, "a=control", &url));

    AString trackURL;
    CHECK(MakeURL(mBaseURL.c_str(), url.c_str(), &trackURL));

    mTracks.push(TrackInfo());
    TrackInfo *info = &mTracks.editItemAt(mTracks.size() - 1);
    info->mURL = trackURL;
    info->mPacketSource = source;
    info->mUsingInterleavedTCP = false;
    info->mFirstSeqNumInSegment = 0;
    info->mNewSegment = true;

    LOGV("track #%d URL=%s", mTracks.size(), trackURL.c_str());

    AString request = "SETUP ";
    request.append(trackURL);
    request.append(" RTSP/1.0\r\n");

    if (mTryTCPInterleaving) {
        size_t interleaveIndex = 2 * (mTracks.size() - 1);
        info->mUsingInterleavedTCP = true;
        info->mRTPSocket = interleaveIndex;
        info->mRTCPSocket = interleaveIndex + 1;

        request.append("Transport: RTP/AVP/TCP;interleaved=");
        request.append(interleaveIndex);
        request.append("-");
        request.append(interleaveIndex + 1);
    } else {
        unsigned rtpPort;
        ARTPConnection::MakePortPair(
                &info->mRTPSocket, &info->mRTCPSocket, &rtpPort);

        request.append("Transport: RTP/AVP/UDP;unicast;client_port=");
        request.append(rtpPort);
        request.append("-");
        request.append(rtpPort + 1);
    }

    request.append("\r\n");

    if (index > 1) {
        request.append("Session: ");
        request.append(mSessionID);
        request.append("\r\n");
    }

    request.append("\r\n");

    sp<AMessage> reply = new AMessage('setu', id());
    reply->setSize("index", index);
    reply->setSize("track-index", mTracks.size() - 1);
    mConn->sendRequest(request.c_str(), reply);
}

An APacketSource object is created from mSessionDesc. setupTrack's only parameter is index, which marks which track this is; a track here is essentially one media source, i.e. an audio stream or a video stream.

Once track 1 is set up, a 'setu' message is posted back to MyHandler, the second track is set up, and so on.
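The request string built above goes out as a plain-text RTSP SETUP request, one per track. Roughly like this (the UDP case; host, ports, CSeq and session id below are invented for illustration, and the CSeq header is actually appended by sendRequest):

SETUP rtsp://example.com:8554/stream1/trackID=1 RTSP/1.0
Transport: RTP/AVP/UDP;unicast;client_port=15550-15551
CSeq: 3

SETUP rtsp://example.com:8554/stream1/trackID=2 RTSP/1.0
Transport: RTP/AVP/UDP;unicast;client_port=15552-15553
Session: 12345678
CSeq: 4

MakePortPair allocates the RTP socket on an even port and the RTCP socket on the next (odd) port, which is why client_port always advertises a consecutive pair.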

Each track that has been set up is then registered with:

mRTPConn->addStream(
        track->mRTPSocket, track->mRTCPSocket,
        mSessionDesc, index,
        notify, track->mUsingInterleavedTCP);

This adds the track's stream information to ARTPConnection; ARTPConnection is effectively the outward-facing interface for RTP traffic.

Note the notify message passed in here:

sp<AMessage> notify = new AMessage('accu', id());
notify->setSize("track-index", trackIndex);

This, too, is a reply-style message.

void ARTPConnection::addStream(
        int rtpSocket, int rtcpSocket,
        const sp<ASessionDescription> &sessionDesc,
        size_t index,
        const sp<AMessage> &notify,
        bool injected) {
    sp<AMessage> msg = new AMessage(kWhatAddStream, id());
    msg->setInt32("rtp-socket", rtpSocket);
    msg->setInt32("rtcp-socket", rtcpSocket);
    msg->setObject("session-desc", sessionDesc);
    msg->setSize("index", index);
    msg->setMessage("notify", notify);
    msg->setInt32("injected", injected);
    msg->post();
}

Next, look at ARTPConnection's case kWhatAddStream:

void ARTPConnection::onAddStream(const sp<AMessage> &msg) {
    mStreams.push_back(StreamInfo());
    StreamInfo *info = &*--mStreams.end();

    int32_t s;
    CHECK(msg->findInt32("rtp-socket", &s));
    info->mRTPSocket = s;
    CHECK(msg->findInt32("rtcp-socket", &s));
    info->mRTCPSocket = s;

    int32_t injected;
    CHECK(msg->findInt32("injected", &injected));

    info->mIsInjected = injected;

    sp<RefBase> obj;
    CHECK(msg->findObject("session-desc", &obj));
    info->mSessionDesc = static_cast<ASessionDescription *>(obj.get());

    CHECK(msg->findSize("index", &info->mIndex));
    CHECK(msg->findMessage("notify", &info->mNotifyMsg));

    info->mNumRTCPPacketsReceived = 0;
    info->mNumRTPPacketsReceived = 0;
    memset(&info->mRemoteRTCPAddr, 0, sizeof(info->mRemoteRTCPAddr));

    if (!injected) {
        postPollEvent();
    }
}

Here a new stream is added to the List<StreamInfo> mStreams list and initialized (rtp socket, rtcp socket),

and info->mNotifyMsg is assigned (remember, this is the AMessage('accu', id())).

At the end, postPollEvent is called.

This function is really what waits for the remote side to send media data:

void ARTPConnection::onPollStreams() {
    mPollEventPending = false;

    if (mStreams.empty()) {
        return;
    }

    struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = kSelectTimeoutUs;

    fd_set rs;
    FD_ZERO(&rs);

    int maxSocket = -1;
    for (List<StreamInfo>::iterator it = mStreams.begin();
         it != mStreams.end(); ++it) {
        if ((*it).mIsInjected) {
            continue;
        }

        FD_SET(it->mRTPSocket, &rs);
        FD_SET(it->mRTCPSocket, &rs);

        if (it->mRTPSocket > maxSocket) {
            maxSocket = it->mRTPSocket;
        }
        if (it->mRTCPSocket > maxSocket) {
            maxSocket = it->mRTCPSocket;
        }
    }

    if (maxSocket == -1) {
        return;
    }

    int res = select(maxSocket + 1, &rs, NULL, NULL, &tv);
    CHECK_GE(res, 0);

    if (res > 0) {
        for (List<StreamInfo>::iterator it = mStreams.begin();
             it != mStreams.end(); ++it) {
            if ((*it).mIsInjected) {
                continue;
            }

            if (FD_ISSET(it->mRTPSocket, &rs)) {
                receive(&*it, true);
            }
            if (FD_ISSET(it->mRTCPSocket, &rs)) {
                receive(&*it, false);
            }
        }
    }

    postPollEvent();

    int64_t nowUs = ALooper::GetNowUs();
    if (mLastReceiverReportTimeUs <= 0
            || mLastReceiverReportTimeUs + 5000000ll <= nowUs) {
        sp<ABuffer> buffer = new ABuffer(kMaxUDPSize);
        for (List<StreamInfo>::iterator it = mStreams.begin();
             it != mStreams.end(); ++it) {
            StreamInfo *s = &*it;

            if (s->mIsInjected) {
                continue;
            }

            if (s->mNumRTCPPacketsReceived == 0) {
                // We have never received any RTCP packets on this stream,
                // we don't even know where to send a report.
                continue;
            }

            buffer->setRange(0, 0);

            for (size_t i = 0; i < s->mSources.size(); ++i) {
                sp<ARTPSource> source = s->mSources.valueAt(i);

                source->addReceiverReport(buffer);

                if (mFlags & kRegularlyRequestFIR) {
                    source->addFIR(buffer);
                }
            }

            if (buffer->size() > 0) {
                LOGV("Sending RR...");

                ssize_t n = sendto(
                        s->mRTCPSocket, buffer->data(), buffer->size(), 0,
                        (const struct sockaddr *)&s->mRemoteRTCPAddr,
                        sizeof(s->mRemoteRTCPAddr));
                CHECK_EQ(n, (ssize_t)buffer->size());

                mLastReceiverReportTimeUs = nowUs;
            }
        }
    }
}

This function is crucial. int res = select(maxSocket + 1, &rs, NULL, NULL, &tv) monitors every stream's rtp socket and rtcp socket, checking whether either of them has data to read. When readable data is found, it calls:

status_t ARTPConnection::receive(StreamInfo *s, bool receiveRTP) {
    LOGV("receiving %s", receiveRTP ? "RTP" : "RTCP");

    CHECK(!s->mIsInjected);

    sp<ABuffer> buffer = new ABuffer(65536);

    socklen_t remoteAddrLen =
        (!receiveRTP && s->mNumRTCPPacketsReceived == 0)
            ? sizeof(s->mRemoteRTCPAddr) : 0;

    ssize_t nbytes = recvfrom(
            receiveRTP ? s->mRTPSocket : s->mRTCPSocket,
            buffer->data(),
            buffer->capacity(),
            0,
            remoteAddrLen > 0 ? (struct sockaddr *)&s->mRemoteRTCPAddr : NULL,
            remoteAddrLen > 0 ? &remoteAddrLen : NULL);

    if (nbytes < 0) {
        return -1;
    }

    buffer->setRange(0, nbytes);

    // LOGI("received %d bytes.", buffer->size());

    status_t err;
    if (receiveRTP) {
        err = parseRTP(s, buffer);
    } else {
        err = parseRTCP(s, buffer);
    }

    return err;
}
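Before following the control flow further, it helps to know what parseRTP is dealing with: every RTP packet starts with a fixed 12-byte header (version, payload type, sequence number, timestamp, SSRC). A minimal illustrative parser for just that fixed header (a sketch of the RTP packet format, not the actual stagefright parseRTP):

#include <cstddef>
#include <cstdint>

struct RtpHeader {
    uint8_t  version;         // should be 2
    bool     marker;
    uint8_t  payloadType;
    uint16_t sequenceNumber;  // used to detect loss and reordering
    uint32_t timestamp;       // media timestamp in RTP clock units
    uint32_t ssrc;            // identifies the sending source
};

// Parses the 12-byte RTP fixed header from a received datagram.
// Returns false if the buffer is too small or the version is wrong.
static bool parseRtpHeader(const uint8_t *data, size_t size, RtpHeader *out) {
    if (size < 12) return false;

    out->version = data[0] >> 6;
    if (out->version != 2) return false;

    out->marker = (data[1] & 0x80) != 0;
    out->payloadType = data[1] & 0x7f;
    out->sequenceNumber = (uint16_t)((data[2] << 8) | data[3]);
    out->timestamp = ((uint32_t)data[4] << 24) | ((uint32_t)data[5] << 16)
            | ((uint32_t)data[6] << 8) | data[7];
    out->ssrc = ((uint32_t)data[8] << 24) | ((uint32_t)data[9] << 16)
            | ((uint32_t)data[10] << 8) | data[11];
    return true;
}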

receive() reads the incoming packet; RTP data is then parsed by parseRTP (the fixed-header fields it deals with are sketched above). The very first thing in the parsing is:

if (s->mNumRTPPacketsReceived++ == 0) {
    sp<AMessage> notify = s->mNotifyMsg->dup();
    notify->setInt32("first-rtp", true);
    notify->post();
}

So when the very first rtp packet arrives, the AMessage('accu', id()) from earlier is triggered, which brings us back to

case 'accu': in MyHandler.

Inside it there is one crucial piece of code:

if (mFirstAccessUnit) {
    mDoneMsg->setInt32("result", OK);
    mDoneMsg->post();
    mDoneMsg = NULL;

    mFirstAccessUnit = false;
    mFirstAccessUnitNTP = ntpTime;
}

You may have long since forgotten what this mDoneMsg is; the answer is back in ARTSPController::connect. That's right, it is the kWhatConnectDone message:

case kWhatConnectDone:
{
    Mutex::Autolock autoLock(mLock);

    CHECK(msg->findInt32("result", &mConnectionResult));
    mState = (mConnectionResult == OK) ? CONNECTED : DISCONNECTED;

    mCondition.signal();
    break;
}

There it is: mCondition.signal() wakes up the waiting thread, and only at this point can ARTSPController::connect truly return.

So the seemingly simple call mRTSPController->connect(mUri.string()) goes through all of this before it comes back!

To summarize:

1. ARTSPController mainly controls connecting, disconnecting and seeking for the stream, but the real work is done by its member MyHandler.

2. Inside MyHandler, ARTSPConnection is responsible for sending and receiving the RTSP traffic.

3. ARTSPConnection first parses the URL to get the host address and port, then opens a socket connection to the remote host and sets up a listening mechanism to wait for data sent back by it.

The media types are first negotiated with the remote host via SDP, and based on them separate StreamInfo entries are created for audio and video (each StreamInfo has its own rtp socket and rtcp socket).

These sockets are then monitored; when the first rtp packet arrives, the connection is considered established and connect returns.

All of the above happens inside the RTSP module. RTSP is part of the stagefright framework, so its classes derive from stagefright's base classes:

struct APacketSource : public MediaSource

struct ARTSPController : public MediaExtractor

In finishSetDataSource_l, the ARTSPController is then set as the data source, i.e. passed to setDataSource_l as the MediaExtractor:

status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    // Attempt to approximate overall stream bitrate by summing all
    // tracks' individual bitrates, if not all of them advertise bitrate,
    // we have to fail.

    int64_t totalBitRate = 0;

    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        int32_t bitrate;
        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
            totalBitRate = -1;
            break;
        }

        totalBitRate += bitrate;
    }

    mBitrate = totalBitRate;

    LOGV("mBitrate = %lld bits/sec", mBitrate);

    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;
        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;

            if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                // Only do this for vorbis audio, none of the other audio
                // formats even support this ringtone specific hack and
                // retrieving the metadata on some extractors may turn out
                // to be very expensive.
                sp<MetaData> fileMeta = extractor->getMetaData();
                int32_t loop;
                if (fileMeta != NULL
                        && fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
                    mFlags |= AUTO_LOOPING;
                }
            }
        }

        if (haveAudio && haveVideo) {
            break;
        }
    }

    if (!haveAudio && !haveVideo) {
        return UNKNOWN_ERROR;
    }

    mExtractorFlags = extractor->flags();

    return OK;
}

Here the audio and video tracks are both extracted, and then:

void AwesomePlayer::onPrepareAsyncEvent() {
    Mutex::Autolock autoLock(mLock);

    if (mFlags & PREPARE_CANCELLED) {
        LOGI("prepare was cancelled before doing anything");
        abortPrepare(UNKNOWN_ERROR);
        return;
    }

    if (mUri.size() > 0) {
        status_t err = finishSetDataSource_l();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mVideoTrack != NULL && mVideoSource == NULL) {
        status_t err = initVideoDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mAudioTrack != NULL && mAudioSource == NULL) {
        status_t err = initAudioDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mCachedSource != NULL || mRTSPController != NULL) {
        postBufferingEvent_l();
    } else {
        finishAsyncPrepare_l();
    }
}

The audio and video decoders are initialized, which completes the whole prepare sequence.

When the user later calls play, that is really just the point where data starts being received, decoded, and then rendered.
