author    Robert Shih <robertshih@google.com>    2015-04-08 09:06:54 -0700
committer Robert Shih <robertshih@google.com>    2015-04-16 19:01:15 -0700
commit    0852843d304006e3ab333081fddda13b07193de8 (patch)
tree      f60be26aad988e89bc135a86f6e4ae8853c69a49 /media/libstagefright/httplive
parent    3d66eb4128aebef31bb0fa44c4d53d6122294a26 (diff)
stagefright: initial timed id3 support in hls
Change-Id: I00a8a786b3f4b74742c34770edd94e937abe20a8
Diffstat (limited to 'media/libstagefright/httplive')
-rw-r--r--  media/libstagefright/httplive/LiveSession.cpp      147
-rw-r--r--  media/libstagefright/httplive/LiveSession.h          15
-rw-r--r--  media/libstagefright/httplive/PlaylistFetcher.cpp    77
-rw-r--r--  media/libstagefright/httplive/PlaylistFetcher.h       5
4 files changed, 192 insertions, 52 deletions
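Not part of the patch: the sketch below illustrates how a client of LiveSession might pick up the new timed ID3 track once it is reported, using only the API this change adds (kWhatMetadataDetected, STREAMTYPE_METADATA, dequeueAccessUnit). The handler name and surrounding plumbing are hypothetical; the real NuPlayer wiring, includes, and error handling are omitted.

// Hypothetical caller-side handler (illustration only, not part of this change).
void onLiveSessionNotify(const sp<LiveSession> &session, const sp<AMessage> &msg) {
    int32_t what;
    CHECK(msg->findInt32("what", &what));

    if (what == LiveSession::kWhatMetadataDetected) {
        // LiveSession now reports one extra track (MEDIA_TRACK_TYPE_METADATA,
        // mime MEDIA_MIMETYPE_DATA_METADATA) via getTrackCount()/getTrackInfo().
        // Drain whatever timed ID3 access units are ready; -EAGAIN means the
        // discontinuity start time is not yet known and the unit was requeued.
        sp<ABuffer> accessUnit;
        while (session->dequeueAccessUnit(
                LiveSession::STREAMTYPE_METADATA, &accessUnit) == OK) {
            int64_t timeUs;
            CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
            ALOGV("timed ID3 tag (%zu bytes) at media time %lld us",
                    accessUnit->size(), (long long)timeUs);
        }
    }
}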
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 4886000..203444a 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -38,6 +38,7 @@
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
@@ -135,6 +136,8 @@ const char *LiveSession::getKeyForStream(StreamType type) {
return "timeUsAudio";
case STREAMTYPE_SUBTITLES:
return "timeUsSubtitle";
+ case STREAMTYPE_METADATA:
+ return "timeUsMetadata"; // unused
default:
TRESPASS();
}
@@ -150,12 +153,30 @@ const char *LiveSession::getNameForStream(StreamType type) {
return "audio";
case STREAMTYPE_SUBTITLES:
return "subs";
+ case STREAMTYPE_METADATA:
+ return "metadata";
default:
break;
}
return "unknown";
}
+//static
+ATSParser::SourceType LiveSession::getSourceTypeForStream(StreamType type) {
+ switch (type) {
+ case STREAMTYPE_VIDEO:
+ return ATSParser::VIDEO;
+ case STREAMTYPE_AUDIO:
+ return ATSParser::AUDIO;
+ case STREAMTYPE_METADATA:
+ return ATSParser::META;
+ case STREAMTYPE_SUBTITLES:
+ default:
+ TRESPASS();
+ }
+ return ATSParser::NUM_SOURCE_TYPES; // should not reach here
+}
+
LiveSession::LiveSession(
const sp<AMessage> &notify, uint32_t flags,
const sp<IMediaHTTPService> &httpService)
@@ -187,12 +208,13 @@ LiveSession::LiveSession(
mUpSwitchMargin(kUpSwitchMarginUs),
mFirstTimeUsValid(false),
mFirstTimeUs(0),
- mLastSeekTimeUs(0) {
+ mLastSeekTimeUs(0),
+ mHasMetadata(false) {
mStreams[kAudioIndex] = StreamItem("audio");
mStreams[kVideoIndex] = StreamItem("video");
mStreams[kSubtitleIndex] = StreamItem("subtitles");
- for (size_t i = 0; i < kMaxStreams; ++i) {
+ for (size_t i = 0; i < kNumSources; ++i) {
mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
}
@@ -204,6 +226,20 @@ LiveSession::~LiveSession() {
}
}
+int64_t LiveSession::calculateMediaTimeUs(
+ int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq) {
+ if (timeUs >= firstTimeUs) {
+ timeUs -= firstTimeUs;
+ } else {
+ timeUs = 0;
+ }
+ timeUs += mLastSeekTimeUs;
+ if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
+ timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
+ }
+ return timeUs;
+}
+
status_t LiveSession::dequeueAccessUnit(
StreamType stream, sp<ABuffer> *accessUnit) {
status_t finalResult = OK;
@@ -236,7 +272,6 @@ status_t LiveSession::dequeueAccessUnit(
status_t err = packetSource->dequeueAccessUnit(accessUnit);
- StreamItem& strm = mStreams[streamIdx];
if (err == INFO_DISCONTINUITY) {
// adaptive streaming, discontinuities in the playlist
int32_t type;
@@ -256,6 +291,7 @@ status_t LiveSession::dequeueAccessUnit(
if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
int64_t timeUs, originalTimeUs;
int32_t discontinuitySeq = 0;
+ StreamItem& strm = mStreams[streamIdx];
CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
originalTimeUs = timeUs;
(*accessUnit)->meta()->findInt32("discontinuitySeq", &discontinuitySeq);
@@ -299,15 +335,7 @@ status_t LiveSession::dequeueAccessUnit(
}
strm.mLastDequeuedTimeUs = timeUs;
- if (timeUs >= firstTimeUs) {
- timeUs -= firstTimeUs;
- } else {
- timeUs = 0;
- }
- timeUs += mLastSeekTimeUs;
- if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
- timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
- }
+ timeUs = calculateMediaTimeUs(firstTimeUs, timeUs, discontinuitySeq);
ALOGV("[%s] dequeueAccessUnit: time %lld us, original %lld us",
streamStr, (long long)timeUs, (long long)originalTimeUs);
@@ -323,6 +351,17 @@ status_t LiveSession::dequeueAccessUnit(
(*accessUnit)->meta()->setInt32(
"trackIndex", mPlaylist->getSelectedIndex());
(*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs);
+ } else if (stream == STREAMTYPE_METADATA) {
+ HLSTime mdTime((*accessUnit)->meta());
+ if (mDiscontinuityAbsStartTimesUs.indexOfKey(mdTime.mSeq) < 0) {
+ packetSource->requeueAccessUnit((*accessUnit));
+ return -EAGAIN;
+ } else {
+ int64_t firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(mdTime.mSeq);
+ int64_t timeUs = calculateMediaTimeUs(firstTimeUs, mdTime.mTimeUs, mdTime.mSeq);
+ (*accessUnit)->meta()->setInt64("timeUs", timeUs);
+ (*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs);
+ }
}
} else {
ALOGI("[%s] encountered error %d", streamStr, err);
@@ -728,6 +767,17 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case PlaylistFetcher::kWhatMetadataDetected:
+ {
+ if (!mHasMetadata) {
+ mHasMetadata = true;
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatMetadataDetected);
+ notify->post();
+ }
+ break;
+ }
+
default:
TRESPASS();
}
@@ -788,7 +838,7 @@ int LiveSession::SortByBandwidth(const BandwidthItem *a, const BandwidthItem *b)
// static
LiveSession::StreamType LiveSession::indexToType(int idx) {
- CHECK(idx >= 0 && idx < kMaxStreams);
+ CHECK(idx >= 0 && idx < kNumSources);
return (StreamType)(1 << idx);
}
@@ -801,6 +851,8 @@ ssize_t LiveSession::typeToIndex(int32_t type) {
return 1;
case STREAMTYPE_SUBTITLES:
return 2;
+ case STREAMTYPE_METADATA:
+ return 3;
default:
return -1;
};
@@ -1179,6 +1231,45 @@ static double uniformRand() {
}
#endif
+bool LiveSession::UriIsSameAsIndex(const AString &uri, int32_t i, bool newUri) {
+ ALOGI("[timed_id3] i %d UriIsSameAsIndex newUri %s, %s", i,
+ newUri ? "true" : "false",
+ newUri ? mStreams[i].mNewUri.c_str() : mStreams[i].mUri.c_str());
+ return i >= 0
+ && ((!newUri && uri == mStreams[i].mUri)
+ || (newUri && uri == mStreams[i].mNewUri));
+}
+
+sp<AnotherPacketSource> LiveSession::getPacketSourceForStreamIndex(
+ size_t trackIndex, bool newUri) {
+ StreamType type = indexToType(trackIndex);
+ sp<AnotherPacketSource> source = NULL;
+ if (newUri) {
+ source = mPacketSources2.valueFor(type);
+ source->clear();
+ } else {
+ source = mPacketSources.valueFor(type);
+ };
+ return source;
+}
+
+sp<AnotherPacketSource> LiveSession::getMetadataSource(
+ sp<AnotherPacketSource> sources[kNumSources], uint32_t streamMask, bool newUri) {
+ // todo: One case where the following strategy can fail is when audio and video
+ // are in separate playlists, both are transport streams, and the metadata
+ // is actually contained in the audio stream.
+ ALOGV("[timed_id3] getMetadataSourceForUri streamMask %x newUri %s",
+ streamMask, newUri ? "true" : "false");
+
+ if ((sources[kVideoIndex] != NULL) // video fetcher; or ...
+ || (!(streamMask & STREAMTYPE_VIDEO) && sources[kAudioIndex] != NULL)) {
+ // ... audio fetcher for audio only variant
+ return getPacketSourceForStreamIndex(kMetaDataIndex, newUri);
+ }
+
+ return NULL;
+}
+
bool LiveSession::resumeFetcher(
const AString &uri, uint32_t streamMask, int64_t timeUs, bool newUri) {
ssize_t index = mFetcherInfos.indexOfKey(uri);
@@ -1188,18 +1279,11 @@ bool LiveSession::resumeFetcher(
}
bool resume = false;
- sp<AnotherPacketSource> sources[kMaxStreams];
+ sp<AnotherPacketSource> sources[kNumSources];
for (size_t i = 0; i < kMaxStreams; ++i) {
- if ((streamMask & indexToType(i))
- && ((!newUri && uri == mStreams[i].mUri)
- || (newUri && uri == mStreams[i].mNewUri))) {
+ if ((streamMask & indexToType(i)) && UriIsSameAsIndex(uri, i, newUri)) {
resume = true;
- if (newUri) {
- sources[i] = mPacketSources2.valueFor(indexToType(i));
- sources[i]->clear();
- } else {
- sources[i] = mPacketSources.valueFor(indexToType(i));
- }
+ sources[i] = getPacketSourceForStreamIndex(i, newUri);
}
}
@@ -1214,6 +1298,7 @@ bool LiveSession::resumeFetcher(
sources[kAudioIndex],
sources[kVideoIndex],
sources[kSubtitleIndex],
+ getMetadataSource(sources, streamMask, newUri),
timeUs, -1, -1, seekMode);
}
@@ -1424,7 +1509,7 @@ size_t LiveSession::getTrackCount() const {
if (mPlaylist == NULL) {
return 0;
} else {
- return mPlaylist->getTrackCount();
+ return mPlaylist->getTrackCount() + (mHasMetadata ? 1 : 0);
}
}
@@ -1432,6 +1517,13 @@ sp<AMessage> LiveSession::getTrackInfo(size_t trackIndex) const {
if (mPlaylist == NULL) {
return NULL;
} else {
+ if (trackIndex == mPlaylist->getTrackCount() && mHasMetadata) {
+ sp<AMessage> format = new AMessage();
+ format->setInt32("type", MEDIA_TRACK_TYPE_METADATA);
+ format->setString("language", "und");
+ format->setString("mime", MEDIA_MIMETYPE_DATA_METADATA);
+ return format;
+ }
return mPlaylist->getTrackInfo(trackIndex);
}
}
@@ -1768,7 +1860,7 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
HLSTime startTime;
SeekMode seekMode = kSeekModeExactPosition;
- sp<AnotherPacketSource> sources[kMaxStreams];
+ sp<AnotherPacketSource> sources[kNumSources];
if (i == kSubtitleIndex || (!pickTrack && !switching)) {
startTime = latestMediaSegmentStartTime();
@@ -1797,8 +1889,8 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
}
}
- if (j != kSubtitleIndex && meta != NULL
- && !meta->findInt32("discontinuity", &type)) {
+ if ((j == kAudioIndex || j == kVideoIndex)
+ && meta != NULL && !meta->findInt32("discontinuity", &type)) {
HLSTime tmpTime(meta);
if (startTime < tmpTime) {
startTime = tmpTime;
@@ -1851,6 +1943,7 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
sources[kAudioIndex],
sources[kVideoIndex],
sources[kSubtitleIndex],
+ getMetadataSource(sources, mNewStreamMask, switching),
startTime.mTimeUs < 0 ? mLastSeekTimeUs : startTime.mTimeUs,
startTime.getSegmentTimeUs(true /* midpoint */),
startTime.mSeq,
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index ed74bc2..86d0498 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -23,6 +23,8 @@
#include <utils/String8.h>
+#include "mpeg2ts/ATSParser.h"
+
namespace android {
struct ABuffer;
@@ -47,12 +49,15 @@ struct LiveSession : public AHandler {
kVideoIndex = 1,
kSubtitleIndex = 2,
kMaxStreams = 3,
+ kMetaDataIndex = 3,
+ kNumSources = 4,
};
enum StreamType {
STREAMTYPE_AUDIO = 1 << kAudioIndex,
STREAMTYPE_VIDEO = 1 << kVideoIndex,
STREAMTYPE_SUBTITLES = 1 << kSubtitleIndex,
+ STREAMTYPE_METADATA = 1 << kMetaDataIndex,
};
enum SeekMode {
@@ -66,6 +71,7 @@ struct LiveSession : public AHandler {
uint32_t flags,
const sp<IMediaHTTPService> &httpService);
+ int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
@@ -92,6 +98,7 @@ struct LiveSession : public AHandler {
static const char *getKeyForStream(StreamType type);
static const char *getNameForStream(StreamType type);
+ static ATSParser::SourceType getSourceTypeForStream(StreamType type);
enum {
kWhatStreamsChanged,
@@ -101,6 +108,7 @@ struct LiveSession : public AHandler {
kWhatBufferingStart,
kWhatBufferingEnd,
kWhatBufferingUpdate,
+ kWhatMetadataDetected,
};
protected:
@@ -233,6 +241,8 @@ private:
bool mFirstTimeUsValid;
int64_t mFirstTimeUs;
int64_t mLastSeekTimeUs;
+ bool mHasMetadata;
+
KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
@@ -268,6 +278,11 @@ private:
sp<M3UParser> fetchPlaylist(
const char *url, uint8_t *curPlaylistHash, bool *unchanged);
+ bool UriIsSameAsIndex( const AString &uri, int32_t index, bool newUri);
+ sp<AnotherPacketSource> getPacketSourceForStreamIndex(size_t trackIndex, bool newUri);
+ sp<AnotherPacketSource> getMetadataSource(
+ sp<AnotherPacketSource> sources[kNumSources], uint32_t streamMask, bool newUri);
+
bool resumeFetcher(
const AString &uri, uint32_t streamMask,
int64_t timeUs = -1ll, bool newUri = false);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index ce79cc2..a4e523d 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -17,6 +17,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "PlaylistFetcher"
#include <utils/Log.h>
+#include <utils/misc.h>
#include "PlaylistFetcher.h"
@@ -174,7 +175,8 @@ PlaylistFetcher::PlaylistFetcher(
mFirstTimeUs(-1ll),
mVideoBuffer(new AnotherPacketSource(NULL)),
mThresholdRatio(-1.0f),
- mDownloadState(new DownloadState()) {
+ mDownloadState(new DownloadState()),
+ mHasMetadata(false) {
memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
mHTTPDataSource = mSession->getHTTPDataSource();
}
@@ -470,6 +472,7 @@ void PlaylistFetcher::startAsync(
const sp<AnotherPacketSource> &audioSource,
const sp<AnotherPacketSource> &videoSource,
const sp<AnotherPacketSource> &subtitleSource,
+ const sp<AnotherPacketSource> &metadataSource,
int64_t startTimeUs,
int64_t segmentStartTimeUs,
int32_t startDiscontinuitySeq,
@@ -493,6 +496,11 @@ void PlaylistFetcher::startAsync(
streamTypeMask |= LiveSession::STREAMTYPE_SUBTITLES;
}
+ if (metadataSource != NULL) {
+ msg->setPointer("metadataSource", metadataSource.get());
+ // metadataSource does not affect streamTypeMask.
+ }
+
msg->setInt32("streamTypeMask", streamTypeMask);
msg->setInt64("startTimeUs", startTimeUs);
msg->setInt64("segmentStartTimeUs", segmentStartTimeUs);
@@ -637,6 +645,15 @@ status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) {
static_cast<AnotherPacketSource *>(ptr));
}
+ void *ptr;
+ // metadataSource is not part of streamTypeMask
+ if ((streamTypeMask & (LiveSession::STREAMTYPE_AUDIO | LiveSession::STREAMTYPE_VIDEO))
+ && msg->findPointer("metadataSource", &ptr)) {
+ mPacketSources.add(
+ LiveSession::STREAMTYPE_METADATA,
+ static_cast<AnotherPacketSource *>(ptr));
+ }
+
mStreamTypeMask = streamTypeMask;
mSegmentStartTimeUs = segmentStartTimeUs;
@@ -1315,11 +1332,11 @@ void PlaylistFetcher::onDownloadNext() {
if (bufferStartsWithTsSyncByte(buffer)) {
// If we don't see a stream in the program table after fetching a full ts segment
// mark it as nonexistent.
- const size_t kNumTypes = ATSParser::NUM_SOURCE_TYPES;
- ATSParser::SourceType srcTypes[kNumTypes] =
+ ATSParser::SourceType srcTypes[] =
{ ATSParser::VIDEO, ATSParser::AUDIO };
- LiveSession::StreamType streamTypes[kNumTypes] =
+ LiveSession::StreamType streamTypes[] =
{ LiveSession::STREAMTYPE_VIDEO, LiveSession::STREAMTYPE_AUDIO };
+ const size_t kNumTypes = NELEM(srcTypes);
for (size_t i = 0; i < kNumTypes; i++) {
ATSParser::SourceType srcType = srcTypes[i];
@@ -1502,6 +1519,27 @@ const sp<ABuffer> &PlaylistFetcher::setAccessUnitProperties(
return accessUnit;
}
+bool PlaylistFetcher::isStartTimeReached(int64_t timeUs) {
+ if (!mFirstPTSValid) {
+ mFirstTimeUs = timeUs;
+ mFirstPTSValid = true;
+ }
+ bool startTimeReached = true;
+ if (mStartTimeUsRelative) {
+ FLOGV("startTimeUsRelative, timeUs (%lld) - %lld = %lld",
+ (long long)timeUs,
+ (long long)mFirstTimeUs,
+ (long long)(timeUs - mFirstTimeUs));
+ timeUs -= mFirstTimeUs;
+ if (timeUs < 0) {
+ FLOGV("clamp negative timeUs to 0");
+ timeUs = 0;
+ }
+ startTimeReached = (timeUs >= mStartTimeUs);
+ }
+ return startTimeReached;
+}
+
status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer) {
if (mTSParser == NULL) {
// Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
@@ -1548,10 +1586,9 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
ALOGE("MPEG2 Transport streams do not contain subtitles.");
return ERROR_MALFORMED;
}
+
const char *key = LiveSession::getKeyForStream(stream);
- ATSParser::SourceType type =
- (stream == LiveSession::STREAMTYPE_AUDIO) ?
- ATSParser::AUDIO : ATSParser::VIDEO;
+ ATSParser::SourceType type = LiveSession::getSourceTypeForStream(stream);
sp<AnotherPacketSource> source =
static_cast<AnotherPacketSource *>(
@@ -1637,23 +1674,7 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
}
}
if (mStartup) {
- if (!mFirstPTSValid) {
- mFirstTimeUs = timeUs;
- mFirstPTSValid = true;
- }
- bool startTimeReached = true;
- if (mStartTimeUsRelative) {
- FLOGV("startTimeUsRelative, timeUs (%lld) - %lld = %lld",
- (long long)timeUs,
- (long long)mFirstTimeUs,
- (long long)(timeUs - mFirstTimeUs));
- timeUs -= mFirstTimeUs;
- if (timeUs < 0) {
- FLOGV("clamp negative timeUs to 0");
- timeUs = 0;
- }
- startTimeReached = (timeUs >= mStartTimeUs);
- }
+ bool startTimeReached = isStartTimeReached(timeUs);
if (!startTimeReached || (isAvc && !mIDRFound)) {
// buffer up to the closest preceding IDR frame in the next segment,
@@ -1680,7 +1701,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
if (mStartTimeUsNotify != NULL) {
uint32_t streamMask = 0;
mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
- if (!(streamMask & mPacketSources.keyAt(i))) {
+ if ((mStreamTypeMask & mPacketSources.keyAt(i))
+ && !(streamMask & mPacketSources.keyAt(i))) {
streamMask |= mPacketSources.keyAt(i);
mStartTimeUsNotify->setInt32("streamMask", streamMask);
FSLOGV(stream, "found start point, timeUs=%lld, streamMask becomes %x",
@@ -1721,6 +1743,11 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
FSLOGV(stream, "queueAccessUnit (saved), timeUs=%lld",
(long long)bufferTimeUs);
}
+ } else if (stream == LiveSession::STREAMTYPE_METADATA && !mHasMetadata) {
+ mHasMetadata = true;
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatMetadataDetected);
+ notify->post();
}
setAccessUnitProperties(accessUnit, source);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index f64d160..bb14a0d 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -49,6 +49,7 @@ struct PlaylistFetcher : public AHandler {
kWhatPreparationFailed,
kWhatStartedAt,
kWhatStopReached,
+ kWhatMetadataDetected,
};
PlaylistFetcher(
@@ -66,6 +67,7 @@ struct PlaylistFetcher : public AHandler {
const sp<AnotherPacketSource> &audioSource,
const sp<AnotherPacketSource> &videoSource,
const sp<AnotherPacketSource> &subtitleSource,
+ const sp<AnotherPacketSource> &metadataSource,
int64_t startTimeUs = -1ll, // starting timestamps
int64_t segmentStartTimeUs = -1ll, // starting position within playlist
// startTimeUs!=segmentStartTimeUs only when playlist is live
@@ -177,6 +179,8 @@ private:
sp<DownloadState> mDownloadState;
+ bool mHasMetadata;
+
// Set first to true if decrypting the first segment of a playlist segment. When
// first is true, reset the initialization vector based on the available
// information in the manifest; otherwise, use the initialization vector as
@@ -222,6 +226,7 @@ private:
const sp<ABuffer> &accessUnit,
const sp<AnotherPacketSource> &source,
bool discard = false);
+ bool isStartTimeReached(int64_t timeUs);
status_t extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer);
status_t extractAndQueueAccessUnits(
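For reference (outside the patch), the mapping that calculateMediaTimeUs() applies to a tag's transport-stream timestamp can be sketched in isolation; the numbers below are made up for illustration.

// Standalone illustration with assumed values; mirrors calculateMediaTimeUs():
//   mediaTimeUs = max(0, timeUs - firstTimeUs) + lastSeekTimeUs
//                 + offset recorded for the tag's discontinuity sequence (if any).
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    int64_t firstTimeUs           = 90000000;  // absolute start of this discontinuity
    int64_t tagTimeUs             = 90005000;  // ID3 tag PTS from the transport stream
    int64_t lastSeekTimeUs        = 30000000;  // last seek position in media time
    int64_t discontinuityOffsetUs =  2000000;  // accumulated offset for this sequence

    int64_t mediaTimeUs = std::max<int64_t>(0, tagTimeUs - firstTimeUs)
            + lastSeekTimeUs + discontinuityOffsetUs;
    printf("ID3 tag surfaces at %lld us of media time\n", (long long)mediaTimeUs);
    return 0;
}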