/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayer"

#include

#include "NuPlayer.h"

#include "HTTPLiveSource.h"
#include "NuPlayerDecoder.h"
#include "NuPlayerDecoderPassThrough.h"
#include "NuPlayerDriver.h"
#include "NuPlayerRenderer.h"
#include "NuPlayerSource.h"
#include "RTSPSource.h"
#include "StreamingSource.h"
#include "GenericSource.h"
#include "TextDescriptions.h"

#include "ATSParser.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "avc_utils.h"

#include "ESDS.h"
#include

namespace android {

// TODO optimize buffer size for power consumption
// The offload read buffer size is 32 KB but 24 KB uses less power.
const size_t NuPlayer::kAggregateBufferSizeBytes = 24 * 1024;

struct NuPlayer::Action : public RefBase {
    Action() {}

    virtual void execute(NuPlayer *player) = 0;

private:
    DISALLOW_EVIL_CONSTRUCTORS(Action);
};

struct NuPlayer::SeekAction : public Action {
    SeekAction(int64_t seekTimeUs)
        : mSeekTimeUs(seekTimeUs) {
    }

    virtual void execute(NuPlayer *player) {
        player->performSeek(mSeekTimeUs);
    }

private:
    int64_t mSeekTimeUs;

    DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
};

struct NuPlayer::SetSurfaceAction : public Action {
    SetSurfaceAction(const sp<NativeWindowWrapper> &wrapper)
        : mWrapper(wrapper) {
    }

    virtual void execute(NuPlayer *player) {
        player->performSetSurface(mWrapper);
    }

private:
    sp<NativeWindowWrapper> mWrapper;

    DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
};

struct NuPlayer::ShutdownDecoderAction : public Action {
    ShutdownDecoderAction(bool audio, bool video)
        : mAudio(audio),
          mVideo(video) {
    }

    virtual void execute(NuPlayer *player) {
        player->performDecoderShutdown(mAudio, mVideo);
    }

private:
    bool mAudio;
    bool mVideo;

    DISALLOW_EVIL_CONSTRUCTORS(ShutdownDecoderAction);
};

struct NuPlayer::PostMessageAction : public Action {
    PostMessageAction(const sp<AMessage> &msg)
        : mMessage(msg) {
    }

    virtual void execute(NuPlayer *) {
        mMessage->post();
    }

private:
    sp<AMessage> mMessage;

    DISALLOW_EVIL_CONSTRUCTORS(PostMessageAction);
};

// Use this if there's no state necessary to save in order to execute
// the action.
struct NuPlayer::SimpleAction : public Action {
    typedef void (NuPlayer::*ActionFunc)();

    SimpleAction(ActionFunc func)
        : mFunc(func) {
    }

    virtual void execute(NuPlayer *player) {
        (player->*mFunc)();
    }

private:
    ActionFunc mFunc;

    DISALLOW_EVIL_CONSTRUCTORS(SimpleAction);
};

////////////////////////////////////////////////////////////////////////////////

NuPlayer::NuPlayer()
    : mUIDValid(false),
      mSourceFlags(0),
      mCurrentPositionUs(0),
      mVideoIsAVC(false),
      mOffloadAudio(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mAudioDecoderGeneration(0),
      mVideoDecoderGeneration(0),
      mAudioEOS(false),
      mVideoEOS(false),
      mScanSourcesPending(false),
      mScanSourcesGeneration(0),
      mPollDurationGeneration(0),
      mTimedTextGeneration(0),
      mTimeDiscontinuityPending(false),
      mFlushingAudio(NONE),
      mFlushingVideo(NONE),
      mSkipRenderingAudioUntilMediaTimeUs(-1ll),
      mSkipRenderingVideoUntilMediaTimeUs(-1ll),
      mVideoLateByUs(0ll),
      mNumFramesTotal(0ll),
      mNumFramesDropped(0ll),
      mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
      mStarted(false) {
}

NuPlayer::~NuPlayer() {
}

void NuPlayer::setUID(uid_t uid) {
    mUIDValid = true;
    mUID = uid;
}

void NuPlayer::setDriver(const wp<NuPlayerDriver> &driver) {
    mDriver = driver;
}

void NuPlayer::setDataSourceAsync(const sp<IStreamSource> &source) {
    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());

    sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());

    msg->setObject("source", new StreamingSource(notify, source));
    msg->post();
}

static bool IsHTTPLiveURL(const char *url) {
    if (!strncasecmp("http://", url, 7)
            || !strncasecmp("https://", url, 8)
            || !strncasecmp("file://", url, 7)) {
        size_t len = strlen(url);
        if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) {
            return true;
        }

        if (strstr(url, "m3u8")) {
            return true;
        }
    }

    return false;
}

void NuPlayer::setDataSourceAsync(
        const sp<IMediaHTTPService> &httpService,
        const char *url,
        const KeyedVector<String8, String8> *headers) {
    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
    size_t len = strlen(url);

    sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());

    sp<Source> source;
    if (IsHTTPLiveURL(url)) {
        source = new HTTPLiveSource(notify, httpService, url, headers);
    } else if (!strncasecmp(url, "rtsp://", 7)) {
        source = new RTSPSource(
                notify, httpService, url, headers, mUIDValid, mUID);
    } else if ((!strncasecmp(url, "http://", 7)
                    || !strncasecmp(url, "https://", 8))
                && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
                    || strstr(url, ".sdp?"))) {
        source = new RTSPSource(
                notify, httpService, url, headers, mUIDValid, mUID, true);
    } else {
        sp<GenericSource> genericSource =
                new GenericSource(notify, mUIDValid, mUID);
        // Don't set FLAG_SECURE on mSourceFlags here for widevine.
        // The correct flags will be updated in Source::kWhatFlagsChanged
        // handler when GenericSource is prepared.
status_t err = genericSource->setDataSource(httpService, url, headers); if (err == OK) { source = genericSource; } else { ALOGE("Failed to set data source!"); } } msg->setObject("source", source); msg->post(); } void NuPlayer::setDataSourceAsync(int fd, int64_t offset, int64_t length) { sp msg = new AMessage(kWhatSetDataSource, id()); sp notify = new AMessage(kWhatSourceNotify, id()); sp source = new GenericSource(notify, mUIDValid, mUID); status_t err = source->setDataSource(fd, offset, length); if (err != OK) { ALOGE("Failed to set data source!"); source = NULL; } msg->setObject("source", source); msg->post(); } void NuPlayer::prepareAsync() { (new AMessage(kWhatPrepare, id()))->post(); } void NuPlayer::setVideoSurfaceTextureAsync( const sp &bufferProducer) { sp msg = new AMessage(kWhatSetVideoNativeWindow, id()); if (bufferProducer == NULL) { msg->setObject("native-window", NULL); } else { msg->setObject( "native-window", new NativeWindowWrapper( new Surface(bufferProducer, true /* controlledByApp */))); } msg->post(); } void NuPlayer::setAudioSink(const sp &sink) { sp msg = new AMessage(kWhatSetAudioSink, id()); msg->setObject("sink", sink); msg->post(); } void NuPlayer::start() { (new AMessage(kWhatStart, id()))->post(); } void NuPlayer::pause() { (new AMessage(kWhatPause, id()))->post(); } void NuPlayer::resume() { (new AMessage(kWhatResume, id()))->post(); } void NuPlayer::resetAsync() { (new AMessage(kWhatReset, id()))->post(); } void NuPlayer::seekToAsync(int64_t seekTimeUs) { sp msg = new AMessage(kWhatSeek, id()); msg->setInt64("seekTimeUs", seekTimeUs); msg->post(); } // static bool NuPlayer::IsFlushingState(FlushStatus state, bool *needShutdown) { switch (state) { case FLUSHING_DECODER: if (needShutdown != NULL) { *needShutdown = false; } return true; case FLUSHING_DECODER_SHUTDOWN: if (needShutdown != NULL) { *needShutdown = true; } return true; default: return false; } } void NuPlayer::writeTrackInfo( Parcel* reply, const sp format) const { int32_t trackType; CHECK(format->findInt32("type", &trackType)); AString lang; CHECK(format->findString("language", &lang)); reply->writeInt32(2); // write something non-zero reply->writeInt32(trackType); reply->writeString16(String16(lang.c_str())); if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) { AString mime; CHECK(format->findString("mime", &mime)); int32_t isAuto, isDefault, isForced; CHECK(format->findInt32("auto", &isAuto)); CHECK(format->findInt32("default", &isDefault)); CHECK(format->findInt32("forced", &isForced)); reply->writeString16(String16(mime.c_str())); reply->writeInt32(isAuto); reply->writeInt32(isDefault); reply->writeInt32(isForced); } } void NuPlayer::onMessageReceived(const sp &msg) { switch (msg->what()) { case kWhatSetDataSource: { ALOGV("kWhatSetDataSource"); CHECK(mSource == NULL); status_t err = OK; sp obj; CHECK(msg->findObject("source", &obj)); if (obj != NULL) { mSource = static_cast(obj.get()); } else { err = UNKNOWN_ERROR; } CHECK(mDriver != NULL); sp driver = mDriver.promote(); if (driver != NULL) { driver->notifySetDataSourceCompleted(err); } break; } case kWhatPrepare: { mSource->prepareAsync(); break; } case kWhatGetTrackInfo: { uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); Parcel* reply; CHECK(msg->findPointer("reply", (void**)&reply)); size_t inbandTracks = 0; if (mSource != NULL) { inbandTracks = mSource->getTrackCount(); } size_t ccTracks = 0; if (mCCDecoder != NULL) { ccTracks = mCCDecoder->getTrackCount(); } // total track count reply->writeInt32(inbandTracks + ccTracks); // write 
inband tracks for (size_t i = 0; i < inbandTracks; ++i) { writeTrackInfo(reply, mSource->getTrackInfo(i)); } // write CC track for (size_t i = 0; i < ccTracks; ++i) { writeTrackInfo(reply, mCCDecoder->getTrackInfo(i)); } sp response = new AMessage; response->postReply(replyID); break; } case kWhatGetSelectedTrack: { status_t err = INVALID_OPERATION; if (mSource != NULL) { err = OK; int32_t type32; CHECK(msg->findInt32("type", (int32_t*)&type32)); media_track_type type = (media_track_type)type32; ssize_t selectedTrack = mSource->getSelectedTrack(type); Parcel* reply; CHECK(msg->findPointer("reply", (void**)&reply)); reply->writeInt32(selectedTrack); } sp response = new AMessage; response->setInt32("err", err); uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); response->postReply(replyID); break; } case kWhatSelectTrack: { uint32_t replyID; CHECK(msg->senderAwaitsResponse(&replyID)); size_t trackIndex; int32_t select; CHECK(msg->findSize("trackIndex", &trackIndex)); CHECK(msg->findInt32("select", &select)); status_t err = INVALID_OPERATION; size_t inbandTracks = 0; if (mSource != NULL) { inbandTracks = mSource->getTrackCount(); } size_t ccTracks = 0; if (mCCDecoder != NULL) { ccTracks = mCCDecoder->getTrackCount(); } if (trackIndex < inbandTracks) { err = mSource->selectTrack(trackIndex, select); if (!select && err == OK) { int32_t type; sp info = mSource->getTrackInfo(trackIndex); if (info != NULL && info->findInt32("type", &type) && type == MEDIA_TRACK_TYPE_TIMEDTEXT) { ++mTimedTextGeneration; } } } else { trackIndex -= inbandTracks; if (trackIndex < ccTracks) { err = mCCDecoder->selectTrack(trackIndex, select); } } sp response = new AMessage; response->setInt32("err", err); response->postReply(replyID); break; } case kWhatPollDuration: { int32_t generation; CHECK(msg->findInt32("generation", &generation)); if (generation != mPollDurationGeneration) { // stale break; } int64_t durationUs; if (mDriver != NULL && mSource->getDuration(&durationUs) == OK) { sp driver = mDriver.promote(); if (driver != NULL) { driver->notifyDuration(durationUs); } } msg->post(1000000ll); // poll again in a second. break; } case kWhatSetVideoNativeWindow: { ALOGV("kWhatSetVideoNativeWindow"); mDeferredActions.push_back( new ShutdownDecoderAction( false /* audio */, true /* video */)); sp obj; CHECK(msg->findObject("native-window", &obj)); mDeferredActions.push_back( new SetSurfaceAction( static_cast(obj.get()))); if (obj != NULL) { if (mStarted && mVideoDecoder != NULL) { // Issue a seek to refresh the video screen only if started otherwise // the extractor may not yet be started and will assert. // If the video decoder is not set (perhaps audio only in this case) // do not perform a seek as it is not needed. mDeferredActions.push_back(new SeekAction(mCurrentPositionUs)); } // If there is a new surface texture, instantiate decoders // again if possible. 
mDeferredActions.push_back( new SimpleAction(&NuPlayer::performScanSources)); } processDeferredActions(); break; } case kWhatSetAudioSink: { ALOGV("kWhatSetAudioSink"); sp obj; CHECK(msg->findObject("sink", &obj)); mAudioSink = static_cast(obj.get()); break; } case kWhatStart: { ALOGV("kWhatStart"); mVideoIsAVC = false; mOffloadAudio = false; mAudioEOS = false; mVideoEOS = false; mSkipRenderingAudioUntilMediaTimeUs = -1; mSkipRenderingVideoUntilMediaTimeUs = -1; mVideoLateByUs = 0; mNumFramesTotal = 0; mNumFramesDropped = 0; mStarted = true; /* instantiate decoders now for secure playback */ if (mSourceFlags & Source::FLAG_SECURE) { if (mNativeWindow != NULL) { instantiateDecoder(false, &mVideoDecoder); } if (mAudioSink != NULL) { instantiateDecoder(true, &mAudioDecoder); } } mSource->start(); uint32_t flags = 0; if (mSource->isRealTime()) { flags |= Renderer::FLAG_REAL_TIME; } sp audioMeta = mSource->getFormatMeta(true /* audio */); audio_stream_type_t streamType = AUDIO_STREAM_MUSIC; if (mAudioSink != NULL) { streamType = mAudioSink->getAudioStreamType(); } sp videoFormat = mSource->getFormat(false /* audio */); mOffloadAudio = canOffloadStream(audioMeta, (videoFormat != NULL), true /* is_streaming */, streamType); if (mOffloadAudio) { flags |= Renderer::FLAG_OFFLOAD_AUDIO; } mRenderer = new Renderer( mAudioSink, new AMessage(kWhatRendererNotify, id()), flags); mRendererLooper = new ALooper; mRendererLooper->setName("NuPlayerRenderer"); mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO); mRendererLooper->registerHandler(mRenderer); postScanSources(); break; } case kWhatScanSources: { int32_t generation; CHECK(msg->findInt32("generation", &generation)); if (generation != mScanSourcesGeneration) { // Drop obsolete msg. break; } mScanSourcesPending = false; ALOGV("scanning sources haveAudio=%d, haveVideo=%d", mAudioDecoder != NULL, mVideoDecoder != NULL); bool mHadAnySourcesBefore = (mAudioDecoder != NULL) || (mVideoDecoder != NULL); // initialize video before audio because successful initialization of // video may change deep buffer mode of audio. if (mNativeWindow != NULL) { instantiateDecoder(false, &mVideoDecoder); } if (mAudioSink != NULL) { if (mOffloadAudio) { // open audio sink early under offload mode. sp format = mSource->getFormat(true /*audio*/); openAudioSink(format, true /*offloadOnly*/); } instantiateDecoder(true, &mAudioDecoder); } if (!mHadAnySourcesBefore && (mAudioDecoder != NULL || mVideoDecoder != NULL)) { // This is the first time we've found anything playable. if (mSourceFlags & Source::FLAG_DYNAMIC_DURATION) { schedulePollDuration(); } } status_t err; if ((err = mSource->feedMoreTSData()) != OK) { if (mAudioDecoder == NULL && mVideoDecoder == NULL) { // We're not currently decoding anything (no audio or // video tracks found) and we just ran out of input data. if (err == ERROR_END_OF_STREAM) { notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0); } else { notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err); } } break; } if ((mAudioDecoder == NULL && mAudioSink != NULL) || (mVideoDecoder == NULL && mNativeWindow != NULL)) { msg->post(100000ll); mScanSourcesPending = true; } break; } case kWhatVideoNotify: case kWhatAudioNotify: { bool audio = msg->what() == kWhatAudioNotify; int32_t currentDecoderGeneration = (audio? 
mAudioDecoderGeneration : mVideoDecoderGeneration); int32_t requesterGeneration = currentDecoderGeneration - 1; CHECK(msg->findInt32("generation", &requesterGeneration)); if (requesterGeneration != currentDecoderGeneration) { ALOGV("got message from old %s decoder, generation(%d:%d)", audio ? "audio" : "video", requesterGeneration, currentDecoderGeneration); sp reply; if (!(msg->findMessage("reply", &reply))) { return; } reply->setInt32("err", INFO_DISCONTINUITY); reply->post(); return; } int32_t what; CHECK(msg->findInt32("what", &what)); if (what == Decoder::kWhatFillThisBuffer) { status_t err = feedDecoderInputData( audio, msg); if (err == -EWOULDBLOCK) { if (mSource->feedMoreTSData() == OK) { msg->post(10 * 1000ll); } } } else if (what == Decoder::kWhatEOS) { int32_t err; CHECK(msg->findInt32("err", &err)); if (err == ERROR_END_OF_STREAM) { ALOGV("got %s decoder EOS", audio ? "audio" : "video"); } else { ALOGV("got %s decoder EOS w/ error %d", audio ? "audio" : "video", err); } mRenderer->queueEOS(audio, err); } else if (what == Decoder::kWhatFlushCompleted) { bool needShutdown; if (audio) { CHECK(IsFlushingState(mFlushingAudio, &needShutdown)); mFlushingAudio = FLUSHED; } else { CHECK(IsFlushingState(mFlushingVideo, &needShutdown)); mFlushingVideo = FLUSHED; mVideoLateByUs = 0; } ALOGV("decoder %s flush completed", audio ? "audio" : "video"); if (needShutdown) { ALOGV("initiating %s decoder shutdown", audio ? "audio" : "video"); getDecoder(audio)->initiateShutdown(); if (audio) { mFlushingAudio = SHUTTING_DOWN_DECODER; } else { mFlushingVideo = SHUTTING_DOWN_DECODER; } } finishFlushIfPossible(); } else if (what == Decoder::kWhatOutputFormatChanged) { sp format; CHECK(msg->findMessage("format", &format)); if (audio) { openAudioSink(format, false /*offloadOnly*/); } else { // video sp inputFormat = mSource->getFormat(false /* audio */); updateVideoSize(inputFormat, format); } } else if (what == Decoder::kWhatShutdownCompleted) { ALOGV("%s shutdown completed", audio ? "audio" : "video"); if (audio) { mAudioDecoder.clear(); CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER); mFlushingAudio = SHUT_DOWN; } else { mVideoDecoder.clear(); CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER); mFlushingVideo = SHUT_DOWN; } finishFlushIfPossible(); } else if (what == Decoder::kWhatError) { ALOGE("Received error from %s decoder, aborting playback.", audio ? "audio" : "video"); status_t err; if (!msg->findInt32("err", &err)) { err = UNKNOWN_ERROR; } mRenderer->queueEOS(audio, err); if (audio && mFlushingAudio != NONE) { mAudioDecoder.clear(); mFlushingAudio = SHUT_DOWN; } else if (!audio && mFlushingVideo != NONE){ mVideoDecoder.clear(); mFlushingVideo = SHUT_DOWN; } finishFlushIfPossible(); } else if (what == Decoder::kWhatDrainThisBuffer) { renderBuffer(audio, msg); } else { ALOGV("Unhandled decoder notification %d '%c%c%c%c'.", what, what >> 24, (what >> 16) & 0xff, (what >> 8) & 0xff, what & 0xff); } break; } case kWhatRendererNotify: { int32_t what; CHECK(msg->findInt32("what", &what)); if (what == Renderer::kWhatEOS) { int32_t audio; CHECK(msg->findInt32("audio", &audio)); int32_t finalResult; CHECK(msg->findInt32("finalResult", &finalResult)); if (audio) { mAudioEOS = true; } else { mVideoEOS = true; } if (finalResult == ERROR_END_OF_STREAM) { ALOGV("reached %s EOS", audio ? "audio" : "video"); } else { ALOGE("%s track encountered an error (%d)", audio ? 
"audio" : "video", finalResult); notifyListener( MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, finalResult); } if ((mAudioEOS || mAudioDecoder == NULL) && (mVideoEOS || mVideoDecoder == NULL)) { notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0); } } else if (what == Renderer::kWhatPosition) { int64_t positionUs; CHECK(msg->findInt64("positionUs", &positionUs)); mCurrentPositionUs = positionUs; CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs)); if (mDriver != NULL) { sp driver = mDriver.promote(); if (driver != NULL) { driver->notifyPosition(positionUs); driver->notifyFrameStats( mNumFramesTotal, mNumFramesDropped); } } } else if (what == Renderer::kWhatFlushComplete) { int32_t audio; CHECK(msg->findInt32("audio", &audio)); ALOGV("renderer %s flush completed.", audio ? "audio" : "video"); } else if (what == Renderer::kWhatVideoRenderingStart) { notifyListener(MEDIA_INFO, MEDIA_INFO_RENDERING_START, 0); } else if (what == Renderer::kWhatMediaRenderingStart) { ALOGV("media rendering started"); notifyListener(MEDIA_STARTED, 0, 0); } else if (what == Renderer::kWhatAudioOffloadTearDown) { ALOGV("Tear down audio offload, fall back to s/w path"); int64_t positionUs; CHECK(msg->findInt64("positionUs", &positionUs)); closeAudioSink(); mAudioDecoder.clear(); mRenderer->flush(true /* audio */); if (mVideoDecoder != NULL) { mRenderer->flush(false /* audio */); } mRenderer->signalDisableOffloadAudio(); mOffloadAudio = false; performSeek(positionUs); instantiateDecoder(true /* audio */, &mAudioDecoder); } break; } case kWhatMoreDataQueued: { break; } case kWhatReset: { ALOGV("kWhatReset"); mDeferredActions.push_back( new ShutdownDecoderAction( true /* audio */, true /* video */)); mDeferredActions.push_back( new SimpleAction(&NuPlayer::performReset)); processDeferredActions(); break; } case kWhatSeek: { int64_t seekTimeUs; CHECK(msg->findInt64("seekTimeUs", &seekTimeUs)); ALOGV("kWhatSeek seekTimeUs=%lld us", seekTimeUs); mDeferredActions.push_back( new SimpleAction(&NuPlayer::performDecoderFlush)); mDeferredActions.push_back(new SeekAction(seekTimeUs)); processDeferredActions(); break; } case kWhatPause: { CHECK(mRenderer != NULL); mSource->pause(); mRenderer->pause(); break; } case kWhatResume: { CHECK(mRenderer != NULL); mSource->resume(); mRenderer->resume(); break; } case kWhatSourceNotify: { onSourceNotify(msg); break; } case kWhatClosedCaptionNotify: { onClosedCaptionNotify(msg); break; } default: TRESPASS(); break; } } void NuPlayer::finishFlushIfPossible() { if (mFlushingAudio != NONE && mFlushingAudio != FLUSHED && mFlushingAudio != SHUT_DOWN) { return; } if (mFlushingVideo != NONE && mFlushingVideo != FLUSHED && mFlushingVideo != SHUT_DOWN) { return; } ALOGV("both audio and video are flushed now."); mPendingAudioAccessUnit.clear(); mAggregateBuffer.clear(); if (mTimeDiscontinuityPending) { mRenderer->signalTimeDiscontinuity(); mTimeDiscontinuityPending = false; } if (mAudioDecoder != NULL && mFlushingAudio == FLUSHED) { mAudioDecoder->signalResume(); } if (mVideoDecoder != NULL && mFlushingVideo == FLUSHED) { mVideoDecoder->signalResume(); } mFlushingAudio = NONE; mFlushingVideo = NONE; processDeferredActions(); } void NuPlayer::postScanSources() { if (mScanSourcesPending) { return; } sp msg = new AMessage(kWhatScanSources, id()); msg->setInt32("generation", mScanSourcesGeneration); msg->post(); mScanSourcesPending = true; } void NuPlayer::openAudioSink(const sp &format, bool offloadOnly) { ALOGV("openAudioSink: offloadOnly(%d) mOffloadAudio(%d)", offloadOnly, mOffloadAudio); bool audioSinkChanged = 
false; int32_t numChannels; CHECK(format->findInt32("channel-count", &numChannels)); int32_t channelMask; if (!format->findInt32("channel-mask", &channelMask)) { // signal to the AudioSink to derive the mask from count. channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; } int32_t sampleRate; CHECK(format->findInt32("sample-rate", &sampleRate)); uint32_t flags; int64_t durationUs; // FIXME: we should handle the case where the video decoder // is created after we receive the format change indication. // Current code will just make that we select deep buffer // with video which should not be a problem as it should // not prevent from keeping A/V sync. if (mVideoDecoder == NULL && mSource->getDuration(&durationUs) == OK && durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) { flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER; } else { flags = AUDIO_OUTPUT_FLAG_NONE; } if (mOffloadAudio) { audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT; AString mime; CHECK(format->findString("mime", &mime)); status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str()); if (err != OK) { ALOGE("Couldn't map mime \"%s\" to a valid " "audio_format", mime.c_str()); mOffloadAudio = false; } else { ALOGV("Mime \"%s\" mapped to audio_format 0x%x", mime.c_str(), audioFormat); int avgBitRate = -1; format->findInt32("bit-rate", &avgBitRate); int32_t aacProfile = -1; if (audioFormat == AUDIO_FORMAT_AAC && format->findInt32("aac-profile", &aacProfile)) { // Redefine AAC format as per aac profile mapAACProfileToAudioFormat( audioFormat, aacProfile); } audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; offloadInfo.duration_us = -1; format->findInt64( "durationUs", &offloadInfo.duration_us); offloadInfo.sample_rate = sampleRate; offloadInfo.channel_mask = channelMask; offloadInfo.format = audioFormat; offloadInfo.stream_type = AUDIO_STREAM_MUSIC; offloadInfo.bit_rate = avgBitRate; offloadInfo.has_video = (mVideoDecoder != NULL); offloadInfo.is_streaming = true; if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) { ALOGV("openAudioSink: no change in offload mode"); return; // no change from previous configuration, everything ok. } ALOGV("openAudioSink: try to open AudioSink in offload mode"); flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; flags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER; audioSinkChanged = true; mAudioSink->close(); err = mAudioSink->open( sampleRate, numChannels, (audio_channel_mask_t)channelMask, audioFormat, 8 /* bufferCount */, &NuPlayer::Renderer::AudioSinkCallback, mRenderer.get(), (audio_output_flags_t)flags, &offloadInfo); if (err == OK) { // If the playback is offloaded to h/w, we pass // the HAL some metadata information. // We don't want to do this for PCM because it // will be going through the AudioFlinger mixer // before reaching the hardware. sp audioMeta = mSource->getFormatMeta(true /* audio */); sendMetaDataToHal(mAudioSink, audioMeta); mCurrentOffloadInfo = offloadInfo; err = mAudioSink->start(); ALOGV_IF(err == OK, "openAudioSink: offload succeeded"); } if (err != OK) { // Clean up, fall back to non offload mode. 
mAudioSink->close(); mRenderer->signalDisableOffloadAudio(); mOffloadAudio = false; mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; ALOGV("openAudioSink: offload failed"); } } } if (!offloadOnly && !mOffloadAudio) { flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; ALOGV("openAudioSink: open AudioSink in NON-offload mode"); audioSinkChanged = true; mAudioSink->close(); mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; CHECK_EQ(mAudioSink->open( sampleRate, numChannels, (audio_channel_mask_t)channelMask, AUDIO_FORMAT_PCM_16_BIT, 8 /* bufferCount */, NULL, NULL, (audio_output_flags_t)flags), (status_t)OK); mAudioSink->start(); } if (audioSinkChanged) { mRenderer->signalAudioSinkChanged(); } } void NuPlayer::closeAudioSink() { mAudioSink->close(); mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; } status_t NuPlayer::instantiateDecoder(bool audio, sp *decoder) { if (*decoder != NULL) { return OK; } sp format = mSource->getFormat(audio); if (format == NULL) { return -EWOULDBLOCK; } if (!audio) { AString mime; CHECK(format->findString("mime", &mime)); mVideoIsAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str()); sp ccNotify = new AMessage(kWhatClosedCaptionNotify, id()); mCCDecoder = new CCDecoder(ccNotify); if (mSourceFlags & Source::FLAG_SECURE) { format->setInt32("secure", true); } } if (audio) { sp notify = new AMessage(kWhatAudioNotify, id()); ++mAudioDecoderGeneration; notify->setInt32("generation", mAudioDecoderGeneration); if (mOffloadAudio) { *decoder = new DecoderPassThrough(notify); } else { *decoder = new Decoder(notify); } } else { sp notify = new AMessage(kWhatVideoNotify, id()); ++mVideoDecoderGeneration; notify->setInt32("generation", mVideoDecoderGeneration); *decoder = new Decoder(notify, mNativeWindow); } (*decoder)->init(); (*decoder)->configure(format); // allocate buffers to decrypt widevine source buffers if (!audio && (mSourceFlags & Source::FLAG_SECURE)) { Vector > inputBufs; CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK); Vector mediaBufs; for (size_t i = 0; i < inputBufs.size(); i++) { const sp &buffer = inputBufs[i]; MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size()); mediaBufs.push(mbuf); } status_t err = mSource->setBuffers(audio, mediaBufs); if (err != OK) { for (size_t i = 0; i < mediaBufs.size(); ++i) { mediaBufs[i]->release(); } mediaBufs.clear(); ALOGE("Secure source didn't support secure mediaBufs."); return err; } } return OK; } status_t NuPlayer::feedDecoderInputData(bool audio, const sp &msg) { sp reply; CHECK(msg->findMessage("reply", &reply)); if ((audio && mFlushingAudio != NONE) || (!audio && mFlushingVideo != NONE) || mSource == NULL) { reply->setInt32("err", INFO_DISCONTINUITY); reply->post(); return OK; } sp accessUnit; // Aggregate smaller buffers into a larger buffer. // The goal is to reduce power consumption. // Unfortunately this does not work with the software AAC decoder. bool doBufferAggregation = (audio && mOffloadAudio);; bool needMoreData = false; bool dropAccessUnit; do { status_t err; // Did we save an accessUnit earlier because of a discontinuity? if (audio && (mPendingAudioAccessUnit != NULL)) { accessUnit = mPendingAudioAccessUnit; mPendingAudioAccessUnit.clear(); err = mPendingAudioErr; ALOGV("feedDecoderInputData() use mPendingAudioAccessUnit"); } else { err = mSource->dequeueAccessUnit(audio, &accessUnit); } if (err == -EWOULDBLOCK) { return err; } else if (err != OK) { if (err == INFO_DISCONTINUITY) { if (mAggregateBuffer != NULL) { // We already have some data so save this for later. 
mPendingAudioErr = err; mPendingAudioAccessUnit = accessUnit; accessUnit.clear(); ALOGD("feedDecoderInputData() save discontinuity for later"); break; } int32_t type; CHECK(accessUnit->meta()->findInt32("discontinuity", &type)); bool formatChange = (audio && (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT)) || (!audio && (type & ATSParser::DISCONTINUITY_VIDEO_FORMAT)); bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0; ALOGI("%s discontinuity (formatChange=%d, time=%d)", audio ? "audio" : "video", formatChange, timeChange); if (audio) { mSkipRenderingAudioUntilMediaTimeUs = -1; } else { mSkipRenderingVideoUntilMediaTimeUs = -1; } if (timeChange) { sp extra; if (accessUnit->meta()->findMessage("extra", &extra) && extra != NULL) { int64_t resumeAtMediaTimeUs; if (extra->findInt64( "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) { ALOGI("suppressing rendering of %s until %lld us", audio ? "audio" : "video", resumeAtMediaTimeUs); if (audio) { mSkipRenderingAudioUntilMediaTimeUs = resumeAtMediaTimeUs; } else { mSkipRenderingVideoUntilMediaTimeUs = resumeAtMediaTimeUs; } } } } mTimeDiscontinuityPending = mTimeDiscontinuityPending || timeChange; bool seamlessFormatChange = false; sp newFormat = mSource->getFormat(audio); if (formatChange) { seamlessFormatChange = getDecoder(audio)->supportsSeamlessFormatChange(newFormat); // treat seamless format change separately formatChange = !seamlessFormatChange; } bool shutdownOrFlush = formatChange || timeChange; // We want to queue up scan-sources only once per discontinuity. // We control this by doing it only if neither audio nor video are // flushing or shutting down. (After handling 1st discontinuity, one // of the flushing states will not be NONE.) // No need to scan sources if this discontinuity does not result // in a flush or shutdown, as the flushing state will stay NONE. if (mFlushingAudio == NONE && mFlushingVideo == NONE && shutdownOrFlush) { // And we'll resume scanning sources once we're done // flushing. mDeferredActions.push_front( new SimpleAction( &NuPlayer::performScanSources)); } if (formatChange /* not seamless */) { // must change decoder flushDecoder(audio, /* needShutdown = */ true); } else if (timeChange) { // need to flush flushDecoder(audio, /* needShutdown = */ false, newFormat); err = OK; } else if (seamlessFormatChange) { // reuse existing decoder and don't flush updateDecoderFormatWithoutFlush(audio, newFormat); err = OK; } else { // This stream is unaffected by the discontinuity return -EWOULDBLOCK; } } reply->setInt32("err", err); reply->post(); return OK; } if (!audio) { ++mNumFramesTotal; } dropAccessUnit = false; if (!audio && !(mSourceFlags & Source::FLAG_SECURE) && mVideoLateByUs > 100000ll && mVideoIsAVC && !IsAVCReferenceFrame(accessUnit)) { dropAccessUnit = true; ++mNumFramesDropped; } size_t smallSize = accessUnit->size(); needMoreData = false; if (doBufferAggregation && (mAggregateBuffer == NULL) // Don't bother if only room for a few small buffers. && (smallSize < (kAggregateBufferSizeBytes / 3))) { // Create a larger buffer for combining smaller buffers from the extractor. mAggregateBuffer = new ABuffer(kAggregateBufferSizeBytes); mAggregateBuffer->setRange(0, 0); // start empty } if (mAggregateBuffer != NULL) { int64_t timeUs; int64_t dummy; bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs); bool bigTimestampValid = mAggregateBuffer->meta()->findInt64("timeUs", &dummy); // Will the smaller buffer fit? 
size_t bigSize = mAggregateBuffer->size(); size_t roomLeft = mAggregateBuffer->capacity() - bigSize; // Should we save this small buffer for the next big buffer? // If the first small buffer did not have a timestamp then save // any buffer that does have a timestamp until the next big buffer. if ((smallSize > roomLeft) || (!bigTimestampValid && (bigSize > 0) && smallTimestampValid)) { mPendingAudioErr = err; mPendingAudioAccessUnit = accessUnit; accessUnit.clear(); } else { // Grab time from first small buffer if available. if ((bigSize == 0) && smallTimestampValid) { mAggregateBuffer->meta()->setInt64("timeUs", timeUs); } // Append small buffer to the bigger buffer. memcpy(mAggregateBuffer->base() + bigSize, accessUnit->data(), smallSize); bigSize += smallSize; mAggregateBuffer->setRange(0, bigSize); // Keep looping until we run out of room in the mAggregateBuffer. needMoreData = true; ALOGV("feedDecoderInputData() smallSize = %zu, bigSize = %zu, capacity = %zu", smallSize, bigSize, mAggregateBuffer->capacity()); } } } while (dropAccessUnit || needMoreData); // ALOGV("returned a valid buffer of %s data", audio ? "audio" : "video"); #if 0 int64_t mediaTimeUs; CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs)); ALOGV("feeding %s input buffer at media time %.2f secs", audio ? "audio" : "video", mediaTimeUs / 1E6); #endif if (!audio) { mCCDecoder->decode(accessUnit); } if (mAggregateBuffer != NULL) { ALOGV("feedDecoderInputData() reply with aggregated buffer, %zu", mAggregateBuffer->size()); reply->setBuffer("buffer", mAggregateBuffer); mAggregateBuffer.clear(); } else { reply->setBuffer("buffer", accessUnit); } reply->post(); return OK; } void NuPlayer::renderBuffer(bool audio, const sp &msg) { // ALOGV("renderBuffer %s", audio ? "audio" : "video"); sp reply; CHECK(msg->findMessage("reply", &reply)); if ((audio && mFlushingAudio != NONE) || (!audio && mFlushingVideo != NONE)) { // We're currently attempting to flush the decoder, in order // to complete this, the decoder wants all its buffers back, // so we don't want any output buffers it sent us (from before // we initiated the flush) to be stuck in the renderer's queue. ALOGV("we're still flushing the %s decoder, sending its output buffer" " right back.", audio ? "audio" : "video"); reply->post(); return; } sp buffer; CHECK(msg->findBuffer("buffer", &buffer)); int64_t mediaTimeUs; CHECK(buffer->meta()->findInt64("timeUs", &mediaTimeUs)); int64_t &skipUntilMediaTimeUs = audio ? mSkipRenderingAudioUntilMediaTimeUs : mSkipRenderingVideoUntilMediaTimeUs; if (skipUntilMediaTimeUs >= 0) { if (mediaTimeUs < skipUntilMediaTimeUs) { ALOGV("dropping %s buffer at time %lld as requested.", audio ? 
"audio" : "video", mediaTimeUs); reply->post(); return; } skipUntilMediaTimeUs = -1; } if (!audio && mCCDecoder->isSelected()) { mCCDecoder->display(mediaTimeUs); } mRenderer->queueBuffer(audio, buffer, reply); } void NuPlayer::updateVideoSize( const sp &inputFormat, const sp &outputFormat) { if (inputFormat == NULL) { ALOGW("Unknown video size, reporting 0x0!"); notifyListener(MEDIA_SET_VIDEO_SIZE, 0, 0); return; } int32_t displayWidth, displayHeight; int32_t cropLeft, cropTop, cropRight, cropBottom; if (outputFormat != NULL) { int32_t width, height; CHECK(outputFormat->findInt32("width", &width)); CHECK(outputFormat->findInt32("height", &height)); int32_t cropLeft, cropTop, cropRight, cropBottom; CHECK(outputFormat->findRect( "crop", &cropLeft, &cropTop, &cropRight, &cropBottom)); displayWidth = cropRight - cropLeft + 1; displayHeight = cropBottom - cropTop + 1; ALOGV("Video output format changed to %d x %d " "(crop: %d x %d @ (%d, %d))", width, height, displayWidth, displayHeight, cropLeft, cropTop); } else { CHECK(inputFormat->findInt32("width", &displayWidth)); CHECK(inputFormat->findInt32("height", &displayHeight)); ALOGV("Video input format %d x %d", displayWidth, displayHeight); } // Take into account sample aspect ratio if necessary: int32_t sarWidth, sarHeight; if (inputFormat->findInt32("sar-width", &sarWidth) && inputFormat->findInt32("sar-height", &sarHeight)) { ALOGV("Sample aspect ratio %d : %d", sarWidth, sarHeight); displayWidth = (displayWidth * sarWidth) / sarHeight; ALOGV("display dimensions %d x %d", displayWidth, displayHeight); } int32_t rotationDegrees; if (!inputFormat->findInt32("rotation-degrees", &rotationDegrees)) { rotationDegrees = 0; } if (rotationDegrees == 90 || rotationDegrees == 270) { int32_t tmp = displayWidth; displayWidth = displayHeight; displayHeight = tmp; } notifyListener( MEDIA_SET_VIDEO_SIZE, displayWidth, displayHeight); } void NuPlayer::notifyListener(int msg, int ext1, int ext2, const Parcel *in) { if (mDriver == NULL) { return; } sp driver = mDriver.promote(); if (driver == NULL) { return; } driver->notifyListener(msg, ext1, ext2, in); } void NuPlayer::flushDecoder( bool audio, bool needShutdown, const sp &newFormat) { ALOGV("[%s] flushDecoder needShutdown=%d", audio ? "audio" : "video", needShutdown); const sp &decoder = getDecoder(audio); if (decoder == NULL) { ALOGI("flushDecoder %s without decoder present", audio ? "audio" : "video"); return; } // Make sure we don't continue to scan sources until we finish flushing. ++mScanSourcesGeneration; mScanSourcesPending = false; decoder->signalFlush(newFormat); mRenderer->flush(audio); FlushStatus newStatus = needShutdown ? FLUSHING_DECODER_SHUTDOWN : FLUSHING_DECODER; if (audio) { ALOGE_IF(mFlushingAudio != NONE, "audio flushDecoder() is called in state %d", mFlushingAudio); mFlushingAudio = newStatus; } else { ALOGE_IF(mFlushingVideo != NONE, "video flushDecoder() is called in state %d", mFlushingVideo); mFlushingVideo = newStatus; if (mCCDecoder != NULL) { mCCDecoder->flush(); } } } void NuPlayer::updateDecoderFormatWithoutFlush( bool audio, const sp &format) { ALOGV("[%s] updateDecoderFormatWithoutFlush", audio ? "audio" : "video"); const sp &decoder = getDecoder(audio); if (decoder == NULL) { ALOGI("updateDecoderFormatWithoutFlush %s without decoder present", audio ? 
"audio" : "video"); return; } decoder->signalUpdateFormat(format); } void NuPlayer::queueDecoderShutdown( bool audio, bool video, const sp &reply) { ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video); mDeferredActions.push_back( new ShutdownDecoderAction(audio, video)); mDeferredActions.push_back( new SimpleAction(&NuPlayer::performScanSources)); mDeferredActions.push_back(new PostMessageAction(reply)); processDeferredActions(); } status_t NuPlayer::setVideoScalingMode(int32_t mode) { mVideoScalingMode = mode; if (mNativeWindow != NULL) { status_t ret = native_window_set_scaling_mode( mNativeWindow->getNativeWindow().get(), mVideoScalingMode); if (ret != OK) { ALOGE("Failed to set scaling mode (%d): %s", -ret, strerror(-ret)); return ret; } } return OK; } status_t NuPlayer::getTrackInfo(Parcel* reply) const { sp msg = new AMessage(kWhatGetTrackInfo, id()); msg->setPointer("reply", reply); sp response; status_t err = msg->postAndAwaitResponse(&response); return err; } status_t NuPlayer::getSelectedTrack(int32_t type, Parcel* reply) const { sp msg = new AMessage(kWhatGetSelectedTrack, id()); msg->setPointer("reply", reply); msg->setInt32("type", type); sp response; status_t err = msg->postAndAwaitResponse(&response); if (err == OK && response != NULL) { CHECK(response->findInt32("err", &err)); } return err; } status_t NuPlayer::selectTrack(size_t trackIndex, bool select) { sp msg = new AMessage(kWhatSelectTrack, id()); msg->setSize("trackIndex", trackIndex); msg->setInt32("select", select); sp response; status_t err = msg->postAndAwaitResponse(&response); if (err != OK) { return err; } if (!response->findInt32("err", &err)) { err = OK; } return err; } void NuPlayer::schedulePollDuration() { sp msg = new AMessage(kWhatPollDuration, id()); msg->setInt32("generation", mPollDurationGeneration); msg->post(); } void NuPlayer::cancelPollDuration() { ++mPollDurationGeneration; } void NuPlayer::processDeferredActions() { while (!mDeferredActions.empty()) { // We won't execute any deferred actions until we're no longer in // an intermediate state, i.e. one more more decoders are currently // flushing or shutting down. if (mFlushingAudio != NONE || mFlushingVideo != NONE) { // We're currently flushing, postpone the reset until that's // completed. ALOGV("postponing action mFlushingAudio=%d, mFlushingVideo=%d", mFlushingAudio, mFlushingVideo); break; } sp action = *mDeferredActions.begin(); mDeferredActions.erase(mDeferredActions.begin()); action->execute(this); } } void NuPlayer::performSeek(int64_t seekTimeUs) { ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6); if (mSource == NULL) { // This happens when reset occurs right before the loop mode // asynchronously seeks to the start of the stream. LOG_ALWAYS_FATAL_IF(mAudioDecoder != NULL || mVideoDecoder != NULL, "mSource is NULL and decoders not NULL audio(%p) video(%p)", mAudioDecoder.get(), mVideoDecoder.get()); return; } mSource->seekTo(seekTimeUs); ++mTimedTextGeneration; if (mDriver != NULL) { sp driver = mDriver.promote(); if (driver != NULL) { driver->notifyPosition(seekTimeUs); driver->notifySeekComplete(); } } // everything's flushed, continue playback. 
} void NuPlayer::performDecoderFlush() { ALOGV("performDecoderFlush"); if (mAudioDecoder == NULL && mVideoDecoder == NULL) { return; } mTimeDiscontinuityPending = true; if (mAudioDecoder != NULL) { flushDecoder(true /* audio */, false /* needShutdown */); } if (mVideoDecoder != NULL) { flushDecoder(false /* audio */, false /* needShutdown */); } } void NuPlayer::performDecoderShutdown(bool audio, bool video) { ALOGV("performDecoderShutdown audio=%d, video=%d", audio, video); if ((!audio || mAudioDecoder == NULL) && (!video || mVideoDecoder == NULL)) { return; } mTimeDiscontinuityPending = true; if (audio && mAudioDecoder != NULL) { flushDecoder(true /* audio */, true /* needShutdown */); } if (video && mVideoDecoder != NULL) { flushDecoder(false /* audio */, true /* needShutdown */); } } void NuPlayer::performReset() { ALOGV("performReset"); CHECK(mAudioDecoder == NULL); CHECK(mVideoDecoder == NULL); cancelPollDuration(); ++mScanSourcesGeneration; mScanSourcesPending = false; ++mAudioDecoderGeneration; ++mVideoDecoderGeneration; if (mRendererLooper != NULL) { if (mRenderer != NULL) { mRendererLooper->unregisterHandler(mRenderer->id()); } mRendererLooper->stop(); mRendererLooper.clear(); } mRenderer.clear(); if (mSource != NULL) { mSource->stop(); mSource.clear(); } if (mDriver != NULL) { sp driver = mDriver.promote(); if (driver != NULL) { driver->notifyResetComplete(); } } mStarted = false; } void NuPlayer::performScanSources() { ALOGV("performScanSources"); if (!mStarted) { return; } if (mAudioDecoder == NULL || mVideoDecoder == NULL) { postScanSources(); } } void NuPlayer::performSetSurface(const sp &wrapper) { ALOGV("performSetSurface"); mNativeWindow = wrapper; // XXX - ignore error from setVideoScalingMode for now setVideoScalingMode(mVideoScalingMode); if (mDriver != NULL) { sp driver = mDriver.promote(); if (driver != NULL) { driver->notifySetSurfaceComplete(); } } } void NuPlayer::onSourceNotify(const sp &msg) { int32_t what; CHECK(msg->findInt32("what", &what)); switch (what) { case Source::kWhatPrepared: { if (mSource == NULL) { // This is a stale notification from a source that was // asynchronously preparing when the client called reset(). // We handled the reset, the source is gone. break; } int32_t err; CHECK(msg->findInt32("err", &err)); sp driver = mDriver.promote(); if (driver != NULL) { // notify duration first, so that it's definitely set when // the app received the "prepare complete" callback. 
int64_t durationUs; if (mSource->getDuration(&durationUs) == OK) { driver->notifyDuration(durationUs); } driver->notifyPrepareCompleted(err); } break; } case Source::kWhatFlagsChanged: { uint32_t flags; CHECK(msg->findInt32("flags", (int32_t *)&flags)); sp driver = mDriver.promote(); if (driver != NULL) { driver->notifyFlagsChanged(flags); } if ((mSourceFlags & Source::FLAG_DYNAMIC_DURATION) && (!(flags & Source::FLAG_DYNAMIC_DURATION))) { cancelPollDuration(); } else if (!(mSourceFlags & Source::FLAG_DYNAMIC_DURATION) && (flags & Source::FLAG_DYNAMIC_DURATION) && (mAudioDecoder != NULL || mVideoDecoder != NULL)) { schedulePollDuration(); } mSourceFlags = flags; break; } case Source::kWhatVideoSizeChanged: { sp format; CHECK(msg->findMessage("format", &format)); updateVideoSize(format); break; } case Source::kWhatBufferingUpdate: { int32_t percentage; CHECK(msg->findInt32("percentage", &percentage)); notifyListener(MEDIA_BUFFERING_UPDATE, percentage, 0); break; } case Source::kWhatBufferingStart: { notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_START, 0); break; } case Source::kWhatBufferingEnd: { notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_END, 0); break; } case Source::kWhatSubtitleData: { sp buffer; CHECK(msg->findBuffer("buffer", &buffer)); sendSubtitleData(buffer, 0 /* baseIndex */); break; } case Source::kWhatTimedTextData: { int32_t generation; if (msg->findInt32("generation", &generation) && generation != mTimedTextGeneration) { break; } sp buffer; CHECK(msg->findBuffer("buffer", &buffer)); sp driver = mDriver.promote(); if (driver == NULL) { break; } int posMs; int64_t timeUs, posUs; driver->getCurrentPosition(&posMs); posUs = posMs * 1000; CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); if (posUs < timeUs) { if (!msg->findInt32("generation", &generation)) { msg->setInt32("generation", mTimedTextGeneration); } msg->post(timeUs - posUs); } else { sendTimedTextData(buffer); } break; } case Source::kWhatQueueDecoderShutdown: { int32_t audio, video; CHECK(msg->findInt32("audio", &audio)); CHECK(msg->findInt32("video", &video)); sp reply; CHECK(msg->findMessage("reply", &reply)); queueDecoderShutdown(audio, video, reply); break; } case Source::kWhatDrmNoLicense: { notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE); break; } default: TRESPASS(); } } void NuPlayer::onClosedCaptionNotify(const sp &msg) { int32_t what; CHECK(msg->findInt32("what", &what)); switch (what) { case NuPlayer::CCDecoder::kWhatClosedCaptionData: { sp buffer; CHECK(msg->findBuffer("buffer", &buffer)); size_t inbandTracks = 0; if (mSource != NULL) { inbandTracks = mSource->getTrackCount(); } sendSubtitleData(buffer, inbandTracks); break; } case NuPlayer::CCDecoder::kWhatTrackAdded: { notifyListener(MEDIA_INFO, MEDIA_INFO_METADATA_UPDATE, 0); break; } default: TRESPASS(); } } void NuPlayer::sendSubtitleData(const sp &buffer, int32_t baseIndex) { int32_t trackIndex; int64_t timeUs, durationUs; CHECK(buffer->meta()->findInt32("trackIndex", &trackIndex)); CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); CHECK(buffer->meta()->findInt64("durationUs", &durationUs)); Parcel in; in.writeInt32(trackIndex + baseIndex); in.writeInt64(timeUs); in.writeInt64(durationUs); in.writeInt32(buffer->size()); in.writeInt32(buffer->size()); in.write(buffer->data(), buffer->size()); notifyListener(MEDIA_SUBTITLE_DATA, 0, 0, &in); } void NuPlayer::sendTimedTextData(const sp &buffer) { const void *data; size_t size = 0; int64_t timeUs; int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS; AString mime; 
    CHECK(buffer->meta()->findString("mime", &mime));
    CHECK(strcasecmp(mime.c_str(), MEDIA_MIMETYPE_TEXT_3GPP) == 0);

    data = buffer->data();
    size = buffer->size();

    Parcel parcel;
    if (size > 0) {
        CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
        flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
        TextDescriptions::getParcelOfDescriptions(
                (const uint8_t *)data, size, flag, timeUs / 1000, &parcel);
    }

    if ((parcel.dataSize() > 0)) {
        notifyListener(MEDIA_TIMED_TEXT, 0, 0, &parcel);
    } else {  // send an empty timed text
        notifyListener(MEDIA_TIMED_TEXT, 0, 0);
    }
}

////////////////////////////////////////////////////////////////////////////////

sp<AMessage> NuPlayer::Source::getFormat(bool audio) {
    sp<MetaData> meta = getFormatMeta(audio);

    if (meta == NULL) {
        return NULL;
    }

    sp<AMessage> msg = new AMessage;

    if (convertMetaDataToMessage(meta, &msg) == OK) {
        return msg;
    }
    return NULL;
}

void NuPlayer::Source::notifyFlagsChanged(uint32_t flags) {
    sp<AMessage> notify = dupNotify();
    notify->setInt32("what", kWhatFlagsChanged);
    notify->setInt32("flags", flags);
    notify->post();
}

void NuPlayer::Source::notifyVideoSizeChanged(const sp<AMessage> &format) {
    sp<AMessage> notify = dupNotify();
    notify->setInt32("what", kWhatVideoSizeChanged);
    notify->setMessage("format", format);
    notify->post();
}

void NuPlayer::Source::notifyPrepared(status_t err) {
    sp<AMessage> notify = dupNotify();
    notify->setInt32("what", kWhatPrepared);
    notify->setInt32("err", err);
    notify->post();
}

void NuPlayer::Source::onMessageReceived(const sp<AMessage> & /* msg */) {
    TRESPASS();
}

}  // namespace android
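
// Illustrative sketch only (a hypothetical helper, kept out of the build with
// #if 0): in feedDecoderInputData() the offload-audio path combines small
// access units into one 24 KB buffer (kAggregateBufferSizeBytes) to reduce
// wakeups. The aggregate buffer is only created for units smaller than a
// third of that capacity, and each unit is appended only while it still fits
// in the remaining room; otherwise it is saved as mPendingAudioAccessUnit for
// the next aggregate buffer. The fit check reduces to:
#if 0
static bool FitsInAggregateBuffer(
        size_t capacity,    // kAggregateBufferSizeBytes (24 * 1024)
        size_t bigSize,     // bytes already aggregated
        size_t smallSize) { // size of the incoming access unit
    size_t roomLeft = capacity - bigSize;
    return smallSize <= roomLeft;
}
#endif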