diff options
Diffstat (limited to 'media')
63 files changed, 3298 insertions, 905 deletions
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp index 14b31ec..d3b4a35 100644 --- a/media/img_utils/src/DngUtils.cpp +++ b/media/img_utils/src/DngUtils.cpp @@ -229,7 +229,9 @@ status_t OpcodeListBuilder::addGainMap(uint32_t top, err = mEndianOut.write(version, 0, NELEMS(version)); if (err != OK) return err; - uint32_t flags = FLAG_OPTIONAL | FLAG_OPTIONAL_FOR_PREVIEW; + // Do not include optional flag for preview, as this can have a large effect on the output. + uint32_t flags = FLAG_OPTIONAL; + err = mEndianOut.write(&flags, 0, 1); if (err != OK) return err; diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp index 35f6557..0d5d7e4 100644 --- a/media/libmedia/AudioEffect.cpp +++ b/media/libmedia/AudioEffect.cpp @@ -145,15 +145,19 @@ status_t AudioEffect::set(const effect_uuid_t *type, return mStatus; } - mIEffect = iEffect; mCblkMemory = cblk; mCblk = static_cast<effect_param_cblk_t*>(cblk->pointer()); int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int); mCblk->buffer = (uint8_t *)mCblk + bufOffset; iEffect->asBinder()->linkToDeath(mIEffectClient); - ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, - mStatus, mEnabled); + mClientPid = IPCThreadState::self()->getCallingPid(); + ALOGV("set() %p OK effect: %s id: %d status %d enabled %d pid %d", this, mDescriptor.name, mId, + mStatus, mEnabled, mClientPid); + + if (mSessionId > AUDIO_SESSION_OUTPUT_MIX) { + AudioSystem::acquireAudioSessionId(mSessionId, mClientPid); + } return mStatus; } @@ -164,6 +168,9 @@ AudioEffect::~AudioEffect() ALOGV("Destructor %p", this); if (mStatus == NO_ERROR || mStatus == ALREADY_EXISTS) { + if (mSessionId > AUDIO_SESSION_OUTPUT_MIX) { + AudioSystem::releaseAudioSessionId(mSessionId, mClientPid); + } if (mIEffect != NULL) { mIEffect->disconnect(); mIEffect->asBinder()->unlinkToDeath(mIEffectClient); diff --git a/media/libmedia/AudioSystem.cpp 
b/media/libmedia/AudioSystem.cpp index 1742fbe..dda3657 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -939,6 +939,15 @@ status_t AudioSystem::releaseSoundTriggerSession(audio_session_t session) if (aps == 0) return PERMISSION_DENIED; return aps->releaseSoundTriggerSession(session); } + +audio_mode_t AudioSystem::getPhoneState() +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return AUDIO_MODE_INVALID; + return aps->getPhoneState(); +} + + // --------------------------------------------------------------------------- void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused) diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index d87e6f5..e3beba5 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -37,6 +37,19 @@ namespace android { // --------------------------------------------------------------------------- +static int64_t convertTimespecToUs(const struct timespec &tv) +{ + return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000; +} + +// current monotonic time in microseconds. 
+static int64_t getNowUs() +{ + struct timespec tv; + (void) clock_gettime(CLOCK_MONOTONIC, &tv); + return convertTimespecToUs(tv); +} + // static status_t AudioTrack::getMinFrameCount( size_t* frameCount, @@ -398,7 +411,7 @@ status_t AudioTrack::set( } // create the IAudioTrack - status = createTrack_l(0 /*epoch*/); + status = createTrack_l(); if (status != NO_ERROR) { if (mAudioTrackThread != 0) { @@ -417,6 +430,10 @@ status_t AudioTrack::set( mMarkerReached = false; mNewPosition = 0; mUpdatePeriod = 0; + mServer = 0; + mPosition = 0; + mReleased = 0; + mStartUs = 0; AudioSystem::acquireAudioSessionId(mSessionId, mClientPid); mSequence = 1; mObservedSequence = mSequence; @@ -443,14 +460,22 @@ status_t AudioTrack::start() } else { mState = STATE_ACTIVE; } + (void) updateAndGetPosition_l(); if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) { // reset current position as seen by client to 0 - mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition()); + mPosition = 0; + mReleased = 0; + // For offloaded tracks, we don't know if the hardware counters are really zero here, + // since the flush is asynchronous and stop may not fully drain. + // We save the time when the track is started to later verify whether + // the counters are realistic (i.e. start from zero after this time). + mStartUs = getNowUs(); + // force refresh of remaining frames by processAudioBuffer() as last // write before stop could be partial. mRefreshRemaining = true; } - mNewPosition = mProxy->getPosition() + mUpdatePeriod; + mNewPosition = mPosition + mUpdatePeriod; int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags); sp<AudioTrackThread> t = mAudioTrackThread; @@ -582,9 +607,18 @@ void AudioTrack::pause() if (isOffloaded_l()) { if (mOutput != AUDIO_IO_HANDLE_NONE) { + // An offload output can be re-used between two audio tracks having + // the same configuration. 
A timestamp query for a paused track + // while the other is running would return an incorrect time. + // To fix this, cache the playback position on a pause() and return + // this time when requested until the track is resumed. + + // OffloadThread sends HAL pause in its threadLoop. Time saved + // here can be slightly off. + + // TODO: check return code for getRenderPosition. + uint32_t halFrames; - // OffloadThread sends HAL pause in its threadLoop.. time saved - // here can be slightly off AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition); ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition); } @@ -709,7 +743,7 @@ void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount) { // FIXME If setting a loop also sets position to start of loop, then // this is correct. Otherwise it should be removed. - mNewPosition = mProxy->getPosition() + mUpdatePeriod; + mNewPosition = updateAndGetPosition_l() + mUpdatePeriod; mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0; mStaticProxy->setLoop(loopStart, loopEnd, loopCount); } @@ -751,7 +785,7 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) } AutoMutex lock(mLock); - mNewPosition = mProxy->getPosition() + updatePeriod; + mNewPosition = updateAndGetPosition_l() + updatePeriod; mUpdatePeriod = updatePeriod; return NO_ERROR; @@ -791,7 +825,7 @@ status_t AudioTrack::setPosition(uint32_t position) if (mState == STATE_ACTIVE) { return INVALID_OPERATION; } - mNewPosition = mProxy->getPosition() + mUpdatePeriod; + mNewPosition = updateAndGetPosition_l() + mUpdatePeriod; mLoopPeriod = 0; // FIXME Check whether loops and setting position are incompatible in old code. // If we use setLoop for both purposes we lose the capability to set the position while looping. 
@@ -800,7 +834,7 @@ status_t AudioTrack::setPosition(uint32_t position) return NO_ERROR; } -status_t AudioTrack::getPosition(uint32_t *position) const +status_t AudioTrack::getPosition(uint32_t *position) { if (position == NULL) { return BAD_VALUE; @@ -820,11 +854,13 @@ status_t AudioTrack::getPosition(uint32_t *position) const uint32_t halFrames; AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames); } + // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED) + // due to hardware latency. We leave this behavior for now. *position = dspFrames; } else { // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes - *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 : - mProxy->getPosition(); + *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? + 0 : updateAndGetPosition_l(); } return NO_ERROR; } @@ -881,7 +917,7 @@ status_t AudioTrack::attachAuxEffect(int effectId) // ------------------------------------------------------------------------- // must be called with mLock held -status_t AudioTrack::createTrack_l(size_t epoch) +status_t AudioTrack::createTrack_l() { status_t status; const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); @@ -1184,7 +1220,6 @@ status_t AudioTrack::createTrack_l(size_t epoch) mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY); mProxy->setSendLevel(mSendLevel); mProxy->setSampleRate(mSampleRate); - mProxy->setEpoch(epoch); mProxy->setMinimum(mNotificationFramesAct); mDeathNotifier = new DeathNotifier(this); @@ -1319,6 +1354,7 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer) buffer.mRaw = audioBuffer->raw; AutoMutex lock(mLock); + mReleased += stepCount; mInUnderrun = false; mProxy->releaseBuffer(&buffer); @@ -1531,7 +1567,7 @@ nsecs_t AudioTrack::processAudioBuffer() } // Get current position of server - size_t position = mProxy->getPosition(); + size_t position = updateAndGetPosition_l(); // Manage marker callback 
bool markerReached = false; @@ -1796,14 +1832,18 @@ status_t AudioTrack::restoreTrack_l(const char *from) return DEAD_OBJECT; } - // if the new IAudioTrack is created, createTrack_l() will modify the + // save the old static buffer position + size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0; + + // If a new IAudioTrack is successfully created, createTrack_l() will modify the // following member variables: mAudioTrack, mCblkMemory and mCblk. - // It will also delete the strong references on previous IAudioTrack and IMemory + // It will also delete the strong references on previous IAudioTrack and IMemory. + // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact. + result = createTrack_l(); // take the frames that will be lost by track recreation into account in saved position - size_t position = mProxy->getPosition() + mProxy->getFramesFilled(); - size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0; - result = createTrack_l(position /*epoch*/); + (void) updateAndGetPosition_l(); + mPosition = mReleased; if (result == NO_ERROR) { // continue playback from last known position, but @@ -1838,6 +1878,27 @@ status_t AudioTrack::restoreTrack_l(const char *from) return result; } +uint32_t AudioTrack::updateAndGetPosition_l() +{ + // This is the sole place to read server consumed frames + uint32_t newServer = mProxy->getPosition(); + int32_t delta = newServer - mServer; + mServer = newServer; + // TODO There is controversy about whether there can be "negative jitter" in server position. + // This should be investigated further, and if possible, it should be addressed. + // A more definite failure mode is infrequent polling by client. + // One could call (void)getPosition_l() in releaseBuffer(), + // so mReleased and mPosition are always lock-step as best possible. 
+ // That should ensure delta never goes negative for infrequent polling + // unless the server has more than 2^31 frames in its buffer, + // in which case the use of uint32_t for these counters has bigger issues. + if (delta < 0) { + ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta); + delta = 0; + } + return mPosition += (uint32_t) delta; +} + status_t AudioTrack::setParameters(const String8& keyValuePairs) { AutoMutex lock(mLock); @@ -1851,12 +1912,94 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp) if (mFlags & AUDIO_OUTPUT_FLAG_FAST) { return INVALID_OPERATION; } - if (mState != STATE_ACTIVE && mState != STATE_PAUSED) { - return INVALID_OPERATION; + + switch (mState) { + case STATE_ACTIVE: + case STATE_PAUSED: + break; // handle below + case STATE_FLUSHED: + case STATE_STOPPED: + return WOULD_BLOCK; + case STATE_STOPPING: + case STATE_PAUSED_STOPPING: + if (!isOffloaded_l()) { + return INVALID_OPERATION; + } + break; // offloaded tracks handled below + default: + LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState); + break; } + + // The presented frame count must always lag behind the consumed frame count. + // To avoid a race, read the presented frames first. This ensures that presented <= consumed. status_t status = mAudioTrack->getTimestamp(timestamp); - if (status == NO_ERROR) { - timestamp.mPosition += mProxy->getEpoch(); + if (status != NO_ERROR) { + ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status); + return status; + } + if (isOffloadedOrDirect_l()) { + if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) { + // use cached paused position in case another offloaded track is running. + timestamp.mPosition = mPausedPosition; + clock_gettime(CLOCK_MONOTONIC, ×tamp.mTime); + return NO_ERROR; + } + + // Check whether a pending flush or stop has completed, as those commands may + // be asynchronous or return near finish. 
+ if (mStartUs != 0 && mSampleRate != 0) { + static const int kTimeJitterUs = 100000; // 100 ms + static const int k1SecUs = 1000000; + + const int64_t timeNow = getNowUs(); + + if (timeNow < mStartUs + k1SecUs) { // within first second of starting + const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime); + if (timestampTimeUs < mStartUs) { + return WOULD_BLOCK; // stale timestamp time, occurs before start. + } + const int64_t deltaTimeUs = timestampTimeUs - mStartUs; + const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate; + + if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) { + // Verify that the counter can't count faster than the sample rate + // since the start time. If greater, then that means we have failed + // to completely flush or stop the previous playing track. + ALOGW("incomplete flush or stop:" + " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)", + (long long)deltaTimeUs, (long long)deltaPositionByUs, + timestamp.mPosition); + return WOULD_BLOCK; + } + } + mStartUs = 0; // no need to check again, start timestamp has either expired or unneeded. + } + } else { + // Update the mapping between local consumed (mPosition) and server consumed (mServer) + (void) updateAndGetPosition_l(); + // Server consumed (mServer) and presented both use the same server time base, + // and server consumed is always >= presented. + // The delta between these represents the number of frames in the buffer pipeline. + // If this delta between these is greater than the client position, it means that + // actually presented is still stuck at the starting line (figuratively speaking), + // waiting for the first frame to go by. So we can't report a valid timestamp yet. + if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) { + return INVALID_OPERATION; + } + // Convert timestamp position from server time base to client time base. + // TODO The following code should work OK now because timestamp.mPosition is 32-bit. 
+ // But if we change it to 64-bit then this could fail. + // If (mPosition - mServer) can be negative then should use: + // (int32_t)(mPosition - mServer) + timestamp.mPosition += mPosition - mServer; + // Immediately after a call to getPosition_l(), mPosition and + // mServer both represent the same frame position. mPosition is + // in client's point of view, and mServer is in server's point of + // view. So the difference between them is the "fudge factor" + // between client and server views due to stop() and/or new + // IAudioTrack. And timestamp.mPosition is initially in server's + // point of view, so we need to apply the same fudge factor to it. } return status; } @@ -1981,9 +2124,16 @@ void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) { // usage to stream type mapping switch (aa.usage) { + case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: + // TODO once AudioPolicyManager fully supports audio_attributes_t, + // remove stream change based on phone state + if (AudioSystem::getPhoneState() == AUDIO_MODE_RINGTONE) { + mStreamType = AUDIO_STREAM_RING; + break; + } + /// FALL THROUGH case AUDIO_USAGE_MEDIA: case AUDIO_USAGE_GAME: - case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE: mStreamType = AUDIO_STREAM_MUSIC; return; diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp index b57f747..256cb3f 100644 --- a/media/libmedia/IAudioPolicyService.cpp +++ b/media/libmedia/IAudioPolicyService.cpp @@ -67,7 +67,8 @@ enum { REGISTER_CLIENT, GET_OUTPUT_FOR_ATTR, ACQUIRE_SOUNDTRIGGER_SESSION, - RELEASE_SOUNDTRIGGER_SESSION + RELEASE_SOUNDTRIGGER_SESSION, + GET_PHONE_STATE }; class BpAudioPolicyService : public BpInterface<IAudioPolicyService> @@ -607,6 +608,17 @@ public: } return (status_t)reply.readInt32(); } + + virtual audio_mode_t getPhoneState() + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + status_t status = 
remote()->transact(GET_PHONE_STATE, data, &reply); + if (status != NO_ERROR) { + return AUDIO_MODE_INVALID; + } + return (audio_mode_t)reply.readInt32(); + } }; IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService"); @@ -1057,6 +1069,12 @@ status_t BnAudioPolicyService::onTransact( return NO_ERROR; } break; + case GET_PHONE_STATE: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + reply->writeInt32((int32_t)getPhoneState()); + return NO_ERROR; + } break; + default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp index d2e181b..e2e6042 100644 --- a/media/libmedia/MediaProfiles.cpp +++ b/media/libmedia/MediaProfiles.cpp @@ -87,6 +87,7 @@ const MediaProfiles::NameToTagMap MediaProfiles::sCamcorderQualityNameMap[] = { {"highspeed480p", CAMCORDER_QUALITY_HIGH_SPEED_480P}, {"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P}, {"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P}, + {"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P}, }; #if LOG_NDEBUG diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk index adc066d..2cf5710 100644 --- a/media/libmediaplayerservice/Android.mk +++ b/media/libmediaplayerservice/Android.mk @@ -22,6 +22,7 @@ LOCAL_SRC_FILES:= \ StagefrightPlayer.cpp \ StagefrightRecorder.cpp \ TestPlayerStub.cpp \ + VideoFrameScheduler.cpp \ LOCAL_SHARED_LIBRARIES := \ libbinder \ diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp index c8cb7ed..8eb1269 100644 --- a/media/libmediaplayerservice/MediaPlayerService.cpp +++ b/media/libmediaplayerservice/MediaPlayerService.cpp @@ -43,6 +43,7 @@ #include <utils/Errors.h> // for status_t #include <utils/String8.h> #include <utils/SystemClock.h> +#include <utils/Timers.h> #include <utils/Vector.h> #include <media/IMediaHTTPService.h> @@ -1496,6 +1497,12 @@ status_t 
MediaPlayerService::AudioOutput::getPosition(uint32_t *position) const return mTrack->getPosition(position); } +status_t MediaPlayerService::AudioOutput::getTimestamp(AudioTimestamp &ts) const +{ + if (mTrack == 0) return NO_INIT; + return mTrack->getTimestamp(ts); +} + status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const { if (mTrack == 0) return NO_INIT; @@ -1971,6 +1978,15 @@ status_t MediaPlayerService::AudioCache::getPosition(uint32_t *position) const return NO_ERROR; } +status_t MediaPlayerService::AudioCache::getTimestamp(AudioTimestamp &ts) const +{ + ts.mPosition = mSize / mFrameSize; + nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); + ts.mTime.tv_sec = now / 1000000000LL; + ts.mTime.tv_nsec = now - (1000000000LL * ts.mTime.tv_sec); + return NO_ERROR; +} + status_t MediaPlayerService::AudioCache::getFramesWritten(uint32_t *written) const { if (written == 0) return BAD_VALUE; diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h index 4fe7075..3b96e88 100644 --- a/media/libmediaplayerservice/MediaPlayerService.h +++ b/media/libmediaplayerservice/MediaPlayerService.h @@ -85,6 +85,7 @@ class MediaPlayerService : public BnMediaPlayerService virtual uint32_t latency() const; virtual float msecsPerFrame() const; virtual status_t getPosition(uint32_t *position) const; + virtual status_t getTimestamp(AudioTimestamp &ts) const; virtual status_t getFramesWritten(uint32_t *frameswritten) const; virtual int getSessionId() const; virtual uint32_t getSampleRate() const; @@ -198,6 +199,7 @@ class MediaPlayerService : public BnMediaPlayerService virtual uint32_t latency() const; virtual float msecsPerFrame() const; virtual status_t getPosition(uint32_t *position) const; + virtual status_t getTimestamp(AudioTimestamp &ts) const; virtual status_t getFramesWritten(uint32_t *frameswritten) const; virtual int getSessionId() const; virtual uint32_t getSampleRate() const; diff --git 
a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index e2bcb1e..cadd691 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -30,6 +30,7 @@ #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> #include <media/stagefright/foundation/ALooper.h> +#include <media/stagefright/ACodec.h> #include <media/stagefright/AudioSource.h> #include <media/stagefright/AMRWriter.h> #include <media/stagefright/AACWriter.h> @@ -183,11 +184,7 @@ status_t StagefrightRecorder::setVideoEncoder(video_encoder ve) { return BAD_VALUE; } - if (ve == VIDEO_ENCODER_DEFAULT) { - mVideoEncoder = VIDEO_ENCODER_H263; - } else { - mVideoEncoder = ve; - } + mVideoEncoder = ve; return OK; } @@ -1033,6 +1030,7 @@ status_t StagefrightRecorder::setupRTPRecording() { if (mAudioSource != AUDIO_SOURCE_CNT) { source = createAudioSource(); } else { + setDefaultVideoEncoderIfNecessary(); sp<MediaSource> mediaSource; status_t err = setupMediaSource(&mediaSource); @@ -1074,6 +1072,7 @@ status_t StagefrightRecorder::setupMPEG2TSRecording() { if (mVideoSource < VIDEO_SOURCE_LIST_END) { if (mVideoEncoder != VIDEO_ENCODER_H264) { + ALOGE("MPEG2TS recording only supports H.264 encoding!"); return ERROR_UNSUPPORTED; } @@ -1108,6 +1107,12 @@ status_t StagefrightRecorder::setupMPEG2TSRecording() { void StagefrightRecorder::clipVideoFrameRate() { ALOGV("clipVideoFrameRate: encoder %d", mVideoEncoder); + if (mFrameRate == -1) { + mFrameRate = mEncoderProfiles->getCamcorderProfileParamByName( + "vid.fps", mCameraId, CAMCORDER_QUALITY_LOW); + ALOGW("Using default video fps %d", mFrameRate); + } + int minFrameRate = mEncoderProfiles->getVideoEncoderParamByName( "enc.vid.fps.min", mVideoEncoder); int maxFrameRate = mEncoderProfiles->getVideoEncoderParamByName( @@ -1167,6 +1172,7 @@ status_t StagefrightRecorder::checkVideoEncoderCapabilities( 
client.interface(), (mVideoEncoder == VIDEO_ENCODER_H263 ? MEDIA_MIMETYPE_VIDEO_H263 : mVideoEncoder == VIDEO_ENCODER_MPEG_4_SP ? MEDIA_MIMETYPE_VIDEO_MPEG4 : + mVideoEncoder == VIDEO_ENCODER_VP8 ? MEDIA_MIMETYPE_VIDEO_VP8 : mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""), false /* decoder */, true /* hwCodec */, &codecs); *supportsCameraSourceMetaDataMode = codecs.size() > 0; @@ -1239,6 +1245,31 @@ void StagefrightRecorder::setDefaultProfileIfNecessary() { if (videoCodec == VIDEO_ENCODER_H264) { ALOGI("Force to use AVC baseline profile"); setParamVideoEncoderProfile(OMX_VIDEO_AVCProfileBaseline); + // set 0 for invalid levels - this will be rejected by the + // codec if it cannot handle it during configure + setParamVideoEncoderLevel(ACodec::getAVCLevelFor( + videoFrameWidth, videoFrameHeight, videoFrameRate, videoBitRate)); + } + } +} + +void StagefrightRecorder::setDefaultVideoEncoderIfNecessary() { + if (mVideoEncoder == VIDEO_ENCODER_DEFAULT) { + if (mOutputFormat == OUTPUT_FORMAT_WEBM) { + // default to VP8 for WEBM recording + mVideoEncoder = VIDEO_ENCODER_VP8; + } else { + // pick the default encoder for CAMCORDER_QUALITY_LOW + int videoCodec = mEncoderProfiles->getCamcorderProfileParamByName( + "vid.codec", mCameraId, CAMCORDER_QUALITY_LOW); + + if (videoCodec > VIDEO_ENCODER_DEFAULT && + videoCodec < VIDEO_ENCODER_LIST_END) { + mVideoEncoder = (video_encoder)videoCodec; + } else { + // default to H.264 if camcorder profile not available + mVideoEncoder = VIDEO_ENCODER_H264; + } } } } @@ -1562,6 +1593,7 @@ status_t StagefrightRecorder::setupMPEG4orWEBMRecording() { } if (mVideoSource < VIDEO_SOURCE_LIST_END) { + setDefaultVideoEncoderIfNecessary(); sp<MediaSource> mediaSource; err = setupMediaSource(&mediaSource); @@ -1721,7 +1753,7 @@ status_t StagefrightRecorder::reset() { // Default parameters mOutputFormat = OUTPUT_FORMAT_THREE_GPP; mAudioEncoder = AUDIO_ENCODER_AMR_NB; - mVideoEncoder = VIDEO_ENCODER_H263; + mVideoEncoder = 
VIDEO_ENCODER_DEFAULT; mVideoWidth = 176; mVideoHeight = 144; mFrameRate = -1; diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h index 9062f30..54c38d3 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.h +++ b/media/libmediaplayerservice/StagefrightRecorder.h @@ -178,6 +178,7 @@ private: void clipAudioSampleRate(); void clipNumberOfAudioChannels(); void setDefaultProfileIfNecessary(); + void setDefaultVideoEncoderIfNecessary(); StagefrightRecorder(const StagefrightRecorder &); diff --git a/media/libmediaplayerservice/VideoFrameScheduler.cpp b/media/libmediaplayerservice/VideoFrameScheduler.cpp new file mode 100644 index 0000000..ce5f5fe --- /dev/null +++ b/media/libmediaplayerservice/VideoFrameScheduler.cpp @@ -0,0 +1,482 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "VideoFrameScheduler" +#include <utils/Log.h> +#define ATRACE_TAG ATRACE_TAG_VIDEO +#include <utils/Trace.h> + +#include <sys/time.h> + +#include <binder/IServiceManager.h> +#include <gui/ISurfaceComposer.h> +#include <ui/DisplayStatInfo.h> + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AUtils.h> + +#include "VideoFrameScheduler.h" + +namespace android { + +static const nsecs_t kNanosIn1s = 1000000000; + +template<class T> +static int compare(const T *lhs, const T *rhs) { + if (*lhs < *rhs) { + return -1; + } else if (*lhs > *rhs) { + return 1; + } else { + return 0; + } +} + +/* ======================================================================= */ +/* PLL */ +/* ======================================================================= */ + +static const size_t kMinSamplesToStartPrime = 3; +static const size_t kMinSamplesToStopPrime = VideoFrameScheduler::kHistorySize; +static const size_t kMinSamplesToEstimatePeriod = 3; +static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize; + +static const size_t kPrecision = 12; +static const size_t kErrorThreshold = (1 << (kPrecision * 2)) / 10; +static const int64_t kMultiplesThresholdDiv = 4; // 25% +static const int64_t kReFitThresholdDiv = 100; // 1% +static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s; // 1 sec +static const nsecs_t kMinPeriod = kNanosIn1s / 120; // 120Hz +static const nsecs_t kRefitRefreshPeriod = 10 * kNanosIn1s; // 10 sec + +VideoFrameScheduler::PLL::PLL() + : mPeriod(-1), + mPhase(0), + mPrimed(false), + mSamplesUsedForPriming(0), + mLastTime(-1), + mNumSamples(0) { +} + +void VideoFrameScheduler::PLL::reset(float fps) { + //test(); + + mSamplesUsedForPriming = 0; + mLastTime = -1; + + // set up or reset video PLL + if (fps <= 0.f) { + mPeriod = -1; + mPrimed = false; + } else { + ALOGV("reset at %.1f fps", fps); + mPeriod = (nsecs_t)(1e9 / fps + 0.5); + mPrimed = true; + } 
+ + restart(); +} + +// reset PLL but keep previous period estimate +void VideoFrameScheduler::PLL::restart() { + mNumSamples = 0; + mPhase = -1; +} + +#if 0 + +void VideoFrameScheduler::PLL::test() { + nsecs_t period = kNanosIn1s / 60; + mTimes[0] = 0; + mTimes[1] = period; + mTimes[2] = period * 3; + mTimes[3] = period * 4; + mTimes[4] = period * 7; + mTimes[5] = period * 8; + mTimes[6] = period * 10; + mTimes[7] = period * 12; + mNumSamples = 8; + int64_t a, b, err; + fit(0, period * 12 / 7, 8, &a, &b, &err); + // a = 0.8(5)+ + // b = -0.14097(2)+ + // err = 0.2750578(703)+ + ALOGD("a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)", + (long long)a, (a / (float)(1 << kPrecision)), + (long long)b, (b / (float)(1 << kPrecision)), + (long long)err, (err / (float)(1 << (kPrecision * 2)))); +} + +#endif + +bool VideoFrameScheduler::PLL::fit( + nsecs_t phase, nsecs_t period, size_t numSamplesToUse, + int64_t *a, int64_t *b, int64_t *err) { + if (numSamplesToUse > mNumSamples) { + numSamplesToUse = mNumSamples; + } + + int64_t sumX = 0; + int64_t sumXX = 0; + int64_t sumXY = 0; + int64_t sumYY = 0; + int64_t sumY = 0; + + int64_t x = 0; // x usually is in [0..numSamplesToUse) + nsecs_t lastTime; + for (size_t i = 0; i < numSamplesToUse; i++) { + size_t ix = (mNumSamples - numSamplesToUse + i) % kHistorySize; + nsecs_t time = mTimes[ix]; + if (i > 0) { + x += divRound(time - lastTime, period); + } + // y is usually in [-numSamplesToUse..numSamplesToUse+kRefitRefreshPeriod/kMinPeriod) << kPrecision + // ideally in [0..numSamplesToUse), but shifted by -numSamplesToUse during + // priming, and possibly shifted by up to kRefitRefreshPeriod/kMinPeriod + // while we are not refitting. 
+ int64_t y = divRound(time - phase, period >> kPrecision); + sumX += x; + sumY += y; + sumXX += x * x; + sumXY += x * y; + sumYY += y * y; + lastTime = time; + } + + int64_t div = numSamplesToUse * sumXX - sumX * sumX; + if (div == 0) { + return false; + } + + int64_t a_nom = numSamplesToUse * sumXY - sumX * sumY; + int64_t b_nom = sumXX * sumY - sumX * sumXY; + *a = divRound(a_nom, div); + *b = divRound(b_nom, div); + // don't use a and b directly as the rounding error is significant + *err = sumYY - divRound(a_nom * sumXY + b_nom * sumY, div); + ALOGV("fitting[%zu] a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)", + numSamplesToUse, + (long long)*a, (*a / (float)(1 << kPrecision)), + (long long)*b, (*b / (float)(1 << kPrecision)), + (long long)*err, (*err / (float)(1 << (kPrecision * 2)))); + return true; +} + +void VideoFrameScheduler::PLL::prime(size_t numSamplesToUse) { + if (numSamplesToUse > mNumSamples) { + numSamplesToUse = mNumSamples; + } + CHECK(numSamplesToUse >= 3); // must have at least 3 samples + + // estimate video framerate from deltas between timestamps, and + // 2nd order deltas + Vector<nsecs_t> deltas; + nsecs_t lastTime, firstTime; + for (size_t i = 0; i < numSamplesToUse; ++i) { + size_t index = (mNumSamples - numSamplesToUse + i) % kHistorySize; + nsecs_t time = mTimes[index]; + if (i > 0) { + if (time - lastTime > kMinPeriod) { + //ALOGV("delta: %lld", (long long)(time - lastTime)); + deltas.push(time - lastTime); + } + } else { + firstTime = time; + } + lastTime = time; + } + deltas.sort(compare<nsecs_t>); + size_t numDeltas = deltas.size(); + if (numDeltas > 1) { + nsecs_t deltaMinLimit = max(deltas[0] / kMultiplesThresholdDiv, kMinPeriod); + nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv; + for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) { + if (deltas[i] > deltaMaxLimit) { + deltas.resize(i); + numDeltas = i; + break; + } + } + for (size_t i = 1; i < numDeltas; ++i) { + nsecs_t delta2nd = deltas[i] - 
deltas[i - 1]; + if (delta2nd >= deltaMinLimit) { + //ALOGV("delta2: %lld", (long long)(delta2nd)); + deltas.push(delta2nd); + } + } + } + + // use the one that yields the best match + int64_t bestScore; + for (size_t i = 0; i < deltas.size(); ++i) { + nsecs_t delta = deltas[i]; + int64_t score = 0; +#if 1 + // simplest score: number of deltas that are near multiples + size_t matches = 0; + for (size_t j = 0; j < deltas.size(); ++j) { + nsecs_t err = periodicError(deltas[j], delta); + if (err < delta / kMultiplesThresholdDiv) { + ++matches; + } + } + score = matches; +#if 0 + // could be weighed by the (1 - normalized error) + if (numSamplesToUse >= kMinSamplesToEstimatePeriod) { + int64_t a, b, err; + fit(firstTime, delta, numSamplesToUse, &a, &b, &err); + err = (1 << (2 * kPrecision)) - err; + score *= max(0, err); + } +#endif +#else + // or use the error as a negative score + if (numSamplesToUse >= kMinSamplesToEstimatePeriod) { + int64_t a, b, err; + fit(firstTime, delta, numSamplesToUse, &a, &b, &err); + score = -delta * err; + } +#endif + if (i == 0 || score > bestScore) { + bestScore = score; + mPeriod = delta; + mPhase = firstTime; + } + } + ALOGV("priming[%zu] phase:%lld period:%lld", numSamplesToUse, mPhase, mPeriod); +} + +nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) { + if (mLastTime >= 0 + // if time goes backward, or we skipped rendering + && (time > mLastTime + kMaxAllowedFrameSkip || time < mLastTime)) { + restart(); + } + + mLastTime = time; + mTimes[mNumSamples % kHistorySize] = time; + ++mNumSamples; + + bool doFit = time > mRefitAt; + if ((mPeriod <= 0 || !mPrimed) && mNumSamples >= kMinSamplesToStartPrime) { + prime(kMinSamplesToStopPrime); + ++mSamplesUsedForPriming; + doFit = true; + } + if (mPeriod > 0 && mNumSamples >= kMinSamplesToEstimatePeriod) { + if (mPhase < 0) { + // initialize phase to the current render time + mPhase = time; + doFit = true; + } else if (!doFit) { + int64_t err = periodicError(time - mPhase, mPeriod); + 
doFit = err > mPeriod / kReFitThresholdDiv; + } + + if (doFit) { + int64_t a, b, err; + if (!fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err)) { + // samples are not suitable for fitting. this means they are + // also not suitable for priming. + ALOGV("could not fit - keeping old period:%lld", (long long)mPeriod); + return mPeriod; + } + + mRefitAt = time + kRefitRefreshPeriod; + + mPhase += (mPeriod * b) >> kPrecision; + mPeriod = (mPeriod * a) >> kPrecision; + ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod); + + if (err < kErrorThreshold) { + if (!mPrimed && mSamplesUsedForPriming >= kMinSamplesToStopPrime) { + mPrimed = true; + } + } else { + mPrimed = false; + mSamplesUsedForPriming = 0; + } + } + } + return mPeriod; +} + +/* ======================================================================= */ +/* Frame Scheduler */ +/* ======================================================================= */ + +static const nsecs_t kDefaultVsyncPeriod = kNanosIn1s / 60; // 60Hz +static const nsecs_t kVsyncRefreshPeriod = kNanosIn1s; // 1 sec + +VideoFrameScheduler::VideoFrameScheduler() + : mVsyncTime(0), + mVsyncPeriod(0), + mVsyncRefreshAt(0), + mLastVsyncTime(-1), + mTimeCorrection(0) { +} + +void VideoFrameScheduler::updateVsync() { + mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod; + mVsyncPeriod = 0; + mVsyncTime = 0; + + // TODO: schedule frames for the destination surface + // For now, surface flinger only schedules frames on the primary display + if (mComposer == NULL) { + String16 name("SurfaceFlinger"); + sp<IServiceManager> sm = defaultServiceManager(); + mComposer = interface_cast<ISurfaceComposer>(sm->checkService(name)); + } + if (mComposer != NULL) { + DisplayStatInfo stats; + status_t res = mComposer->getDisplayStats(NULL /* display */, &stats); + if (res == OK) { + ALOGV("vsync time:%lld period:%lld", + (long long)stats.vsyncTime, (long long)stats.vsyncPeriod); + mVsyncTime = 
stats.vsyncTime; + mVsyncPeriod = stats.vsyncPeriod; + } else { + ALOGW("getDisplayStats returned %d", res); + } + } else { + ALOGW("could not get surface mComposer service"); + } +} + +void VideoFrameScheduler::init(float videoFps) { + updateVsync(); + + mLastVsyncTime = -1; + mTimeCorrection = 0; + + mPll.reset(videoFps); +} + +void VideoFrameScheduler::restart() { + mLastVsyncTime = -1; + mTimeCorrection = 0; + + mPll.restart(); +} + +nsecs_t VideoFrameScheduler::getVsyncPeriod() { + if (mVsyncPeriod > 0) { + return mVsyncPeriod; + } + return kDefaultVsyncPeriod; +} + +nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) { + nsecs_t origRenderTime = renderTime; + + nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); + if (now >= mVsyncRefreshAt) { + updateVsync(); + } + + // without VSYNC info, there is nothing to do + if (mVsyncPeriod == 0) { + ALOGV("no vsync: render=%lld", (long long)renderTime); + return renderTime; + } + + // ensure vsync time is well before (corrected) render time + if (mVsyncTime > renderTime - 4 * mVsyncPeriod) { + mVsyncTime -= + ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod; + } + + // Video presentation takes place at the VSYNC _after_ renderTime. Adjust renderTime + // so this effectively becomes a rounding operation (to the _closest_ VSYNC.) + renderTime -= mVsyncPeriod / 2; + + const nsecs_t videoPeriod = mPll.addSample(origRenderTime); + if (videoPeriod > 0) { + // Smooth out rendering + size_t N = 12; + nsecs_t fiveSixthDev = + abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod) + / (mVsyncPeriod / 100); + // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. 
playing 50Hz on 60Hz) + if (fiveSixthDev < 12) { /* 12% / 6 = 2% */ + N = 20; + } + + nsecs_t offset = 0; + nsecs_t edgeRemainder = 0; + for (size_t i = 1; i <= N; i++) { + offset += + (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod; + edgeRemainder += (videoPeriod * i) % mVsyncPeriod; + } + mTimeCorrection += mVsyncPeriod / 2 - offset / N; + renderTime += mTimeCorrection; + nsecs_t correctionLimit = mVsyncPeriod * 3 / 5; + edgeRemainder = abs(edgeRemainder / N - mVsyncPeriod / 2); + if (edgeRemainder <= mVsyncPeriod / 3) { + correctionLimit /= 2; + } + + // estimate how many VSYNCs a frame will spend on the display + nsecs_t nextVsyncTime = + renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod); + if (mLastVsyncTime >= 0) { + size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod; + size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod); + bool vsyncsPerFrameAreNearlyConstant = + periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0; + + if (mTimeCorrection > correctionLimit && + (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) { + // remove a VSYNC + mTimeCorrection -= mVsyncPeriod / 2; + renderTime -= mVsyncPeriod / 2; + nextVsyncTime -= mVsyncPeriod; + --vsyncsForLastFrame; + } else if (mTimeCorrection < -correctionLimit && + (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) { + // add a VSYNC + mTimeCorrection += mVsyncPeriod / 2; + renderTime += mVsyncPeriod / 2; + nextVsyncTime += mVsyncPeriod; + ++vsyncsForLastFrame; + } + ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame); + } + mLastVsyncTime = nextVsyncTime; + } + + // align rendertime to the center between VSYNC edges + renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod; + renderTime += mVsyncPeriod / 2; + ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime); + ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000); + 
return renderTime; +} + +void VideoFrameScheduler::release() { + mComposer.clear(); +} + +VideoFrameScheduler::~VideoFrameScheduler() { + release(); +} + +} // namespace android + diff --git a/media/libmediaplayerservice/VideoFrameScheduler.h b/media/libmediaplayerservice/VideoFrameScheduler.h new file mode 100644 index 0000000..84b27b4 --- /dev/null +++ b/media/libmediaplayerservice/VideoFrameScheduler.h @@ -0,0 +1,99 @@ +/* + * Copyright 2014, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VIDEO_FRAME_SCHEDULER_H_ +#define VIDEO_FRAME_SCHEDULER_H_ + +#include <utils/RefBase.h> +#include <utils/Timers.h> + +#include <media/stagefright/foundation/ABase.h> + +namespace android { + +struct ISurfaceComposer; + +struct VideoFrameScheduler : public RefBase { + VideoFrameScheduler(); + + // (re)initialize scheduler + void init(float videoFps = -1); + // use in case of video render-time discontinuity, e.g. 
seek + void restart(); + // get adjusted nanotime for a video frame render at renderTime + nsecs_t schedule(nsecs_t renderTime); + + // returns the vsync period for the main display + nsecs_t getVsyncPeriod(); + + void release(); + + static const size_t kHistorySize = 8; + +protected: + virtual ~VideoFrameScheduler(); + +private: + struct PLL { + PLL(); + + // reset PLL to new PLL + void reset(float fps = -1); + // keep current estimate, but restart phase + void restart(); + // returns period + nsecs_t addSample(nsecs_t time); + + private: + nsecs_t mPeriod; + nsecs_t mPhase; + + bool mPrimed; // have an estimate for the period + size_t mSamplesUsedForPriming; + + nsecs_t mLastTime; // last input time + nsecs_t mRefitAt; // next input time to fit at + + size_t mNumSamples; // can go past kHistorySize + nsecs_t mTimes[kHistorySize]; + + void test(); + // returns whether fit was successful + bool fit(nsecs_t phase, nsecs_t period, size_t numSamples, + int64_t *a, int64_t *b, int64_t *err); + void prime(size_t numSamples); + }; + + void updateVsync(); + + nsecs_t mVsyncTime; // vsync timing from display + nsecs_t mVsyncPeriod; + nsecs_t mVsyncRefreshAt; // next time to refresh timing info + + nsecs_t mLastVsyncTime; // estimated vsync time for last frame + nsecs_t mTimeCorrection; // running adjustment + + PLL mPll; // PLL for video frame rate based on render time + + sp<ISurfaceComposer> mComposer; + + DISALLOW_EVIL_CONSTRUCTORS(VideoFrameScheduler); +}; + +} // namespace android + +#endif // VIDEO_FRAME_SCHEDULER_H_ + diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk index 0dd2b61..676c0a6 100644 --- a/media/libmediaplayerservice/nuplayer/Android.mk +++ b/media/libmediaplayerservice/nuplayer/Android.mk @@ -19,6 +19,7 @@ LOCAL_C_INCLUDES := \ $(TOP)/frameworks/av/media/libstagefright/mpeg2ts \ $(TOP)/frameworks/av/media/libstagefright/rtsp \ $(TOP)/frameworks/av/media/libstagefright/timedtext \ + 
$(TOP)/frameworks/av/media/libmediaplayerservice \ $(TOP)/frameworks/native/include/media/openmax LOCAL_MODULE:= libstagefright_nuplayer diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp index 8e1987a..6859a1a 100644 --- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp +++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp @@ -36,6 +36,7 @@ #include "../../libstagefright/include/DRMExtractor.h" #include "../../libstagefright/include/NuCachedSource2.h" #include "../../libstagefright/include/WVMExtractor.h" +#include "../../libstagefright/include/HTTPBase.h" namespace android { @@ -54,7 +55,8 @@ NuPlayer::GenericSource::GenericSource( mDrmManagerClient(NULL), mMetaDataSize(-1ll), mBitrate(-1ll), - mPollBufferingGeneration(0) { + mPollBufferingGeneration(0), + mPendingReadBufferTypes(0) { resetDataSource(); DataSource::RegisterDefaultSniffers(); } @@ -63,6 +65,7 @@ void NuPlayer::GenericSource::resetDataSource() { mAudioTimeUs = 0; mVideoTimeUs = 0; mHTTPService.clear(); + mHttpSource.clear(); mUri.clear(); mUriHeaders.clear(); mFd = -1; @@ -72,6 +75,7 @@ void NuPlayer::GenericSource::resetDataSource() { mDecryptHandle = NULL; mDrmManagerClient = NULL; mStarted = false; + mStopRead = true; } status_t NuPlayer::GenericSource::setDataSource( @@ -105,6 +109,10 @@ status_t NuPlayer::GenericSource::setDataSource( return OK; } +sp<MetaData> NuPlayer::GenericSource::getFileFormatMeta() const { + return mFileMeta; +} + status_t NuPlayer::GenericSource::initFromDataSource() { sp<MediaExtractor> extractor; @@ -143,17 +151,22 @@ status_t NuPlayer::GenericSource::initFromDataSource() { checkDrmStatus(mDataSource); } - sp<MetaData> fileMeta = extractor->getMetaData(); - if (fileMeta != NULL) { + mFileMeta = extractor->getMetaData(); + if (mFileMeta != NULL) { int64_t duration; - if (fileMeta->findInt64(kKeyDuration, &duration)) { + if (mFileMeta->findInt64(kKeyDuration, &duration)) { 
mDurationUs = duration; } } int32_t totalBitrate = 0; - for (size_t i = 0; i < extractor->countTracks(); ++i) { + size_t numtracks = extractor->countTracks(); + if (numtracks == 0) { + return UNKNOWN_ERROR; + } + + for (size_t i = 0; i < numtracks; ++i) { sp<MediaSource> track = extractor->getTrack(i); sp<MetaData> meta = extractor->getTrackMetaData(i); @@ -169,6 +182,8 @@ status_t NuPlayer::GenericSource::initFromDataSource() { if (mAudioTrack.mSource == NULL) { mAudioTrack.mIndex = i; mAudioTrack.mSource = track; + mAudioTrack.mPackets = + new AnotherPacketSource(mAudioTrack.mSource->getFormat()); if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) { mAudioIsVorbis = true; @@ -180,6 +195,8 @@ status_t NuPlayer::GenericSource::initFromDataSource() { if (mVideoTrack.mSource == NULL) { mVideoTrack.mIndex = i; mVideoTrack.mSource = track; + mVideoTrack.mPackets = + new AnotherPacketSource(mVideoTrack.mSource->getFormat()); // check if the source requires secure buffers int32_t secure; @@ -270,10 +287,23 @@ void NuPlayer::GenericSource::onPrepareAsync() { // delayed data source creation if (mDataSource == NULL) { if (!mUri.empty()) { - mIsWidevine = !strncasecmp(mUri.c_str(), "widevine://", 11); + const char* uri = mUri.c_str(); + mIsWidevine = !strncasecmp(uri, "widevine://", 11); + + if (!strncasecmp("http://", uri, 7) + || !strncasecmp("https://", uri, 8) + || mIsWidevine) { + mHttpSource = DataSource::CreateMediaHTTP(mHTTPService); + if (mHttpSource == NULL) { + ALOGE("Failed to create http source!"); + notifyPreparedAndCleanup(UNKNOWN_ERROR); + return; + } + } mDataSource = DataSource::CreateFromURI( - mHTTPService, mUri.c_str(), &mUriHeaders, &mContentType); + mHTTPService, uri, &mUriHeaders, &mContentType, + static_cast<HTTPBase *>(mHttpSource.get())); } else { // set to false first, if the extractor // comes back as secure, set it to true then. 
@@ -346,6 +376,7 @@ void NuPlayer::GenericSource::notifyPreparedAndCleanup(status_t err) { mSniffedMIME = ""; mDataSource.clear(); mCachedSource.clear(); + mHttpSource.clear(); cancelPollBuffering(); } @@ -425,18 +456,15 @@ status_t NuPlayer::GenericSource::prefillCacheIfNecessary() { void NuPlayer::GenericSource::start() { ALOGI("start"); + mStopRead = false; if (mAudioTrack.mSource != NULL) { CHECK_EQ(mAudioTrack.mSource->start(), (status_t)OK); - mAudioTrack.mPackets = - new AnotherPacketSource(mAudioTrack.mSource->getFormat()); postReadBuffer(MEDIA_TRACK_TYPE_AUDIO); } if (mVideoTrack.mSource != NULL) { CHECK_EQ(mVideoTrack.mSource->start(), (status_t)OK); - mVideoTrack.mPackets = - new AnotherPacketSource(mVideoTrack.mSource->getFormat()); postReadBuffer(MEDIA_TRACK_TYPE_VIDEO); } @@ -449,6 +477,12 @@ void NuPlayer::GenericSource::stop() { // nothing to do, just account for DRM playback status setDrmPlaybackStatusIfNeeded(Playback::STOP, 0); mStarted = false; + if (mIsWidevine) { + // For a widevine source we need to prevent any further reads. 
+ sp<AMessage> msg = new AMessage(kWhatStopWidevine, id()); + sp<AMessage> response; + (void) msg->postAndAwaitResponse(&response); + } } void NuPlayer::GenericSource::pause() { @@ -463,6 +497,17 @@ void NuPlayer::GenericSource::resume() { mStarted = true; } +void NuPlayer::GenericSource::disconnect() { + if (mDataSource != NULL) { + // disconnect data source + if (mDataSource->flags() & DataSource::kIsCachingDataSource) { + static_cast<NuCachedSource2 *>(mDataSource.get())->disconnect(); + } + } else if (mHttpSource != NULL) { + static_cast<HTTPBase *>(mHttpSource.get())->disconnect(); + } +} + void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) { if (mDecryptHandle != NULL) { mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position); @@ -656,6 +701,20 @@ void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) { break; } + case kWhatStopWidevine: + { + // mStopRead is only used for Widevine to prevent the video source + // from being read while the associated video decoder is shutting down. + mStopRead = true; + if (mVideoTrack.mSource != NULL) { + mVideoTrack.mPackets->clear(); + } + sp<AMessage> response = new AMessage; + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + response->postReply(replyID); + break; + } default: Source::onMessageReceived(msg); break; @@ -941,7 +1000,7 @@ status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) { ALOGV("%s track: %zu", select ? 
"select" : "deselect", trackIndex); sp<AMessage> msg = new AMessage(kWhatSelectTrack, id()); msg->setInt32("trackIndex", trackIndex); - msg->setInt32("select", trackIndex); + msg->setInt32("select", select); sp<AMessage> response; status_t err = msg->postAndAwaitResponse(&response); @@ -1063,6 +1122,11 @@ void NuPlayer::GenericSource::onSeek(sp<AMessage> msg) { } status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) { + // If the Widevine source is stopped, do not attempt to read any + // more buffers. + if (mStopRead) { + return INVALID_OPERATION; + } if (mVideoTrack.mSource != NULL) { int64_t actualTimeUs; readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs); @@ -1096,8 +1160,8 @@ sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer( if (mIsWidevine && !audio) { // data is already provided in the buffer ab = new ABuffer(NULL, mb->range_length()); - ab->meta()->setPointer("mediaBuffer", mb); mb->add_ref(); + ab->setMediaBufferBase(mb); } else { ab = new ABuffer(outLength); memcpy(ab->data(), @@ -1148,27 +1212,52 @@ sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer( } void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) { - sp<AMessage> msg = new AMessage(kWhatReadBuffer, id()); - msg->setInt32("trackType", trackType); - msg->post(); + Mutex::Autolock _l(mReadBufferLock); + + if ((mPendingReadBufferTypes & (1 << trackType)) == 0) { + mPendingReadBufferTypes |= (1 << trackType); + sp<AMessage> msg = new AMessage(kWhatReadBuffer, id()); + msg->setInt32("trackType", trackType); + msg->post(); + } } void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) { int32_t tmpType; CHECK(msg->findInt32("trackType", &tmpType)); media_track_type trackType = (media_track_type)tmpType; + { + // only protect the variable change, as readBuffer may + // take considerable time. This may result in one extra + // read being processed, but that is benign. 
+ Mutex::Autolock _l(mReadBufferLock); + mPendingReadBufferTypes &= ~(1 << trackType); + } readBuffer(trackType); } void NuPlayer::GenericSource::readBuffer( media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) { + // Do not read data if Widevine source is stopped + if (mStopRead) { + return; + } Track *track; + size_t maxBuffers = 1; switch (trackType) { case MEDIA_TRACK_TYPE_VIDEO: track = &mVideoTrack; + if (mIsWidevine) { + maxBuffers = 2; + } break; case MEDIA_TRACK_TYPE_AUDIO: track = &mAudioTrack; + if (mIsWidevine) { + maxBuffers = 8; + } else { + maxBuffers = 64; + } break; case MEDIA_TRACK_TYPE_SUBTITLE: track = &mSubtitleTrack; @@ -1201,7 +1290,7 @@ void NuPlayer::GenericSource::readBuffer( options.setNonBlocking(); } - for (;;) { + for (size_t numBuffers = 0; numBuffers < maxBuffers; ) { MediaBuffer *mbuf; status_t err = track->mSource->read(&mbuf, &options); @@ -1232,7 +1321,9 @@ void NuPlayer::GenericSource::readBuffer( sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType, actualTimeUs); track->mPackets->queueAccessUnit(buffer); - break; + formatChange = false; + seeking = false; + ++numBuffers; } else if (err == WOULD_BLOCK) { break; } else if (err == INFO_FORMAT_CHANGED) { diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h index 50ff98a..f8601ea 100644 --- a/media/libmediaplayerservice/nuplayer/GenericSource.h +++ b/media/libmediaplayerservice/nuplayer/GenericSource.h @@ -55,8 +55,12 @@ struct NuPlayer::GenericSource : public NuPlayer::Source { virtual void pause(); virtual void resume(); + virtual void disconnect(); + virtual status_t feedMoreTSData(); + virtual sp<MetaData> getFileFormatMeta() const; + virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit); virtual status_t getDuration(int64_t *durationUs); @@ -89,6 +93,7 @@ private: kWhatSelectTrack, kWhatSeek, kWhatReadBuffer, + kWhatStopWidevine, }; 
Vector<sp<MediaSource> > mSources; @@ -122,15 +127,20 @@ private: sp<DataSource> mDataSource; sp<NuCachedSource2> mCachedSource; + sp<DataSource> mHttpSource; sp<WVMExtractor> mWVMExtractor; + sp<MetaData> mFileMeta; DrmManagerClient *mDrmManagerClient; sp<DecryptHandle> mDecryptHandle; bool mStarted; + bool mStopRead; String8 mContentType; AString mSniffedMIME; off64_t mMetaDataSize; int64_t mBitrate; int32_t mPollBufferingGeneration; + uint32_t mPendingReadBufferTypes; + mutable Mutex mReadBufferLock; sp<ALooper> mLooper; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp index c8bf8f0..53eec91 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp @@ -50,6 +50,10 @@ namespace android { +// TODO optimize buffer size for power consumption +// The offload read buffer size is 32 KB but 24 KB uses less power. +const size_t NuPlayer::kAggregateBufferSizeBytes = 24 * 1024; + struct NuPlayer::Action : public RefBase { Action() {} @@ -60,16 +64,18 @@ private: }; struct NuPlayer::SeekAction : public Action { - SeekAction(int64_t seekTimeUs) - : mSeekTimeUs(seekTimeUs) { + SeekAction(int64_t seekTimeUs, bool needNotify) + : mSeekTimeUs(seekTimeUs), + mNeedNotify(needNotify) { } virtual void execute(NuPlayer *player) { - player->performSeek(mSeekTimeUs); + player->performSeek(mSeekTimeUs, mNeedNotify); } private: int64_t mSeekTimeUs; + bool mNeedNotify; DISALLOW_EVIL_CONSTRUCTORS(SeekAction); }; @@ -151,6 +157,7 @@ NuPlayer::NuPlayer() mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER), mAudioDecoderGeneration(0), mVideoDecoderGeneration(0), + mRendererGeneration(0), mAudioEOS(false), mVideoEOS(false), mScanSourcesPending(false), @@ -306,12 +313,23 @@ void NuPlayer::resume() { } void NuPlayer::resetAsync() { + if (mSource != NULL) { + // During a reset, the data source might be unresponsive already, we need to + // disconnect explicitly so that reads exit 
promptly. + // We can't queue the disconnect request to the looper, as it might be + // queued behind a stuck read and never gets processed. + // Doing a disconnect outside the looper to allows the pending reads to exit + // (either successfully or with error). + mSource->disconnect(); + } + (new AMessage(kWhatReset, id()))->post(); } -void NuPlayer::seekToAsync(int64_t seekTimeUs) { +void NuPlayer::seekToAsync(int64_t seekTimeUs, bool needNotify) { sp<AMessage> msg = new AMessage(kWhatSeek, id()); msg->setInt64("seekTimeUs", seekTimeUs); + msg->setInt32("needNotify", needNotify); msg->post(); } @@ -541,12 +559,13 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { static_cast<NativeWindowWrapper *>(obj.get()))); if (obj != NULL) { - if (mStarted && mVideoDecoder != NULL) { + if (mStarted && mSource->getFormat(false /* audio */) != NULL) { // Issue a seek to refresh the video screen only if started otherwise // the extractor may not yet be started and will assert. // If the video decoder is not set (perhaps audio only in this case) // do not perform a seek as it is not needed. 
- mDeferredActions.push_back(new SeekAction(mCurrentPositionUs)); + mDeferredActions.push_back( + new SeekAction(mCurrentPositionUs, false /* needNotify */)); } // If there is a new surface texture, instantiate decoders @@ -619,16 +638,23 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { flags |= Renderer::FLAG_OFFLOAD_AUDIO; } - mRenderer = new Renderer( - mAudioSink, - new AMessage(kWhatRendererNotify, id()), - flags); + sp<AMessage> notify = new AMessage(kWhatRendererNotify, id()); + ++mRendererGeneration; + notify->setInt32("generation", mRendererGeneration); + mRenderer = new Renderer(mAudioSink, notify, flags); mRendererLooper = new ALooper; mRendererLooper->setName("NuPlayerRenderer"); mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO); mRendererLooper->registerHandler(mRenderer); + sp<MetaData> meta = getFileMeta(); + int32_t rate; + if (meta != NULL + && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) { + mRenderer->setVideoFrameRate(rate); + } + postScanSources(); break; } @@ -730,7 +756,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { if (err == -EWOULDBLOCK) { if (mSource->feedMoreTSData() == OK) { - msg->post(10000ll); + msg->post(10 * 1000ll); } } } else if (what == Decoder::kWhatEOS) { @@ -765,6 +791,11 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { ALOGV("initiating %s decoder shutdown", audio ? "audio" : "video"); + // Widevine source reads must stop before releasing the video decoder. + if (!audio && mSource != NULL && mSourceFlags & Source::FLAG_SECURE) { + mSource->stop(); + } + getDecoder(audio)->initiateShutdown(); if (audio) { @@ -792,11 +823,13 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { ALOGV("%s shutdown completed", audio ? 
"audio" : "video"); if (audio) { mAudioDecoder.clear(); + ++mAudioDecoderGeneration; CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER); mFlushingAudio = SHUT_DOWN; } else { mVideoDecoder.clear(); + ++mVideoDecoderGeneration; CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER); mFlushingVideo = SHUT_DOWN; @@ -804,22 +837,51 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { finishFlushIfPossible(); } else if (what == Decoder::kWhatError) { - ALOGE("Received error from %s decoder, aborting playback.", - audio ? "audio" : "video"); - status_t err; - if (!msg->findInt32("err", &err)) { + if (!msg->findInt32("err", &err) || err == OK) { err = UNKNOWN_ERROR; } - mRenderer->queueEOS(audio, err); - if (audio && mFlushingAudio != NONE) { - mAudioDecoder.clear(); - mFlushingAudio = SHUT_DOWN; - } else if (!audio && mFlushingVideo != NONE){ - mVideoDecoder.clear(); - mFlushingVideo = SHUT_DOWN; + + // Decoder errors can be due to Source (e.g. from streaming), + // or from decoding corrupted bitstreams, or from other decoder + // MediaCodec operations (e.g. from an ongoing reset or seek). + // + // We try to gracefully shut down the affected decoder if possible, + // rather than trying to force the shutdown with something + // similar to performReset(). This method can lead to a hang + // if MediaCodec functions block after an error, but they should + // typically return INVALID_OPERATION instead of blocking. + + FlushStatus *flushing = audio ? &mFlushingAudio : &mFlushingVideo; + ALOGE("received error(%#x) from %s decoder, flushing(%d), now shutting down", + err, audio ? "audio" : "video", *flushing); + + switch (*flushing) { + case NONE: + mDeferredActions.push_back( + new ShutdownDecoderAction(audio, !audio /* video */)); + processDeferredActions(); + break; + case FLUSHING_DECODER: + *flushing = FLUSHING_DECODER_SHUTDOWN; // initiate shutdown after flush. + break; // Wait for flush to complete. 
+ case FLUSHING_DECODER_SHUTDOWN: + break; // Wait for flush to complete. + case SHUTTING_DOWN_DECODER: + break; // Wait for shutdown to complete. + case FLUSHED: + // Widevine source reads must stop before releasing the video decoder. + if (!audio && mSource != NULL && mSourceFlags & Source::FLAG_SECURE) { + mSource->stop(); + } + getDecoder(audio)->initiateShutdown(); // In the middle of a seek. + *flushing = SHUTTING_DOWN_DECODER; // Shut down. + break; + case SHUT_DOWN: + finishFlushIfPossible(); // Should not occur. + break; // Finish anyways. } - finishFlushIfPossible(); + notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err); } else if (what == Decoder::kWhatDrainThisBuffer) { renderBuffer(audio, msg); } else { @@ -836,6 +898,14 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { case kWhatRendererNotify: { + int32_t requesterGeneration = mRendererGeneration - 1; + CHECK(msg->findInt32("generation", &requesterGeneration)); + if (requesterGeneration != mRendererGeneration) { + ALOGV("got message from old renderer, generation(%d:%d)", + requesterGeneration, mRendererGeneration); + return; + } + int32_t what; CHECK(msg->findInt32("what", &what)); @@ -896,8 +966,11 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { ALOGV("Tear down audio offload, fall back to s/w path"); int64_t positionUs; CHECK(msg->findInt64("positionUs", &positionUs)); + int32_t reason; + CHECK(msg->findInt32("reason", &reason)); closeAudioSink(); mAudioDecoder.clear(); + ++mAudioDecoderGeneration; mRenderer->flush(true /* audio */); if (mVideoDecoder != NULL) { mRenderer->flush(false /* audio */); @@ -905,8 +978,10 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { mRenderer->signalDisableOffloadAudio(); mOffloadAudio = false; - performSeek(positionUs); - instantiateDecoder(true /* audio */, &mAudioDecoder); + performSeek(positionUs, false /* needNotify */); + if (reason == Renderer::kDueToError) { + instantiateDecoder(true /* audio */, &mAudioDecoder); + 
} } break; } @@ -934,14 +1009,18 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { case kWhatSeek: { int64_t seekTimeUs; + int32_t needNotify; CHECK(msg->findInt64("seekTimeUs", &seekTimeUs)); + CHECK(msg->findInt32("needNotify", &needNotify)); - ALOGV("kWhatSeek seekTimeUs=%lld us", seekTimeUs); + ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d", + seekTimeUs, needNotify); mDeferredActions.push_back( new SimpleAction(&NuPlayer::performDecoderFlush)); - mDeferredActions.push_back(new SeekAction(seekTimeUs)); + mDeferredActions.push_back( + new SeekAction(seekTimeUs, needNotify)); processDeferredActions(); break; @@ -949,17 +1028,36 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { case kWhatPause: { - CHECK(mRenderer != NULL); - mSource->pause(); - mRenderer->pause(); + if (mSource != NULL) { + mSource->pause(); + } else { + ALOGW("pause called when source is gone or not set"); + } + if (mRenderer != NULL) { + mRenderer->pause(); + } else { + ALOGW("pause called when renderer is gone or not set"); + } break; } case kWhatResume: { - CHECK(mRenderer != NULL); - mSource->resume(); - mRenderer->resume(); + if (mSource != NULL) { + mSource->resume(); + } else { + ALOGW("resume called when source is gone or not set"); + } + // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if + // needed. + if (audioDecoderStillNeeded() && mAudioDecoder == NULL) { + instantiateDecoder(true /* audio */, &mAudioDecoder); + } + if (mRenderer != NULL) { + mRenderer->resume(); + } else { + ALOGW("resume called when renderer is gone or not set"); + } break; } @@ -981,6 +1079,11 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { } } +bool NuPlayer::audioDecoderStillNeeded() { + // Audio decoder is no longer needed if it's in shut/shutting down status. 
+ return ((mFlushingAudio != SHUT_DOWN) && (mFlushingAudio != SHUTTING_DOWN_DECODER)); +} + void NuPlayer::finishFlushIfPossible() { if (mFlushingAudio != NONE && mFlushingAudio != FLUSHED && mFlushingAudio != SHUT_DOWN) { @@ -995,6 +1098,7 @@ void NuPlayer::finishFlushIfPossible() { ALOGV("both audio and video are flushed now."); mPendingAudioAccessUnit.clear(); + mAggregateBuffer.clear(); if (mTimeDiscontinuityPending) { mRenderer->signalTimeDiscontinuity(); @@ -1244,7 +1348,8 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { CHECK(msg->findMessage("reply", &reply)); if ((audio && mFlushingAudio != NONE) - || (!audio && mFlushingVideo != NONE)) { + || (!audio && mFlushingVideo != NONE) + || mSource == NULL) { reply->setInt32("err", INFO_DISCONTINUITY); reply->post(); return OK; @@ -1254,15 +1359,9 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { // Aggregate smaller buffers into a larger buffer. // The goal is to reduce power consumption. - // Unfortunately this does not work with the software AAC decoder. - // TODO optimize buffer size for power consumption - // The offload read buffer size is 32 KB but 24 KB uses less power. - const int kAudioBigBufferSizeBytes = 24 * 1024; + // Note this will not work if the decoder requires one frame per buffer. bool doBufferAggregation = (audio && mOffloadAudio); - sp<ABuffer> biggerBuffer; bool needMoreData = false; - int numSmallBuffers = 0; - bool gotTime = false; bool dropAccessUnit; do { @@ -1278,15 +1377,10 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { } if (err == -EWOULDBLOCK) { - ALOGD("feedDecoderInputData() got EWOULDBLOCK"); - if (biggerBuffer == NULL) { - return err; - } else { - break; // Reply with data that we already have. 
- } + return err; } else if (err != OK) { if (err == INFO_DISCONTINUITY) { - if (biggerBuffer != NULL) { + if (doBufferAggregation && (mAggregateBuffer != NULL)) { // We already have some data so save this for later. mPendingAudioErr = err; mPendingAudioAccessUnit = accessUnit; @@ -1401,46 +1495,45 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { size_t smallSize = accessUnit->size(); needMoreData = false; - if (doBufferAggregation && (biggerBuffer == NULL) + if (doBufferAggregation && (mAggregateBuffer == NULL) // Don't bother if only room for a few small buffers. - && (smallSize < (kAudioBigBufferSizeBytes / 3))) { + && (smallSize < (kAggregateBufferSizeBytes / 3))) { // Create a larger buffer for combining smaller buffers from the extractor. - biggerBuffer = new ABuffer(kAudioBigBufferSizeBytes); - biggerBuffer->setRange(0, 0); // start empty + mAggregateBuffer = new ABuffer(kAggregateBufferSizeBytes); + mAggregateBuffer->setRange(0, 0); // start empty } - if (biggerBuffer != NULL) { + if (doBufferAggregation && (mAggregateBuffer != NULL)) { int64_t timeUs; + int64_t dummy; bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs); + bool bigTimestampValid = mAggregateBuffer->meta()->findInt64("timeUs", &dummy); // Will the smaller buffer fit? - size_t bigSize = biggerBuffer->size(); - size_t roomLeft = biggerBuffer->capacity() - bigSize; + size_t bigSize = mAggregateBuffer->size(); + size_t roomLeft = mAggregateBuffer->capacity() - bigSize; // Should we save this small buffer for the next big buffer? // If the first small buffer did not have a timestamp then save // any buffer that does have a timestamp until the next big buffer. 
if ((smallSize > roomLeft) - || (!gotTime && (numSmallBuffers > 0) && smallTimestampValid)) { + || (!bigTimestampValid && (bigSize > 0) && smallTimestampValid)) { mPendingAudioErr = err; mPendingAudioAccessUnit = accessUnit; accessUnit.clear(); } else { + // Grab time from first small buffer if available. + if ((bigSize == 0) && smallTimestampValid) { + mAggregateBuffer->meta()->setInt64("timeUs", timeUs); + } // Append small buffer to the bigger buffer. - memcpy(biggerBuffer->base() + bigSize, accessUnit->data(), smallSize); + memcpy(mAggregateBuffer->base() + bigSize, accessUnit->data(), smallSize); bigSize += smallSize; - biggerBuffer->setRange(0, bigSize); + mAggregateBuffer->setRange(0, bigSize); - // Keep looping until we run out of room in the biggerBuffer. + // Keep looping until we run out of room in the mAggregateBuffer. needMoreData = true; - // Grab time from first small buffer if available. - if ((numSmallBuffers == 0) && smallTimestampValid) { - biggerBuffer->meta()->setInt64("timeUs", timeUs); - gotTime = true; - } - - ALOGV("feedDecoderInputData() #%d, smallSize = %zu, bigSize = %zu, capacity = %zu", - numSmallBuffers, smallSize, bigSize, biggerBuffer->capacity()); - numSmallBuffers++; + ALOGV("feedDecoderInputData() smallSize = %zu, bigSize = %zu, capacity = %zu", + smallSize, bigSize, mAggregateBuffer->capacity()); } } } while (dropAccessUnit || needMoreData); @@ -1459,9 +1552,11 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { mCCDecoder->decode(accessUnit); } - if (biggerBuffer != NULL) { - ALOGV("feedDecoderInputData() reply with aggregated buffer, %d", numSmallBuffers); - reply->setBuffer("buffer", biggerBuffer); + if (doBufferAggregation && (mAggregateBuffer != NULL)) { + ALOGV("feedDecoderInputData() reply with aggregated buffer, %zu", + mAggregateBuffer->size()); + reply->setBuffer("buffer", mAggregateBuffer); + mAggregateBuffer.clear(); } else { reply->setBuffer("buffer", accessUnit); } @@ -1724,6 +1819,10 
@@ status_t NuPlayer::selectTrack(size_t trackIndex, bool select) { return err; } +sp<MetaData> NuPlayer::getFileMeta() { + return mSource->getFileFormatMeta(); +} + void NuPlayer::schedulePollDuration() { sp<AMessage> msg = new AMessage(kWhatPollDuration, id()); msg->setInt32("generation", mPollDurationGeneration); @@ -1757,10 +1856,11 @@ void NuPlayer::processDeferredActions() { } } -void NuPlayer::performSeek(int64_t seekTimeUs) { - ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)", +void NuPlayer::performSeek(int64_t seekTimeUs, bool needNotify) { + ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), needNotify(%d)", seekTimeUs, - seekTimeUs / 1E6); + seekTimeUs / 1E6, + needNotify); if (mSource == NULL) { // This happens when reset occurs right before the loop mode @@ -1777,7 +1877,9 @@ void NuPlayer::performSeek(int64_t seekTimeUs) { sp<NuPlayerDriver> driver = mDriver.promote(); if (driver != NULL) { driver->notifyPosition(seekTimeUs); - driver->notifySeekComplete(); + if (needNotify) { + driver->notifySeekComplete(); + } } } @@ -1840,6 +1942,7 @@ void NuPlayer::performReset() { mRendererLooper.clear(); } mRenderer.clear(); + ++mRendererGeneration; if (mSource != NULL) { mSource->stop(); diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h index 89ae11c..8157733 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h @@ -59,14 +59,19 @@ struct NuPlayer : public AHandler { // Will notify the driver through "notifyResetComplete" once finished. void resetAsync(); - // Will notify the driver through "notifySeekComplete" once finished. - void seekToAsync(int64_t seekTimeUs); + // Will notify the driver through "notifySeekComplete" once finished + // and needNotify is true. 
+ void seekToAsync(int64_t seekTimeUs, bool needNotify = false); status_t setVideoScalingMode(int32_t mode); status_t getTrackInfo(Parcel* reply) const; status_t getSelectedTrack(int32_t type, Parcel* reply) const; status_t selectTrack(size_t trackIndex, bool select); + sp<MetaData> getFileMeta(); + + static const size_t kAggregateBufferSizeBytes; + protected: virtual ~NuPlayer(); @@ -133,6 +138,7 @@ private: sp<ALooper> mRendererLooper; int32_t mAudioDecoderGeneration; int32_t mVideoDecoderGeneration; + int32_t mRendererGeneration; List<sp<Action> > mDeferredActions; @@ -158,8 +164,11 @@ private: // notion of time has changed. bool mTimeDiscontinuityPending; + // Used by feedDecoderInputData to aggregate small buffers into + // one large buffer. sp<ABuffer> mPendingAudioAccessUnit; status_t mPendingAudioErr; + sp<ABuffer> mAggregateBuffer; FlushStatus mFlushingAudio; FlushStatus mFlushingVideo; @@ -194,6 +203,8 @@ private: void finishFlushIfPossible(); + bool audioDecoderStillNeeded(); + void flushDecoder( bool audio, bool needShutdown, const sp<AMessage> &newFormat = NULL); void updateDecoderFormatWithoutFlush(bool audio, const sp<AMessage> &format); @@ -207,7 +218,7 @@ private: void processDeferredActions(); - void performSeek(int64_t seekTimeUs); + void performSeek(int64_t seekTimeUs, bool needNotify); void performDecoderFlush(); void performDecoderShutdown(bool audio, bool video); void performReset(); diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp index 8ce7baf..27f6131 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp @@ -53,6 +53,10 @@ NuPlayer::Decoder::Decoder( } NuPlayer::Decoder::~Decoder() { + mDecoderLooper->unregisterHandler(id()); + mDecoderLooper->stop(); + + releaseAndResetMediaBuffers(); } static @@ -122,17 +126,22 @@ void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) { 
mCodec->getName(&mComponentName); + status_t err; if (mNativeWindow != NULL) { // disconnect from surface as MediaCodec will reconnect - CHECK_EQ((int)NO_ERROR, - native_window_api_disconnect( - surface.get(), - NATIVE_WINDOW_API_MEDIA)); + err = native_window_api_disconnect( + surface.get(), NATIVE_WINDOW_API_MEDIA); + // We treat this as a warning, as this is a preparatory step. + // Codec will try to connect to the surface, which is where + // any error signaling will occur. + ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err); } - status_t err = mCodec->configure( + err = mCodec->configure( format, surface, NULL /* crypto */, 0 /* flags */); if (err != OK) { ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err); + mCodec->release(); + mCodec.clear(); handleError(err); return; } @@ -145,6 +154,8 @@ void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) { err = mCodec->start(); if (err != OK) { ALOGE("Failed to start %s decoder (err=%d)", mComponentName.c_str(), err); + mCodec->release(); + mCodec.clear(); handleError(err); return; } @@ -178,6 +189,8 @@ void NuPlayer::Decoder::releaseAndResetMediaBuffers() { for (size_t i = 0; i < mInputBufferIsDequeued.size(); i++) { mInputBufferIsDequeued.editItemAt(i) = false; } + + mPendingInputMessages.clear(); } void NuPlayer::Decoder::requestCodecNotification() { @@ -220,7 +233,11 @@ status_t NuPlayer::Decoder::getInputBuffers(Vector<sp<ABuffer> > *buffers) const void NuPlayer::Decoder::handleError(int32_t err) { - mCodec->release(); + // We cannot immediately release the codec due to buffers still outstanding + // in the renderer. We signal to the player the error so it can shutdown/release the + // decoder after flushing and increment the generation to discard unnecessary messages. 
+ + ++mBufferGeneration; sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", kWhatError); @@ -235,6 +252,8 @@ bool NuPlayer::Decoder::handleAnInputBuffer() { mComponentName.c_str(), res == OK ? (int)bufferIx : res); if (res != OK) { if (res != -EAGAIN) { + ALOGE("Failed to dequeue input buffer for %s (err=%d)", + mComponentName.c_str(), res); handleError(res); } return false; @@ -257,7 +276,19 @@ bool NuPlayer::Decoder::handleAnInputBuffer() { ALOGI("[%s] resubmitting CSD", mComponentName.c_str()); reply->setBuffer("buffer", buffer); mCSDsToSubmit.removeAt(0); - reply->post(); + CHECK(onInputBufferFilled(reply)); + return true; + } + + while (!mPendingInputMessages.empty()) { + sp<AMessage> msg = *mPendingInputMessages.begin(); + if (!onInputBufferFilled(msg)) { + break; + } + mPendingInputMessages.erase(mPendingInputMessages.begin()); + } + + if (!mInputBufferIsDequeued.editItemAt(bufferIx)) { return true; } @@ -269,7 +300,7 @@ bool NuPlayer::Decoder::handleAnInputBuffer() { return true; } -void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { +bool android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { size_t bufferIx; CHECK(msg->findSize("buffer-ix", &bufferIx)); CHECK_LT(bufferIx, mInputBuffers.size()); @@ -280,23 +311,21 @@ void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { // handle widevine classic source - that fills an arbitrary input buffer MediaBuffer *mediaBuffer = NULL; - if (hasBuffer && buffer->meta()->findPointer( - "mediaBuffer", (void **)&mediaBuffer)) { - if (mediaBuffer == NULL) { - // received no actual buffer - ALOGW("[%s] received null MediaBuffer %s", - mComponentName.c_str(), msg->debugString().c_str()); - buffer = NULL; - } else { + if (hasBuffer) { + mediaBuffer = (MediaBuffer *)(buffer->getMediaBufferBase()); + if (mediaBuffer != NULL) { // likely filled another buffer than we requested: adjust buffer index size_t ix; for (ix = 0; ix < 
mInputBuffers.size(); ix++) { const sp<ABuffer> &buf = mInputBuffers[ix]; if (buf->data() == mediaBuffer->data()) { // all input buffers are dequeued on start, hence the check - CHECK(mInputBufferIsDequeued[ix]); - ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu", - mComponentName.c_str(), ix, bufferIx); + if (!mInputBufferIsDequeued[ix]) { + ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu", + mComponentName.c_str(), ix, bufferIx); + mediaBuffer->release(); + return false; + } // TRICKY: need buffer for the metadata, so instead, set // codecBuffer to the same (though incorrect) buffer to @@ -313,7 +342,7 @@ void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { } } - mInputBufferIsDequeued.editItemAt(bufferIx) = false; + if (buffer == NULL /* includes !hasBuffer */) { int32_t streamErr = ERROR_END_OF_STREAM; @@ -321,7 +350,7 @@ void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { if (streamErr == OK) { /* buffers are returned to hold on to */ - return; + return true; } // attempt to queue EOS @@ -331,12 +360,18 @@ void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { 0, 0, MediaCodec::BUFFER_FLAG_EOS); - if (streamErr == ERROR_END_OF_STREAM && err != OK) { + if (err == OK) { + mInputBufferIsDequeued.editItemAt(bufferIx) = false; + } else if (streamErr == ERROR_END_OF_STREAM) { streamErr = err; // err will not be ERROR_END_OF_STREAM } if (streamErr != ERROR_END_OF_STREAM) { + ALOGE("Stream error for %s (err=%d), EOS %s queued", + mComponentName.c_str(), + streamErr, + err == OK ? 
"successfully" : "unsuccessfully"); handleError(streamErr); } } else { @@ -366,16 +401,21 @@ void android::NuPlayer::Decoder::onInputBufferFilled(const sp<AMessage> &msg) { timeUs, flags); if (err != OK) { + if (mediaBuffer != NULL) { + mediaBuffer->release(); + } ALOGE("Failed to queue input buffer for %s (err=%d)", mComponentName.c_str(), err); handleError(err); - } - - if (mediaBuffer != NULL) { - CHECK(mMediaBuffers[bufferIx] == NULL); - mMediaBuffers.editItemAt(bufferIx) = mediaBuffer; + } else { + mInputBufferIsDequeued.editItemAt(bufferIx) = false; + if (mediaBuffer != NULL) { + CHECK(mMediaBuffers[bufferIx] == NULL); + mMediaBuffers.editItemAt(bufferIx) = mediaBuffer; + } } } + return true; } bool NuPlayer::Decoder::handleAnOutputBuffer() { @@ -424,6 +464,8 @@ bool NuPlayer::Decoder::handleAnOutputBuffer() { return true; } else if (res != OK) { if (res != -EAGAIN) { + ALOGE("Failed to dequeue output buffer for %s (err=%d)", + mComponentName.c_str(), res); handleError(res); } return false; @@ -467,7 +509,9 @@ void NuPlayer::Decoder::onRenderBuffer(const sp<AMessage> &msg) { size_t bufferIx; CHECK(msg->findSize("buffer-ix", &bufferIx)); if (msg->findInt32("render", &render) && render) { - err = mCodec->renderOutputBufferAndRelease(bufferIx); + int64_t timestampNs; + CHECK(msg->findInt64("timestampNs", ×tampNs)); + err = mCodec->renderOutputBufferAndRelease(bufferIx, timestampNs); } else { err = mCodec->releaseOutputBuffer(bufferIx); } @@ -489,9 +533,9 @@ void NuPlayer::Decoder::onFlush() { if (err != OK) { ALOGE("failed to flush %s (err=%d)", mComponentName.c_str(), err); handleError(err); - return; + // finish with posting kWhatFlushCompleted. + // we attempt to release the buffers even if flush fails. 
} - releaseAndResetMediaBuffers(); sp<AMessage> notify = mNotify->dup(); @@ -529,7 +573,7 @@ void NuPlayer::Decoder::onShutdown() { if (err != OK) { ALOGE("failed to release %s (err=%d)", mComponentName.c_str(), err); handleError(err); - return; + // finish with posting kWhatShutdownCompleted. } sp<AMessage> notify = mNotify->dup(); @@ -578,13 +622,21 @@ void NuPlayer::Decoder::onMessageReceived(const sp<AMessage> &msg) { case kWhatCodecNotify: { if (!isStaleReply(msg)) { - if (!mPaused) { - while (handleAnInputBuffer()) { - } + int32_t numInput, numOutput; + + if (!msg->findInt32("input-buffers", &numInput)) { + numInput = INT32_MAX; } - while (handleAnOutputBuffer()) { + if (!msg->findInt32("output-buffers", &numOutput)) { + numOutput = INT32_MAX; } + + if (!mPaused) { + while (numInput-- > 0 && handleAnInputBuffer()) {} + } + + while (numOutput-- > 0 && handleAnOutputBuffer()) {} } requestCodecNotification(); @@ -594,8 +646,12 @@ void NuPlayer::Decoder::onMessageReceived(const sp<AMessage> &msg) { case kWhatInputBufferFilled: { if (!isStaleReply(msg)) { - onInputBufferFilled(msg); + if (!mPendingInputMessages.empty() + || !onInputBufferFilled(msg)) { + mPendingInputMessages.push_back(msg); + } } + break; } diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h index cc1bdff..dba3eee 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h @@ -80,6 +80,8 @@ private: sp<ALooper> mCodecLooper; sp<ALooper> mDecoderLooper; + List<sp<AMessage> > mPendingInputMessages; + Vector<sp<ABuffer> > mInputBuffers; Vector<sp<ABuffer> > mOutputBuffers; Vector<sp<ABuffer> > mCSDsForCurrentFormat; @@ -98,7 +100,7 @@ private: void onConfigure(const sp<AMessage> &format); void onFlush(); void onResume(); - void onInputBufferFilled(const sp<AMessage> &msg); + bool onInputBufferFilled(const sp<AMessage> &msg); void onRenderBuffer(const 
sp<AMessage> &msg); void onShutdown(); diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp index c9be0dd..f7aacdd 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp @@ -30,8 +30,10 @@ namespace android { -static const int kMaxPendingBuffers = 10; -static const int kMaxCachedBytes = 200000; +static const size_t kMaxCachedBytes = 200000; +// The buffers will contain a bit less than kAggregateBufferSizeBytes. +// So we can start off with just enough buffers to keep the cache full. +static const size_t kMaxPendingBuffers = 1 + (kMaxCachedBytes / NuPlayer::kAggregateBufferSizeBytes); NuPlayer::DecoderPassThrough::DecoderPassThrough( const sp<AMessage> ¬ify) @@ -39,7 +41,8 @@ NuPlayer::DecoderPassThrough::DecoderPassThrough( mNotify(notify), mBufferGeneration(0), mReachedEOS(true), - mPendingBuffers(0), + mPendingBuffersToFill(0), + mPendingBuffersToDrain(0), mCachedBytes(0), mComponentName("pass through decoder") { mDecoderLooper = new ALooper; @@ -79,12 +82,13 @@ bool NuPlayer::DecoderPassThrough::supportsSeamlessFormatChange( void NuPlayer::DecoderPassThrough::onConfigure(const sp<AMessage> &format) { ALOGV("[%s] onConfigure", mComponentName.c_str()); - mPendingBuffers = 0; mCachedBytes = 0; + mPendingBuffersToFill = 0; + mPendingBuffersToDrain = 0; mReachedEOS = false; ++mBufferGeneration; - requestABuffer(); + requestMaxBuffers(); sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", kWhatOutputFormatChanged); @@ -98,12 +102,15 @@ bool NuPlayer::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) { return generation != mBufferGeneration; } -void NuPlayer::DecoderPassThrough::requestABuffer() { - if (mCachedBytes >= kMaxCachedBytes || mReachedEOS) { - ALOGV("[%s] mReachedEOS=%d, max pending buffers(%d:%d)", - mComponentName.c_str(), (mReachedEOS ? 
1 : 0), - mPendingBuffers, kMaxPendingBuffers); - return; +bool NuPlayer::DecoderPassThrough::requestABuffer() { + if (mCachedBytes >= kMaxCachedBytes) { + ALOGV("[%s] mCachedBytes = %zu", + mComponentName.c_str(), mCachedBytes); + return false; + } + if (mReachedEOS) { + ALOGV("[%s] reached EOS", mComponentName.c_str()); + return false; } sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, id()); @@ -113,16 +120,16 @@ void NuPlayer::DecoderPassThrough::requestABuffer() { notify->setInt32("what", kWhatFillThisBuffer); notify->setMessage("reply", reply); notify->post(); - mPendingBuffers++; + mPendingBuffersToFill++; + ALOGV("requestABuffer: #ToFill = %zu, #ToDrain = %zu", mPendingBuffersToFill, + mPendingBuffersToDrain); - sp<AMessage> message = new AMessage(kWhatRequestABuffer, id()); - message->setInt32("generation", mBufferGeneration); - message->post(); - return; + return true; } void android::NuPlayer::DecoderPassThrough::onInputBufferFilled( const sp<AMessage> &msg) { + --mPendingBuffersToFill; if (mReachedEOS) { return; } @@ -150,14 +157,17 @@ void android::NuPlayer::DecoderPassThrough::onInputBufferFilled( notify->setBuffer("buffer", buffer); notify->setMessage("reply", reply); notify->post(); + ++mPendingBuffersToDrain; + ALOGV("onInputBufferFilled: #ToFill = %zu, #ToDrain = %zu, cachedBytes = %zu", + mPendingBuffersToFill, mPendingBuffersToDrain, mCachedBytes); } void NuPlayer::DecoderPassThrough::onBufferConsumed(int32_t size) { - mPendingBuffers--; + --mPendingBuffersToDrain; mCachedBytes -= size; - sp<AMessage> message = new AMessage(kWhatRequestABuffer, id()); - message->setInt32("generation", mBufferGeneration); - message->post(); + ALOGV("onBufferConsumed: #ToFill = %zu, #ToDrain = %zu, cachedBytes = %zu", + mPendingBuffersToFill, mPendingBuffersToDrain, mCachedBytes); + requestABuffer(); } void NuPlayer::DecoderPassThrough::onFlush() { @@ -166,11 +176,20 @@ void NuPlayer::DecoderPassThrough::onFlush() { sp<AMessage> notify = mNotify->dup(); 
notify->setInt32("what", kWhatFlushCompleted); notify->post(); - mPendingBuffers = 0; + mPendingBuffersToFill = 0; + mPendingBuffersToDrain = 0; mCachedBytes = 0; mReachedEOS = false; } +void NuPlayer::DecoderPassThrough::requestMaxBuffers() { + for (size_t i = 0; i < kMaxPendingBuffers; i++) { + if (!requestABuffer()) { + break; + } + } +} + void NuPlayer::DecoderPassThrough::onShutdown() { ++mBufferGeneration; @@ -228,7 +247,7 @@ void NuPlayer::DecoderPassThrough::onMessageReceived(const sp<AMessage> &msg) { case kWhatResume: { - requestABuffer(); + requestMaxBuffers(); break; } diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h index 8590856..fb20257 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h @@ -55,19 +55,26 @@ private: sp<AMessage> mNotify; sp<ALooper> mDecoderLooper; - void requestABuffer(); + /** Returns true if a buffer was requested. + * Returns false if at EOS or cache already full. + */ + bool requestABuffer(); bool isStaleReply(const sp<AMessage> &msg); void onConfigure(const sp<AMessage> &format); void onFlush(); void onInputBufferFilled(const sp<AMessage> &msg); void onBufferConsumed(int32_t size); + void requestMaxBuffers(); void onShutdown(); int32_t mBufferGeneration; - bool mReachedEOS; - int32_t mPendingBuffers; - int32_t mCachedBytes; + bool mReachedEOS; + // TODO mPendingBuffersToFill and mPendingBuffersToDrain are only for + // debugging. They can be removed when the power investigation is done. 
+ size_t mPendingBuffersToFill; + size_t mPendingBuffersToDrain; + size_t mCachedBytes; AString mComponentName; DISALLOW_EVIL_CONSTRUCTORS(DecoderPassThrough); diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp index 35cd514..1a01d52 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp @@ -27,6 +27,7 @@ #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/ALooper.h> #include <media/stagefright/MetaData.h> +#include <media/stagefright/Utils.h> namespace android { @@ -45,7 +46,9 @@ NuPlayerDriver::NuPlayerDriver() mPlayerFlags(0), mAtEOS(false), mLooping(false), + mAutoLoop(false), mStartupSeekTimeUs(-1) { + ALOGV("NuPlayerDriver(%p)", this); mLooper->setName("NuPlayerDriver Looper"); mLooper->start( @@ -60,6 +63,7 @@ NuPlayerDriver::NuPlayerDriver() } NuPlayerDriver::~NuPlayerDriver() { + ALOGV("~NuPlayerDriver(%p)", this); mLooper->stop(); } @@ -77,9 +81,9 @@ status_t NuPlayerDriver::setDataSource( const sp<IMediaHTTPService> &httpService, const char *url, const KeyedVector<String8, String8> *headers) { + ALOGV("setDataSource(%p) url(%s)", this, uriDebugString(url, false).c_str()); Mutex::Autolock autoLock(mLock); - ALOGV("setDataSource: url=%s", url); if (mState != STATE_IDLE) { return INVALID_OPERATION; } @@ -96,9 +100,9 @@ status_t NuPlayerDriver::setDataSource( } status_t NuPlayerDriver::setDataSource(int fd, int64_t offset, int64_t length) { + ALOGV("setDataSource(%p) file(%d)", this, fd); Mutex::Autolock autoLock(mLock); - ALOGV("setDataSource: fd=%d", fd); if (mState != STATE_IDLE) { return INVALID_OPERATION; } @@ -115,9 +119,9 @@ status_t NuPlayerDriver::setDataSource(int fd, int64_t offset, int64_t length) { } status_t NuPlayerDriver::setDataSource(const sp<IStreamSource> &source) { + ALOGV("setDataSource(%p) stream source", this); Mutex::Autolock autoLock(mLock); - 
ALOGV("setDataSource: stream source"); if (mState != STATE_IDLE) { return INVALID_OPERATION; } @@ -135,6 +139,7 @@ status_t NuPlayerDriver::setDataSource(const sp<IStreamSource> &source) { status_t NuPlayerDriver::setVideoSurfaceTexture( const sp<IGraphicBufferProducer> &bufferProducer) { + ALOGV("setVideoSurfaceTexture(%p)", this); Mutex::Autolock autoLock(mLock); if (mSetSurfaceInProgress) { @@ -162,6 +167,7 @@ status_t NuPlayerDriver::setVideoSurfaceTexture( } status_t NuPlayerDriver::prepare() { + ALOGV("prepare(%p)", this); Mutex::Autolock autoLock(mLock); return prepare_l(); } @@ -196,6 +202,7 @@ status_t NuPlayerDriver::prepare_l() { } status_t NuPlayerDriver::prepareAsync() { + ALOGV("prepareAsync(%p)", this); Mutex::Autolock autoLock(mLock); switch (mState) { @@ -217,6 +224,7 @@ status_t NuPlayerDriver::prepareAsync() { } status_t NuPlayerDriver::start() { + ALOGD("start(%p)", this); Mutex::Autolock autoLock(mLock); switch (mState) { @@ -239,9 +247,7 @@ status_t NuPlayerDriver::start() { mPlayer->start(); if (mStartupSeekTimeUs >= 0) { - if (mStartupSeekTimeUs == 0) { - notifySeekComplete_l(); - } else { + if (mStartupSeekTimeUs > 0) { mPlayer->seekToAsync(mStartupSeekTimeUs); } @@ -263,8 +269,22 @@ status_t NuPlayerDriver::start() { case STATE_PAUSED: case STATE_STOPPED_AND_PREPARED: { - mPlayer->resume(); - mPositionUs -= ALooper::GetNowUs() - mPauseStartedTimeUs; + if (mAtEOS) { + mPlayer->seekToAsync(0); + mAtEOS = false; + mPlayer->resume(); + mPositionUs = -1; + } else { + mPlayer->resume(); + if (mNotifyTimeRealUs != -1) { + // Pause time must be set if here by setPauseStartedTimeIfNeeded(). + //CHECK(mPauseStartedTimeUs != -1); + + // if no seek occurs, adjust our notify time so that getCurrentPosition() + // is continuous if read immediately after calling start(). 
+ mNotifyTimeRealUs += ALooper::GetNowUs() - mPauseStartedTimeUs; + } + } break; } @@ -279,6 +299,7 @@ status_t NuPlayerDriver::start() { } status_t NuPlayerDriver::stop() { + ALOGD("stop(%p)", this); Mutex::Autolock autoLock(mLock); switch (mState) { @@ -333,6 +354,7 @@ bool NuPlayerDriver::isPlaying() { } status_t NuPlayerDriver::seekTo(int msec) { + ALOGD("seekTo(%p) %d ms", this, msec); Mutex::Autolock autoLock(mLock); int64_t seekTimeUs = msec * 1000ll; @@ -354,7 +376,7 @@ status_t NuPlayerDriver::seekTo(int msec) { mAtEOS = false; // seeks can take a while, so we essentially paused notifyListener_l(MEDIA_PAUSED); - mPlayer->seekToAsync(seekTimeUs); + mPlayer->seekToAsync(seekTimeUs, true /* needNotify */); break; } @@ -371,15 +393,36 @@ status_t NuPlayerDriver::getCurrentPosition(int *msec) { Mutex::Autolock autoLock(mLock); if (mPositionUs < 0) { + // mPositionUs is the media time. + // It is negative under these cases + // (1) == -1 after reset, or very first playback, no stream notification yet. + // (2) == -1 start after end of stream, no stream notification yet. + // (3) == large negative # after ~292,471 years of continuous playback. + + //CHECK_EQ(mPositionUs, -1); *msec = 0; } else if (mNotifyTimeRealUs == -1) { + // A seek has occurred just occurred, no stream notification yet. + // mPositionUs (>= 0) is the new media position. *msec = mPositionUs / 1000; } else { + // mPosition must be valid (i.e. >= 0) by the first check above. + // We're either playing or have pause time set: mPauseStartedTimeUs is >= 0 + //LOG_ALWAYS_FATAL_IF( + // !isPlaying() && mPauseStartedTimeUs < 0, + // "Player in non-playing mState(%d) and mPauseStartedTimeUs(%lld) < 0", + // mState, (long long)mPauseStartedTimeUs); + ALOG_ASSERT(mNotifyTimeRealUs >= 0); int64_t nowUs = (isPlaying() ? 
ALooper::GetNowUs() : mPauseStartedTimeUs); *msec = (mPositionUs + nowUs - mNotifyTimeRealUs + 500ll) / 1000; + // It is possible for *msec to be negative if the media position is > 596 hours. + // but we turn on this checking in NDEBUG == 0 mode. + ALOG_ASSERT(*msec >= 0); + ALOGV("getCurrentPosition nowUs(%lld)", (long long)nowUs); } - + ALOGV("getCurrentPosition returning(%d) mPositionUs(%lld) mNotifyRealTimeUs(%lld)", + *msec, (long long)mPositionUs, (long long)mNotifyTimeRealUs); return OK; } @@ -396,6 +439,7 @@ status_t NuPlayerDriver::getDuration(int *msec) { } status_t NuPlayerDriver::reset() { + ALOGD("reset(%p)", this); Mutex::Autolock autoLock(mLock); switch (mState) { @@ -498,6 +542,7 @@ status_t NuPlayerDriver::invoke(const Parcel &request, Parcel *reply) { void NuPlayerDriver::setAudioSink(const sp<AudioSink> &audioSink) { mPlayer->setAudioSink(audioSink); + mAudioSink = audioSink; } status_t NuPlayerDriver::setParameter( @@ -537,6 +582,7 @@ status_t NuPlayerDriver::getMetadata( } void NuPlayerDriver::notifyResetComplete() { + ALOGI("notifyResetComplete(%p)", this); Mutex::Autolock autoLock(mLock); CHECK_EQ(mState, STATE_RESET_IN_PROGRESS); @@ -545,6 +591,7 @@ void NuPlayerDriver::notifyResetComplete() { } void NuPlayerDriver::notifySetSurfaceComplete() { + ALOGV("notifySetSurfaceComplete(%p)", this); Mutex::Autolock autoLock(mLock); CHECK(mSetSurfaceInProgress); @@ -567,6 +614,7 @@ void NuPlayerDriver::notifyPosition(int64_t positionUs) { } void NuPlayerDriver::notifySeekComplete() { + ALOGV("notifySeekComplete(%p)", this); Mutex::Autolock autoLock(mLock); notifySeekComplete_l(); } @@ -626,12 +674,16 @@ void NuPlayerDriver::notifyListener_l( switch (msg) { case MEDIA_PLAYBACK_COMPLETE: { - if (mLooping && mState != STATE_RESET_IN_PROGRESS) { - mPlayer->seekToAsync(0); - break; + if (mState != STATE_RESET_IN_PROGRESS) { + if (mLooping || (mAutoLoop + && (mAudioSink == NULL || mAudioSink->realtime()))) { + mPlayer->seekToAsync(0); + break; + } + + 
mPlayer->pause(); + mState = STATE_PAUSED; } - mPlayer->pause(); - mState = STATE_PAUSED; // fall through } @@ -690,6 +742,13 @@ void NuPlayerDriver::notifyPrepareCompleted(status_t err) { } } + sp<MetaData> meta = mPlayer->getFileMeta(); + int32_t loop; + if (meta != NULL + && meta->findInt32(kKeyAutoLoop, &loop) && loop != 0) { + mAutoLoop = true; + } + mCondition.broadcast(); } diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h index e81d605..f2bd431 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h @@ -114,10 +114,12 @@ private: sp<ALooper> mLooper; sp<NuPlayer> mPlayer; + sp<AudioSink> mAudioSink; uint32_t mPlayerFlags; bool mAtEOS; bool mLooping; + bool mAutoLoop; int64_t mStartupSeekTimeUs; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp index aad6e93..d6bf1de 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp @@ -20,19 +20,37 @@ #include "NuPlayerRenderer.h" +#include <cutils/properties.h> + #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> +#include <VideoFrameScheduler.h> + #include <inttypes.h> namespace android { +// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink +// is closed to allow the audio DSP to power down. 
+static const int64_t kOffloadPauseMaxUs = 60000000ll; + // static const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; +static bool sFrameAccurateAVsync = false; + +static void readProperties() { + char value[PROPERTY_VALUE_MAX]; + if (property_get("persist.sys.media.avsync", value, NULL)) { + sFrameAccurateAVsync = + !strcmp("1", value) || !strcasecmp("true", value); + } +} + NuPlayer::Renderer::Renderer( const sp<MediaPlayerBase::AudioSink> &sink, const sp<AMessage> ¬ify, @@ -45,7 +63,7 @@ NuPlayer::Renderer::Renderer( mDrainVideoQueuePending(false), mAudioQueueGeneration(0), mVideoQueueGeneration(0), - mFirstAudioTimeUs(-1), + mFirstAnchorTimeMediaUs(-1), mAnchorTimeMediaUs(-1), mAnchorTimeRealUs(-1), mFlushingAudio(false), @@ -54,12 +72,15 @@ NuPlayer::Renderer::Renderer( mHasVideo(false), mSyncQueues(false), mPaused(false), + mVideoSampleReceived(false), mVideoRenderingStarted(false), mVideoRenderingStartGeneration(0), mAudioRenderingStartGeneration(0), mLastPositionUpdateUs(-1ll), mVideoLateByUs(0ll), - mVideoSampleReceived(false) { + mAudioOffloadPauseTimeoutGeneration(0), + mAudioOffloadTornDown(false) { + readProperties(); } NuPlayer::Renderer::~Renderer() { @@ -115,6 +136,7 @@ void NuPlayer::Renderer::signalTimeDiscontinuity() { Mutex::Autolock autoLock(mLock); // CHECK(mAudioQueue.empty()); // CHECK(mVideoQueue.empty()); + mFirstAnchorTimeMediaUs = -1; mAnchorTimeMediaUs = -1; mAnchorTimeRealUs = -1; mSyncQueues = false; @@ -136,6 +158,12 @@ void NuPlayer::Renderer::resume() { (new AMessage(kWhatResume, id()))->post(); } +void NuPlayer::Renderer::setVideoFrameRate(float fps) { + sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, id()); + msg->setFloat("frame-rate", fps); + msg->post(); +} + void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { switch (msg->what()) { case kWhatStopAudioSink: @@ -236,9 +264,29 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { break; } + case 
kWhatSetVideoFrameRate: + { + float fps; + CHECK(msg->findFloat("frame-rate", &fps)); + onSetVideoFrameRate(fps); + break; + } + case kWhatAudioOffloadTearDown: { - onAudioOffloadTearDown(); + onAudioOffloadTearDown(kDueToError); + break; + } + + case kWhatAudioOffloadPauseTimeout: + { + int32_t generation; + CHECK(msg->findInt32("generation", &generation)); + if (generation != mAudioOffloadPauseTimeoutGeneration) { + break; + } + ALOGV("Audio Offload tear down due to pause timeout."); + onAudioOffloadTearDown(kDueToTimeout); break; } @@ -339,19 +387,16 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { int64_t mediaTimeUs; CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6); - if (mFirstAudioTimeUs == -1) { - mFirstAudioTimeUs = mediaTimeUs; + if (mFirstAnchorTimeMediaUs == -1) { + mFirstAnchorTimeMediaUs = mediaTimeUs; } - uint32_t numFramesPlayed; - CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK); + int64_t nowUs = ALooper::GetNowUs(); + mAnchorTimeMediaUs = + mFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs); + mAnchorTimeRealUs = nowUs; - // TODO: figure out how to calculate initial latency. - // Otherwise, the initial time is not correct till the first sample - // is played. 
- mAnchorTimeMediaUs = mFirstAudioTimeUs - + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll; - mAnchorTimeRealUs = ALooper::GetNowUs(); + notifyPosition(); } size_t copy = entry->mBuffer->size() - entry->mOffset; @@ -374,10 +419,6 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { notifyIfMediaRenderingStarted(); } - if (sizeCopied != 0) { - notifyPosition(); - } - if (hasEOS) { (new AMessage(kWhatStopAudioSink, id()))->post(); } @@ -413,7 +454,7 @@ bool NuPlayer::Renderer::onDrainAudioQueue() { // EOS int64_t postEOSDelayUs = 0; if (mAudioSink->needsTrailingPadding()) { - postEOSDelayUs = getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency(); + postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); } notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); @@ -426,10 +467,15 @@ bool NuPlayer::Renderer::onDrainAudioQueue() { int64_t mediaTimeUs; CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6); + if (mFirstAnchorTimeMediaUs == -1) { + mFirstAnchorTimeMediaUs = mediaTimeUs; + } mAnchorTimeMediaUs = mediaTimeUs; - mAnchorTimeRealUs = ALooper::GetNowUs() - + getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency() / 2; + int64_t nowUs = ALooper::GetNowUs(); + mAnchorTimeRealUs = nowUs + getPendingAudioPlayoutDurationUs(nowUs); + + notifyPosition(); } size_t copy = entry->mBuffer->size() - entry->mOffset; @@ -478,17 +524,13 @@ bool NuPlayer::Renderer::onDrainAudioQueue() { break; } } - notifyPosition(); - return !mAudioQueue.empty(); } -int64_t NuPlayer::Renderer::getAudioPendingPlayoutUs() { - uint32_t numFramesPlayed; - CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK); - - uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed; - return numFramesPendingPlayout * mAudioSink->msecsPerFrame() * 1000; +int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) { + int64_t 
writtenAudioDurationUs = + mNumFramesWritten * 1000LL * mAudioSink->msecsPerFrame(); + return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs); } void NuPlayer::Renderer::postDrainVideoQueue() { @@ -507,37 +549,53 @@ void NuPlayer::Renderer::postDrainVideoQueue() { sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, id()); msg->setInt32("generation", mVideoQueueGeneration); - int64_t delayUs; - if (entry.mBuffer == NULL) { // EOS doesn't carry a timestamp. - delayUs = 0; - } else if (mFlags & FLAG_REAL_TIME) { + msg->post(); + mDrainVideoQueuePending = true; + return; + } + + int64_t delayUs; + int64_t nowUs = ALooper::GetNowUs(); + int64_t realTimeUs; + if (mFlags & FLAG_REAL_TIME) { int64_t mediaTimeUs; CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); - - delayUs = mediaTimeUs - ALooper::GetNowUs(); + realTimeUs = mediaTimeUs; } else { int64_t mediaTimeUs; CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); + if (mFirstAnchorTimeMediaUs == -1 && !mHasAudio) { + mFirstAnchorTimeMediaUs = mediaTimeUs; + } if (mAnchorTimeMediaUs < 0) { - delayUs = 0; - if (!mHasAudio) { mAnchorTimeMediaUs = mediaTimeUs; - mAnchorTimeRealUs = ALooper::GetNowUs(); + mAnchorTimeRealUs = nowUs; + notifyPosition(); } + realTimeUs = nowUs; } else { - int64_t realTimeUs = + realTimeUs = (mediaTimeUs - mAnchorTimeMediaUs) + mAnchorTimeRealUs; - - delayUs = realTimeUs - ALooper::GetNowUs(); } } + realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; + int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); + + delayUs = realTimeUs - nowUs; + ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs); - msg->post(delayUs); + // post 2 display refreshes before rendering is due + // FIXME currently this increases power consumption, so unless frame-accurate + // AV sync is requested, post closer to required render time (at 0.63 vsyncs) + if (!sFrameAccurateAVsync) { + twoVsyncsUs >>= 4; + } + msg->post(delayUs > 
twoVsyncsUs ? delayUs - twoVsyncsUs : 0); mDrainVideoQueuePending = true; } @@ -558,8 +616,6 @@ void NuPlayer::Renderer::onDrainVideoQueue() { entry = NULL; mVideoLateByUs = 0ll; - - notifyPosition(); return; } @@ -591,6 +647,7 @@ void NuPlayer::Renderer::onDrainVideoQueue() { mVideoLateByUs = 0ll; } + entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll); entry->mNotifyConsumed->setInt32("render", !tooLate); entry->mNotifyConsumed->post(); mVideoQueue.erase(mVideoQueue.begin()); @@ -605,8 +662,6 @@ void NuPlayer::Renderer::onDrainVideoQueue() { } notifyIfMediaRenderingStarted(); } - - notifyPosition(); } void NuPlayer::Renderer::notifyVideoRenderingStart() { @@ -635,6 +690,10 @@ void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { mHasAudio = true; } else { mHasVideo = true; + if (mVideoScheduler == NULL) { + mVideoScheduler = new VideoFrameScheduler(); + mVideoScheduler->init(); + } } if (dropBufferWhileFlushing(audio, msg)) { @@ -783,7 +842,7 @@ void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { prepareForMediaRenderingStart(); if (offloadingAudio()) { - mFirstAudioTimeUs = -1; + mFirstAnchorTimeMediaUs = -1; } } @@ -800,6 +859,10 @@ void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { mDrainVideoQueuePending = false; ++mVideoQueueGeneration; + if (mVideoScheduler != NULL) { + mVideoScheduler->restart(); + } + prepareForMediaRenderingStart(); } @@ -871,9 +934,11 @@ void NuPlayer::Renderer::onDisableOffloadAudio() { } void NuPlayer::Renderer::notifyPosition() { - if (mAnchorTimeRealUs < 0 || mAnchorTimeMediaUs < 0) { - return; - } + // notifyPosition() must be called only after setting mAnchorTimeRealUs + // and mAnchorTimeMediaUs, and must not be paused as it extrapolates position. + //CHECK_GE(mAnchorTimeRealUs, 0); + //CHECK_GE(mAnchorTimeMediaUs, 0); + //CHECK(!mPaused || !mHasAudio); // video-only does display in paused mode. 
int64_t nowUs = ALooper::GetNowUs(); @@ -885,6 +950,18 @@ void NuPlayer::Renderer::notifyPosition() { int64_t positionUs = (nowUs - mAnchorTimeRealUs) + mAnchorTimeMediaUs; + //ALOGD("notifyPosition: positionUs(%lld) nowUs(%lld) mAnchorTimeRealUs(%lld)" + // " mAnchorTimeMediaUs(%lld) mFirstAnchorTimeMediaUs(%lld)", + // (long long)positionUs, (long long)nowUs, (long long)mAnchorTimeRealUs, + // (long long)mAnchorTimeMediaUs, (long long)mFirstAnchorTimeMediaUs); + + // Due to adding the latency to mAnchorTimeRealUs in onDrainAudioQueue(), + // positionUs may be less than the first media time. This is avoided + // here to prevent potential retrograde motion of the position bar + // when starting up after a seek. + if (positionUs < mFirstAnchorTimeMediaUs) { + positionUs = mFirstAnchorTimeMediaUs; + } sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", kWhatPosition); notify->setInt64("positionUs", positionUs); @@ -893,8 +970,10 @@ void NuPlayer::Renderer::notifyPosition() { } void NuPlayer::Renderer::onPause() { - CHECK(!mPaused); - + if (mPaused) { + ALOGW("Renderer::onPause() called while already paused!"); + return; + } { Mutex::Autolock autoLock(mLock); ++mAudioQueueGeneration; @@ -908,6 +987,7 @@ void NuPlayer::Renderer::onPause() { if (mHasAudio) { mAudioSink->pause(); + startAudioOffloadPauseTimeout(); } ALOGV("now paused audio queue has %d entries, video has %d entries", @@ -915,11 +995,14 @@ void NuPlayer::Renderer::onPause() { } void NuPlayer::Renderer::onResume() { + readProperties(); + if (!mPaused) { return; } if (mHasAudio) { + cancelAudioOffloadPauseTimeout(); mAudioSink->start(); } @@ -935,17 +1018,92 @@ void NuPlayer::Renderer::onResume() { } } -void NuPlayer::Renderer::onAudioOffloadTearDown() { +void NuPlayer::Renderer::onSetVideoFrameRate(float fps) { + if (mVideoScheduler == NULL) { + mVideoScheduler = new VideoFrameScheduler(); + } + mVideoScheduler->init(fps); +} + +// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs() 
+// as it acquires locks and may query the audio driver. +// +// Some calls are not needed since notifyPosition() doesn't always deliver a message. +// Some calls could conceivably retrieve extrapolated data instead of +// accessing getTimestamp() or getPosition() every time a data buffer with +// a media time is received. +// +int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) { uint32_t numFramesPlayed; - CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK); + int64_t numFramesPlayedAt; + AudioTimestamp ts; + static const int64_t kStaleTimestamp100ms = 100000; + + status_t res = mAudioSink->getTimestamp(ts); + if (res == OK) { // case 1: mixing audio tracks and offloaded tracks. + numFramesPlayed = ts.mPosition; + numFramesPlayedAt = + ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000; + const int64_t timestampAge = nowUs - numFramesPlayedAt; + if (timestampAge > kStaleTimestamp100ms) { + // This is an audio FIXME. + // getTimestamp returns a timestamp which may come from audio mixing threads. + // After pausing, the MixerThread may go idle, thus the mTime estimate may + // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms, + // the max latency should be about 25ms with an average around 12ms (to be verified). + // For safety we use 100ms. + ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)", + (long long)nowUs, (long long)numFramesPlayedAt); + numFramesPlayedAt = nowUs - kStaleTimestamp100ms; + } + //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt); + } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track + numFramesPlayed = 0; + numFramesPlayedAt = nowUs; + //ALOGD("getTimestamp: WOULD_BLOCK %d %lld", + // numFramesPlayed, (long long)numFramesPlayedAt); + } else { // case 3: transitory at new track or audio fast tracks. 
+ res = mAudioSink->getPosition(&numFramesPlayed); + CHECK_EQ(res, (status_t)OK); + numFramesPlayedAt = nowUs; + numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */ + //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt); + } + + // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours. + //CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test + int64_t durationUs = (int32_t)numFramesPlayed * 1000LL * mAudioSink->msecsPerFrame() + + nowUs - numFramesPlayedAt; + if (durationUs < 0) { + // Occurs when numFramesPlayed position is very small and the following: + // (1) In case 1, the time nowUs is computed before getTimestamp() is called and + // numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed. + // (2) In case 3, using getPosition and adding mAudioSink->latency() to + // numFramesPlayedAt, by a time amount greater than numFramesPlayed. + // + // Both of these are transitory conditions. + ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs); + durationUs = 0; + } + ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)", + (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt); + return durationUs; +} + +void NuPlayer::Renderer::onAudioOffloadTearDown(AudioOffloadTearDownReason reason) { + if (mAudioOffloadTornDown) { + return; + } + mAudioOffloadTornDown = true; int64_t firstAudioTimeUs; { Mutex::Autolock autoLock(mLock); - firstAudioTimeUs = mFirstAudioTimeUs; + firstAudioTimeUs = mFirstAnchorTimeMediaUs; } - int64_t currentPositionUs = firstAudioTimeUs - + (numFramesPlayed * mAudioSink->msecsPerFrame()) * 1000ll; + + int64_t currentPositionUs = + firstAudioTimeUs + getPlayedOutAudioDurationUs(ALooper::GetNowUs()); mAudioSink->stop(); mAudioSink->flush(); @@ -953,8 +1111,23 @@ void NuPlayer::Renderer::onAudioOffloadTearDown() { sp<AMessage> notify = mNotify->dup(); 
notify->setInt32("what", kWhatAudioOffloadTearDown); notify->setInt64("positionUs", currentPositionUs); + notify->setInt32("reason", reason); notify->post(); } +void NuPlayer::Renderer::startAudioOffloadPauseTimeout() { + if (offloadingAudio()) { + sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, id()); + msg->setInt32("generation", mAudioOffloadPauseTimeoutGeneration); + msg->post(kOffloadPauseMaxUs); + } +} + +void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() { + if (offloadingAudio()) { + ++mAudioOffloadPauseTimeoutGeneration; + } +} + } // namespace android diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h index 5c7d2d7..4237902 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h @@ -23,6 +23,7 @@ namespace android { struct ABuffer; +struct VideoFrameScheduler; struct NuPlayer::Renderer : public AHandler { enum Flags { @@ -56,6 +57,8 @@ struct NuPlayer::Renderer : public AHandler { void pause(); void resume(); + void setVideoFrameRate(float fps); + enum { kWhatEOS = 'eos ', kWhatFlushComplete = 'fluC', @@ -63,6 +66,12 @@ struct NuPlayer::Renderer : public AHandler { kWhatVideoRenderingStart = 'vdrd', kWhatMediaRenderingStart = 'mdrd', kWhatAudioOffloadTearDown = 'aOTD', + kWhatAudioOffloadPauseTimeout = 'aOPT', + }; + + enum AudioOffloadTearDownReason { + kDueToError = 0, + kDueToTimeout, }; protected: @@ -82,6 +91,7 @@ private: kWhatResume = 'resm', kWhatStopAudioSink = 'stpA', kWhatDisableOffloadAudio = 'noOA', + kWhatSetVideoFrameRate = 'sVFR', }; struct QueueEntry { @@ -100,13 +110,14 @@ private: List<QueueEntry> mAudioQueue; List<QueueEntry> mVideoQueue; uint32_t mNumFramesWritten; + sp<VideoFrameScheduler> mVideoScheduler; bool mDrainAudioQueuePending; bool mDrainVideoQueuePending; int32_t mAudioQueueGeneration; int32_t mVideoQueueGeneration; - int64_t mFirstAudioTimeUs; + int64_t 
mFirstAnchorTimeMediaUs; int64_t mAnchorTimeMediaUs; int64_t mAnchorTimeRealUs; @@ -127,10 +138,14 @@ private: int64_t mLastPositionUpdateUs; int64_t mVideoLateByUs; + int32_t mAudioOffloadPauseTimeoutGeneration; + bool mAudioOffloadTornDown; + size_t fillAudioBuffer(void *buffer, size_t size); bool onDrainAudioQueue(); - int64_t getAudioPendingPlayoutUs(); + int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs); + int64_t getPlayedOutAudioDurationUs(int64_t nowUs); void postDrainAudioQueue_l(int64_t delayUs = 0); void onDrainVideoQueue(); @@ -146,7 +161,8 @@ private: void onDisableOffloadAudio(); void onPause(); void onResume(); - void onAudioOffloadTearDown(); + void onSetVideoFrameRate(float fps); + void onAudioOffloadTearDown(AudioOffloadTearDownReason reason); void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0); void notifyFlushComplete(bool audio); @@ -161,6 +177,9 @@ private: bool offloadingAudio() const { return (mFlags & FLAG_OFFLOAD_AUDIO) != 0; } + void startAudioOffloadPauseTimeout(); + void cancelAudioOffloadPauseTimeout(); + DISALLOW_EVIL_CONSTRUCTORS(Renderer); }; diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h index 7ccf3b1..2f06c31 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h @@ -67,12 +67,16 @@ struct NuPlayer::Source : public AHandler { virtual void pause() {} virtual void resume() {} + // Explicitly disconnect the underlying data source + virtual void disconnect() {} + // Returns OK iff more data was available, // an error or ERROR_END_OF_STREAM if not.
virtual status_t feedMoreTSData() = 0; virtual sp<AMessage> getFormat(bool audio); virtual sp<MetaData> getFormatMeta(bool /* audio */) { return NULL; } + virtual sp<MetaData> getFileFormatMeta() const { return NULL; } virtual status_t dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) = 0; diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp index e812ad0..b693625 100644 --- a/media/libstagefright/ACodec.cpp +++ b/media/libstagefright/ACodec.cpp @@ -32,6 +32,7 @@ #include <media/stagefright/foundation/ABuffer.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/foundation/AUtils.h> #include <media/stagefright/BufferProducerWrapper.h> #include <media/stagefright/MediaCodecList.h> @@ -1386,6 +1387,7 @@ status_t ACodec::configureCodec( int32_t isADTS, aacProfile; int32_t sbrMode; int32_t maxOutputChannelCount; + int32_t pcmLimiterEnable; drcParams_t drc; if (!msg->findInt32("is-adts", &isADTS)) { isADTS = 0; @@ -1400,6 +1402,10 @@ status_t ACodec::configureCodec( if (!msg->findInt32("aac-max-output-channel_count", &maxOutputChannelCount)) { maxOutputChannelCount = -1; } + if (!msg->findInt32("aac-pcm-limiter-enable", &pcmLimiterEnable)) { + // value is unknown + pcmLimiterEnable = -1; + } if (!msg->findInt32("aac-encoded-target-level", &drc.encodedTargetLevel)) { // value is unknown drc.encodedTargetLevel = -1; @@ -1423,7 +1429,8 @@ status_t ACodec::configureCodec( err = setupAACCodec( encoder, numChannels, sampleRate, bitRate, aacProfile, - isADTS != 0, sbrMode, maxOutputChannelCount, drc); + isADTS != 0, sbrMode, maxOutputChannelCount, drc, + pcmLimiterEnable); } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) { err = setupAMRCodec(encoder, false /* isWAMR */, bitRate); @@ -1588,7 +1595,8 @@ status_t ACodec::selectAudioPortFormat( status_t ACodec::setupAACCodec( bool encoder, int32_t numChannels, int32_t sampleRate, int32_t bitRate, int32_t 
aacProfile, bool isADTS, int32_t sbrMode, - int32_t maxOutputChannelCount, const drcParams_t& drc) { + int32_t maxOutputChannelCount, const drcParams_t& drc, + int32_t pcmLimiterEnable) { if (encoder && isADTS) { return -EINVAL; } @@ -1718,6 +1726,7 @@ status_t ACodec::setupAACCodec( presentation.nHeavyCompression = drc.heavyCompression; presentation.nTargetReferenceLevel = drc.targetRefLevel; presentation.nEncodedTargetLevel = drc.encodedTargetLevel; + presentation.nPCMLimiterEnable = pcmLimiterEnable; status_t res = mOMX->setParameter(mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile)); if (res == OK) { @@ -2509,6 +2518,58 @@ status_t ACodec::setupH263EncoderParameters(const sp<AMessage> &msg) { return setupErrorCorrectionParameters(); } +// static +int /* OMX_VIDEO_AVCLEVELTYPE */ ACodec::getAVCLevelFor( + int width, int height, int rate, int bitrate, + OMX_VIDEO_AVCPROFILETYPE profile) { + // convert bitrate to main/baseline profile kbps equivalent + switch (profile) { + case OMX_VIDEO_AVCProfileHigh10: + bitrate = divUp(bitrate, 3000); break; + case OMX_VIDEO_AVCProfileHigh: + bitrate = divUp(bitrate, 1250); break; + default: + bitrate = divUp(bitrate, 1000); break; + } + + // convert size and rate to MBs + width = divUp(width, 16); + height = divUp(height, 16); + int mbs = width * height; + rate *= mbs; + int maxDimension = max(width, height); + + static const int limits[][5] = { + /* MBps MB dim bitrate level */ + { 1485, 99, 28, 64, OMX_VIDEO_AVCLevel1 }, + { 1485, 99, 28, 128, OMX_VIDEO_AVCLevel1b }, + { 3000, 396, 56, 192, OMX_VIDEO_AVCLevel11 }, + { 6000, 396, 56, 384, OMX_VIDEO_AVCLevel12 }, + { 11880, 396, 56, 768, OMX_VIDEO_AVCLevel13 }, + { 11880, 396, 56, 2000, OMX_VIDEO_AVCLevel2 }, + { 19800, 792, 79, 4000, OMX_VIDEO_AVCLevel21 }, + { 20250, 1620, 113, 4000, OMX_VIDEO_AVCLevel22 }, + { 40500, 1620, 113, 10000, OMX_VIDEO_AVCLevel3 }, + { 108000, 3600, 169, 14000, OMX_VIDEO_AVCLevel31 }, + { 216000, 5120, 202, 20000, OMX_VIDEO_AVCLevel32 }, + 
{ 245760, 8192, 256, 20000, OMX_VIDEO_AVCLevel4 }, + { 245760, 8192, 256, 50000, OMX_VIDEO_AVCLevel41 }, + { 522240, 8704, 263, 50000, OMX_VIDEO_AVCLevel42 }, + { 589824, 22080, 420, 135000, OMX_VIDEO_AVCLevel5 }, + { 983040, 36864, 543, 240000, OMX_VIDEO_AVCLevel51 }, + { 2073600, 36864, 543, 240000, OMX_VIDEO_AVCLevel52 }, + }; + + for (size_t i = 0; i < ARRAY_SIZE(limits); i++) { + const int (&limit)[5] = limits[i]; + if (rate <= limit[0] && mbs <= limit[1] && maxDimension <= limit[2] + && bitrate <= limit[3]) { + return limit[4]; + } + } + return 0; +} + status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) { int32_t bitrate, iFrameInterval; if (!msg->findInt32("bitrate", &bitrate) @@ -2960,13 +3021,6 @@ bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams ¶ms) { image.mNumPlanes = 0; const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat; - // we need stride and slice-height to be non-zero - if (params.nStride == 0 || params.nSliceHeight == 0) { - ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u", - fmt, fmt, params.nStride, params.nSliceHeight); - return false; - } - image.mWidth = params.nFrameWidth; image.mHeight = params.nFrameHeight; @@ -2979,6 +3033,20 @@ bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams ¶ms) { return false; } + // TEMPORARY FIX for some vendors that advertise sliceHeight as 0 + if (params.nStride != 0 && params.nSliceHeight == 0) { + ALOGW("using sliceHeight=%u instead of what codec advertised (=0)", + params.nFrameHeight); + params.nSliceHeight = params.nFrameHeight; + } + + // we need stride and slice-height to be non-zero + if (params.nStride == 0 || params.nSliceHeight == 0) { + ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u", + fmt, fmt, params.nStride, params.nSliceHeight); + return false; + } + // set-up YUV format image.mType = MediaImage::MEDIA_IMAGE_TYPE_YUV; image.mNumPlanes = 3; @@ -3809,23 +3877,12 @@ bool 
ACodec::BaseState::onOMXEmptyBufferDone(IOMX::buffer_id bufferID) { CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_COMPONENT); info->mStatus = BufferInfo::OWNED_BY_US; - const sp<AMessage> &bufferMeta = info->mData->meta(); - void *mediaBuffer; - if (bufferMeta->findPointer("mediaBuffer", &mediaBuffer) - && mediaBuffer != NULL) { - // We're in "store-metadata-in-buffers" mode, the underlying - // OMX component had access to data that's implicitly refcounted - // by this "mediaBuffer" object. Now that the OMX component has - // told us that it's done with the input buffer, we can decrement - // the mediaBuffer's reference count. - - ALOGV("releasing mbuf %p", mediaBuffer); - - ((MediaBuffer *)mediaBuffer)->release(); - mediaBuffer = NULL; - - bufferMeta->setPointer("mediaBuffer", NULL); - } + // We're in "store-metadata-in-buffers" mode, the underlying + // OMX component had access to data that's implicitly refcounted + // by this "MediaBuffer" object. Now that the OMX component has + // told us that it's done with the input buffer, we can decrement + // the mediaBuffer's reference count. 
+ info->mData->setMediaBufferBase(NULL); PortMode mode = getPortMode(kPortIndexInput); diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp index 9d6fd78..c99db84 100644 --- a/media/libstagefright/DataSource.cpp +++ b/media/libstagefright/DataSource.cpp @@ -186,7 +186,8 @@ sp<DataSource> DataSource::CreateFromURI( const sp<IMediaHTTPService> &httpService, const char *uri, const KeyedVector<String8, String8> *headers, - String8 *contentType) { + String8 *contentType, + HTTPBase *httpSource) { if (contentType != NULL) { *contentType = ""; } @@ -199,7 +200,19 @@ sp<DataSource> DataSource::CreateFromURI( } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8) || isWidevine) { - sp<HTTPBase> httpSource = new MediaHTTP(httpService->makeHTTPConnection()); + if (httpService == NULL) { + ALOGE("Invalid http service!"); + return NULL; + } + + if (httpSource == NULL) { + sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection(); + if (conn == NULL) { + ALOGE("Failed to make http connection from http service!"); + return NULL; + } + httpSource = new MediaHTTP(conn); + } String8 tmp; if (isWidevine) { @@ -253,6 +266,19 @@ sp<DataSource> DataSource::CreateFromURI( return source; } +sp<DataSource> DataSource::CreateMediaHTTP(const sp<IMediaHTTPService> &httpService) { + if (httpService == NULL) { + return NULL; + } + + sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection(); + if (conn == NULL) { + return NULL; + } else { + return new MediaHTTP(conn); + } +} + String8 DataSource::getMIMEType() const { return String8("application/octet-stream"); } diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp index fc2dd30..b568063 100644 --- a/media/libstagefright/MediaCodec.cpp +++ b/media/libstagefright/MediaCodec.cpp @@ -179,7 +179,7 @@ void MediaCodec::PostReplyWithError(int32_t replyID, int32_t err) { response->postReply(replyID); } -status_t MediaCodec::init(const char *name, 
bool nameIsType, bool encoder) { +status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) { // save init parameters for reset mInitName = name; mInitNameIsType = nameIsType; @@ -191,7 +191,7 @@ status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) { // queue. mCodec = new ACodec; bool needDedicatedLooper = false; - if (nameIsType && !strncasecmp(name, "video/", 6)) { + if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) { needDedicatedLooper = true; } else { AString tmp = name; @@ -270,7 +270,20 @@ status_t MediaCodec::configure( } sp<AMessage> response; - return PostAndAwaitResponse(msg, &response); + status_t err = PostAndAwaitResponse(msg, &response); + + if (err != OK && err != INVALID_OPERATION) { + // MediaCodec now set state to UNINITIALIZED upon any fatal error. + // To maintain backward-compatibility, do a reset() to put codec + // back into INITIALIZED state. + // But don't reset if the err is INVALID_OPERATION, which means + // the configure failure is due to wrong state. 
+ + ALOGE("configure failed with err 0x%08x, resetting...", err); + reset(); + } + + return err; } status_t MediaCodec::createInputSurface( @@ -344,7 +357,7 @@ status_t MediaCodec::reset() { mHaveInputSurface = false; if (err == OK) { - err = init(mInitName.c_str(), mInitNameIsType, mInitIsEncoder); + err = init(mInitName, mInitNameIsType, mInitIsEncoder); } return err; } @@ -576,7 +589,12 @@ status_t MediaCodec::getBufferAndFormat( if (index < buffers->size()) { const BufferInfo &info = buffers->itemAt(index); if (info.mOwnedByClient) { - *buffer = info.mData; + // by the time buffers array is initialized, crypto is set + if (portIndex == kPortIndexInput && mCrypto != NULL) { + *buffer = info.mEncryptedData; + } else { + *buffer = info.mData; + } *format = info.mFormat; } } @@ -2123,11 +2141,24 @@ void MediaCodec::postActivityNotificationIfPossible() { return; } - if ((mFlags & (kFlagStickyError + bool isErrorOrOutputChanged = + (mFlags & (kFlagStickyError | kFlagOutputBuffersChanged - | kFlagOutputFormatChanged)) + | kFlagOutputFormatChanged)); + + if (isErrorOrOutputChanged || !mAvailPortBuffers[kPortIndexInput].empty() || !mAvailPortBuffers[kPortIndexOutput].empty()) { + mActivityNotify->setInt32("input-buffers", + mAvailPortBuffers[kPortIndexInput].size()); + + if (isErrorOrOutputChanged) { + // we want consumer to dequeue as many times as it can + mActivityNotify->setInt32("output-buffers", INT32_MAX); + } else { + mActivityNotify->setInt32("output-buffers", + mAvailPortBuffers[kPortIndexOutput].size()); + } mActivityNotify->post(); mActivityNotify.clear(); } diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp index 1a80dcc..0fecda8 100644 --- a/media/libstagefright/MediaCodecSource.cpp +++ b/media/libstagefright/MediaCodecSource.cpp @@ -37,19 +37,6 @@ namespace android { -static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) { - void *mbuf; - if (accessUnit->meta()->findPointer("mediaBuffer", 
&mbuf) - && mbuf != NULL) { - ALOGV("releasing mbuf %p", mbuf); - - accessUnit->meta()->setPointer("mediaBuffer", NULL); - - static_cast<MediaBuffer *>(mbuf)->release(); - mbuf = NULL; - } -} - struct MediaCodecSource::Puller : public AHandler { Puller(const sp<MediaSource> &source); @@ -477,7 +464,7 @@ void MediaCodecSource::releaseEncoder() { for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) { sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i); - ReleaseMediaBufferReference(accessUnit); + accessUnit->setMediaBufferBase(NULL); } mEncoderInputBuffers.clear(); @@ -608,8 +595,8 @@ status_t MediaCodecSource::feedEncoderInputBuffers() { if (mIsVideo) { // video encoder will release MediaBuffer when done // with underlying data. - mEncoderInputBuffers.itemAt(bufferIndex)->meta() - ->setPointer("mediaBuffer", mbuf); + mEncoderInputBuffers.itemAt(bufferIndex)->setMediaBufferBase( + mbuf); } else { mbuf->release(); } @@ -628,11 +615,11 @@ status_t MediaCodecSource::feedEncoderInputBuffers() { return OK; } -status_t MediaCodecSource::doMoreWork() { - status_t err; +status_t MediaCodecSource::doMoreWork(int32_t numInput, int32_t numOutput) { + status_t err = OK; if (!(mFlags & FLAG_USE_SURFACE_INPUT)) { - for (;;) { + while (numInput-- > 0) { size_t bufferIndex; err = mEncoder->dequeueInputBuffer(&bufferIndex); @@ -646,7 +633,7 @@ status_t MediaCodecSource::doMoreWork() { feedEncoderInputBuffers(); } - for (;;) { + while (numOutput-- > 0) { size_t bufferIndex; size_t offset; size_t size; @@ -818,7 +805,16 @@ void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) { break; } - status_t err = doMoreWork(); + int32_t numInput, numOutput; + + if (!msg->findInt32("input-buffers", &numInput)) { + numInput = INT32_MAX; + } + if (!msg->findInt32("output-buffers", &numOutput)) { + numOutput = INT32_MAX; + } + + status_t err = doMoreWork(numInput, numOutput); if (err == OK) { scheduleDoMoreWork(); diff --git a/media/libstagefright/NuCachedSource2.cpp 
b/media/libstagefright/NuCachedSource2.cpp index c1feff8..bd0a41d 100644 --- a/media/libstagefright/NuCachedSource2.cpp +++ b/media/libstagefright/NuCachedSource2.cpp @@ -191,6 +191,7 @@ NuCachedSource2::NuCachedSource2( mFinalStatus(OK), mLastAccessPos(0), mFetching(true), + mDisconnecting(false), mLastFetchTimeUs(-1), mNumRetriesLeft(kMaxNumRetries), mHighwaterThresholdBytes(kDefaultHighWaterThreshold), @@ -244,6 +245,27 @@ status_t NuCachedSource2::getEstimatedBandwidthKbps(int32_t *kbps) { return ERROR_UNSUPPORTED; } +void NuCachedSource2::disconnect() { + if (mSource->flags() & kIsHTTPBasedSource) { + ALOGV("disconnecting HTTPBasedSource"); + + { + Mutex::Autolock autoLock(mLock); + // set mDisconnecting to true, if a fetch returns after + // this, the source will be marked as EOS. + mDisconnecting = true; + + // explicitly signal mCondition so that the pending readAt() + // will immediately return + mCondition.signal(); + } + + // explicitly disconnect from the source, to allow any + // pending reads to return more promptly + static_cast<HTTPBase *>(mSource.get())->disconnect(); + } +} + status_t NuCachedSource2::setCacheStatCollectFreq(int32_t freqMs) { if (mSource->flags() & kIsHTTPBasedSource) { HTTPBase *source = static_cast<HTTPBase *>(mSource.get()); @@ -307,7 +329,11 @@ void NuCachedSource2::fetchInternal() { Mutex::Autolock autoLock(mLock); - if (err == ERROR_UNSUPPORTED || err == -EPIPE) { + if (mDisconnecting) { + mNumRetriesLeft = 0; + mFinalStatus = ERROR_END_OF_STREAM; + return; + } else if (err == ERROR_UNSUPPORTED || err == -EPIPE) { // These are errors that are not likely to go away even if we // retry, i.e. the server doesn't support range requests or similar. 
mNumRetriesLeft = 0; @@ -327,7 +353,14 @@ void NuCachedSource2::fetchInternal() { Mutex::Autolock autoLock(mLock); - if (n < 0) { + if (n == 0 || mDisconnecting) { + ALOGI("ERROR_END_OF_STREAM"); + + mNumRetriesLeft = 0; + mFinalStatus = ERROR_END_OF_STREAM; + + mCache->releasePage(page); + } else if (n < 0) { mFinalStatus = n; if (n == ERROR_UNSUPPORTED || n == -EPIPE) { // These are errors that are not likely to go away even if we @@ -337,13 +370,6 @@ void NuCachedSource2::fetchInternal() { ALOGE("source returned error %zd, %d retries left", n, mNumRetriesLeft); mCache->releasePage(page); - } else if (n == 0) { - ALOGI("ERROR_END_OF_STREAM"); - - mNumRetriesLeft = 0; - mFinalStatus = ERROR_END_OF_STREAM; - - mCache->releasePage(page); } else { if (mFinalStatus != OK) { ALOGI("retrying a previously failed read succeeded."); @@ -430,6 +456,10 @@ void NuCachedSource2::onRead(const sp<AMessage> &msg) { } Mutex::Autolock autoLock(mLock); + if (mDisconnecting) { + mCondition.signal(); + return; + } CHECK(mAsyncResult == NULL); @@ -476,6 +506,9 @@ ssize_t NuCachedSource2::readAt(off64_t offset, void *data, size_t size) { ALOGV("readAt offset %lld, size %zu", offset, size); Mutex::Autolock autoLock(mLock); + if (mDisconnecting) { + return ERROR_END_OF_STREAM; + } // If the request can be completely satisfied from the cache, do so. 
@@ -497,10 +530,15 @@ ssize_t NuCachedSource2::readAt(off64_t offset, void *data, size_t size) { CHECK(mAsyncResult == NULL); msg->post(); - while (mAsyncResult == NULL) { + while (mAsyncResult == NULL && !mDisconnecting) { mCondition.wait(mLock); } + if (mDisconnecting) { + mAsyncResult.clear(); + return ERROR_END_OF_STREAM; + } + int32_t result; CHECK(mAsyncResult->findInt32("result", &result)); diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp index 8b4dd6f..1b6eac4 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp @@ -14,8 +14,8 @@ * limitations under the License. */ -#define LOG_TAG "SoftAAC2" //#define LOG_NDEBUG 0 +#define LOG_TAG "SoftAAC2" #include <utils/Log.h> #include "SoftAAC2.h" @@ -68,7 +68,6 @@ SoftAAC2::SoftAAC2( mOutputBufferCount(0), mSignalledError(false), mLastInHeader(NULL), - mCurrentInputTime(0), mOutputPortSettingsChange(NONE) { initPorts(); CHECK_EQ(initDecoder(), (status_t)OK); @@ -138,6 +137,7 @@ status_t SoftAAC2::initDecoder() { mOutputDelayRingBuffer = new short[mOutputDelayRingBufferSize]; mOutputDelayRingBufferWritePos = 0; mOutputDelayRingBufferReadPos = 0; + mOutputDelayRingBufferFilled = 0; if (mAACDecoder == NULL) { ALOGE("AAC decoder is null. 
TODO: Can not call aacDecoder_SetParam in the following code"); @@ -368,6 +368,10 @@ OMX_ERRORTYPE SoftAAC2::internalSetParameter( aacPresParams->nEncodedTargetLevel); updateDrcWrapper = true; } + if (aacPresParams->nPCMLimiterEnable >= 0) { + aacDecoder_SetParam(mAACDecoder, AAC_PCM_LIMITER_ENABLE, + (aacPresParams->nPCMLimiterEnable != 0)); + } if (updateDrcWrapper) { mDrcWrap.update(); } @@ -409,6 +413,13 @@ void SoftAAC2::configureDownmix() const { } bool SoftAAC2::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamples) { + if (numSamples == 0) { + return true; + } + if (outputDelayRingBufferSpaceLeft() < numSamples) { + ALOGE("RING BUFFER WOULD OVERFLOW"); + return false; + } if (mOutputDelayRingBufferWritePos + numSamples <= mOutputDelayRingBufferSize && (mOutputDelayRingBufferReadPos <= mOutputDelayRingBufferWritePos || mOutputDelayRingBufferReadPos > mOutputDelayRingBufferWritePos + numSamples)) { @@ -420,10 +431,6 @@ bool SoftAAC2::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamp if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) { mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize; } - if (mOutputDelayRingBufferWritePos == mOutputDelayRingBufferReadPos) { - ALOGE("RING BUFFER OVERFLOW"); - return false; - } } else { ALOGV("slow SoftAAC2::outputDelayRingBufferPutSamples()"); @@ -433,16 +440,19 @@ bool SoftAAC2::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamp if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) { mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize; } - if (mOutputDelayRingBufferWritePos == mOutputDelayRingBufferReadPos) { - ALOGE("RING BUFFER OVERFLOW"); - return false; - } } } + mOutputDelayRingBufferFilled += numSamples; return true; } int32_t SoftAAC2::outputDelayRingBufferGetSamples(INT_PCM *samples, int32_t numSamples) { + + if (numSamples > mOutputDelayRingBufferFilled) { + ALOGE("RING BUFFER WOULD UNDERRUN"); + return -1; + } + if 
(mOutputDelayRingBufferReadPos + numSamples <= mOutputDelayRingBufferSize && (mOutputDelayRingBufferWritePos < mOutputDelayRingBufferReadPos || mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferReadPos + numSamples)) { @@ -461,10 +471,6 @@ int32_t SoftAAC2::outputDelayRingBufferGetSamples(INT_PCM *samples, int32_t numS ALOGV("slow SoftAAC2::outputDelayRingBufferGetSamples()"); for (int32_t i = 0; i < numSamples; i++) { - if (mOutputDelayRingBufferWritePos == mOutputDelayRingBufferReadPos) { - ALOGE("RING BUFFER UNDERRUN"); - return -1; - } if (samples != 0) { samples[i] = mOutputDelayRingBuffer[mOutputDelayRingBufferReadPos]; } @@ -474,22 +480,15 @@ int32_t SoftAAC2::outputDelayRingBufferGetSamples(INT_PCM *samples, int32_t numS } } } + mOutputDelayRingBufferFilled -= numSamples; return numSamples; } int32_t SoftAAC2::outputDelayRingBufferSamplesAvailable() { - int32_t available = mOutputDelayRingBufferWritePos - mOutputDelayRingBufferReadPos; - if (available < 0) { - available += mOutputDelayRingBufferSize; - } - if (available < 0) { - ALOGE("FATAL RING BUFFER ERROR"); - return 0; - } - return available; + return mOutputDelayRingBufferFilled; } -int32_t SoftAAC2::outputDelayRingBufferSamplesLeft() { +int32_t SoftAAC2::outputDelayRingBufferSpaceLeft() { return mOutputDelayRingBufferSize - outputDelayRingBufferSamplesAvailable(); } @@ -513,6 +512,11 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; mEndOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0; + + if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) { + ALOGE("first buffer should have OMX_BUFFERFLAG_CODECCONFIG set"); + inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG; + } if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) { BufferInfo *inInfo = *inQueue.begin(); OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; @@ -610,9 +614,24 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { 
notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL); return; } + + // insert buffer size and time stamp + mBufferSizes.add(inBufferLength[0]); + if (mLastInHeader != inHeader) { + mBufferTimestamps.add(inHeader->nTimeStamp); + mLastInHeader = inHeader; + } else { + int64_t currentTime = mBufferTimestamps.top(); + currentTime += mStreamInfo->aacSamplesPerFrame * + 1000000ll / mStreamInfo->sampleRate; + mBufferTimestamps.add(currentTime); + } } else { inBuffer[0] = inHeader->pBuffer + inHeader->nOffset; inBufferLength[0] = inHeader->nFilledLen; + mLastInHeader = inHeader; + mBufferTimestamps.add(inHeader->nTimeStamp); + mBufferSizes.add(inHeader->nFilledLen); } // Fill and decode @@ -621,136 +640,138 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { INT prevSampleRate = mStreamInfo->sampleRate; INT prevNumChannels = mStreamInfo->numChannels; - if (inHeader != mLastInHeader) { - mLastInHeader = inHeader; - mCurrentInputTime = inHeader->nTimeStamp; - } else { - if (mStreamInfo->sampleRate) { - mCurrentInputTime += mStreamInfo->aacSamplesPerFrame * - 1000000ll / mStreamInfo->sampleRate; - } else { - ALOGW("no sample rate yet"); - } - } - mAnchorTimes.add(mCurrentInputTime); aacDecoder_Fill(mAACDecoder, inBuffer, inBufferLength, bytesValid); - // run DRC check - mDrcWrap.submitStreamData(mStreamInfo); - mDrcWrap.update(); - - AAC_DECODER_ERROR decoderErr = - aacDecoder_DecodeFrame(mAACDecoder, - tmpOutBuffer, - 2048 * MAX_CHANNEL_COUNT, - 0 /* flags */); + // run DRC check + mDrcWrap.submitStreamData(mStreamInfo); + mDrcWrap.update(); - if (decoderErr != AAC_DEC_OK) { - ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr); - } + UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; + inHeader->nFilledLen -= inBufferUsedLength; + inHeader->nOffset += inBufferUsedLength; - if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { - ALOGE("AAC_DEC_NOT_ENOUGH_BITS should never happen"); - mSignalledError = true; - notify(OMX_EventError, 
OMX_ErrorUndefined, 0, NULL); - return; - } + AAC_DECODER_ERROR decoderErr; + do { + if (outputDelayRingBufferSpaceLeft() < + (mStreamInfo->frameSize * mStreamInfo->numChannels)) { + ALOGV("skipping decode: not enough space left in ringbuffer"); + break; + } - if (bytesValid[0] != 0) { - ALOGE("bytesValid[0] != 0 should never happen"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); - return; - } + int numconsumed = mStreamInfo->numTotalBytes + mStreamInfo->numBadBytes; + decoderErr = aacDecoder_DecodeFrame(mAACDecoder, + tmpOutBuffer, + 2048 * MAX_CHANNEL_COUNT, + 0 /* flags */); - size_t numOutBytes = - mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; + numconsumed = (mStreamInfo->numTotalBytes + mStreamInfo->numBadBytes) - numconsumed; + if (numconsumed != 0) { + mDecodedSizes.add(numconsumed); + } - if (decoderErr == AAC_DEC_OK) { - if (!outputDelayRingBufferPutSamples(tmpOutBuffer, - mStreamInfo->frameSize * mStreamInfo->numChannels)) { - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); - return; + if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { + break; } - UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; - inHeader->nFilledLen -= inBufferUsedLength; - inHeader->nOffset += inBufferUsedLength; - } else { - ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr); - memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow + if (decoderErr != AAC_DEC_OK) { + ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr); + } - if (!outputDelayRingBufferPutSamples(tmpOutBuffer, - mStreamInfo->frameSize * mStreamInfo->numChannels)) { + if (bytesValid[0] != 0) { + ALOGE("bytesValid[0] != 0 should never happen"); mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); + notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); return; } - // Discard input buffer. 
- inHeader->nFilledLen = 0; - - aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); - - // fall through - } - - /* - * AAC+/eAAC+ streams can be signalled in two ways: either explicitly - * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual - * rate system and the sampling rate in the final output is actually - * doubled compared with the core AAC decoder sampling rate. - * - * Explicit signalling is done by explicitly defining SBR audio object - * type in the bitstream. Implicit signalling is done by embedding - * SBR content in AAC extension payload specific to SBR, and hence - * requires an AAC decoder to perform pre-checks on actual audio frames. - * - * Thus, we could not say for sure whether a stream is - * AAC+/eAAC+ until the first data frame is decoded. - */ - if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1 - if (mStreamInfo->sampleRate != prevSampleRate || - mStreamInfo->numChannels != prevNumChannels) { - ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels", - prevSampleRate, mStreamInfo->sampleRate, - prevNumChannels, mStreamInfo->numChannels); + size_t numOutBytes = + mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; + if (decoderErr == AAC_DEC_OK) { + if (!outputDelayRingBufferPutSamples(tmpOutBuffer, + mStreamInfo->frameSize * mStreamInfo->numChannels)) { + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); + return; + } + } else { + ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr); + + memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow + + if (!outputDelayRingBufferPutSamples(tmpOutBuffer, + mStreamInfo->frameSize * mStreamInfo->numChannels)) { + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); + return; + } + + // Discard input buffer. 
+ if (inHeader) { + inHeader->nFilledLen = 0; + } - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - mInputBufferCount++; - inQueue.erase(inQueue.begin()); - mLastInHeader = NULL; - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; + aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); + + // fall through + } + + /* + * AAC+/eAAC+ streams can be signalled in two ways: either explicitly + * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual + * rate system and the sampling rate in the final output is actually + * doubled compared with the core AAC decoder sampling rate. + * + * Explicit signalling is done by explicitly defining SBR audio object + * type in the bitstream. Implicit signalling is done by embedding + * SBR content in AAC extension payload specific to SBR, and hence + * requires an AAC decoder to perform pre-checks on actual audio frames. + * + * Thus, we could not say for sure whether a stream is + * AAC+/eAAC+ until the first data frame is decoded. 
+ */ + if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1 + if (mStreamInfo->sampleRate != prevSampleRate || + mStreamInfo->numChannels != prevNumChannels) { + ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels", + prevSampleRate, mStreamInfo->sampleRate, + prevNumChannels, mStreamInfo->numChannels); + + notify(OMX_EventPortSettingsChanged, 1, 0, NULL); + mOutputPortSettingsChange = AWAITING_DISABLED; + + if (inHeader && inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + mInputBufferCount++; + inQueue.erase(inQueue.begin()); + mLastInHeader = NULL; + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } + return; } + } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) { + ALOGW("Invalid AAC stream"); + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); return; } - } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) { - ALOGW("Invalid AAC stream"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); - return; - } - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - mInputBufferCount++; - inQueue.erase(inQueue.begin()); - mLastInHeader = NULL; - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; - } else { - ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen); - } + if (inHeader && inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + mInputBufferCount++; + inQueue.erase(inQueue.begin()); + mLastInHeader = NULL; + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } else { + ALOGV("inHeader->nFilledLen = %d", inHeader ? 
inHeader->nFilledLen : 0); + } + } while (decoderErr == AAC_DEC_OK); } int32_t outputDelay = mStreamInfo->outputDelay * mStreamInfo->numChannels; @@ -809,8 +830,9 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset); + int samplesize = mStreamInfo->numChannels * sizeof(int16_t); if (outHeader->nOffset - + mStreamInfo->frameSize * mStreamInfo->numChannels * sizeof(int16_t) + + mStreamInfo->frameSize * samplesize > outHeader->nAllocLen) { ALOGE("buffer overflow"); mSignalledError = true; @@ -818,17 +840,67 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { return; } - int32_t ns = outputDelayRingBufferGetSamples(outBuffer, - mStreamInfo->frameSize * mStreamInfo->numChannels); // TODO: check for overflow - if (ns != mStreamInfo->frameSize * mStreamInfo->numChannels) { - ALOGE("not a complete frame of samples available"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); - return; + + int available = outputDelayRingBufferSamplesAvailable(); + int numSamples = outHeader->nAllocLen / sizeof(int16_t); + if (numSamples > available) { + numSamples = available; + } + int64_t currentTime = 0; + if (available) { + + int numFrames = numSamples / (mStreamInfo->frameSize * mStreamInfo->numChannels); + numSamples = numFrames * (mStreamInfo->frameSize * mStreamInfo->numChannels); + + ALOGV("%d samples available (%d), or %d frames", + numSamples, available, numFrames); + int64_t *nextTimeStamp = &mBufferTimestamps.editItemAt(0); + currentTime = *nextTimeStamp; + int32_t *currentBufLeft = &mBufferSizes.editItemAt(0); + for (int i = 0; i < numFrames; i++) { + int32_t decodedSize = mDecodedSizes.itemAt(0); + mDecodedSizes.removeAt(0); + ALOGV("decoded %d of %d", decodedSize, *currentBufLeft); + if (*currentBufLeft > decodedSize) { + // adjust/interpolate next time stamp + *currentBufLeft -= decodedSize; + *nextTimeStamp += 
mStreamInfo->aacSamplesPerFrame * + 1000000ll / mStreamInfo->sampleRate; + ALOGV("adjusted nextTimeStamp/size to %lld/%d", + *nextTimeStamp, *currentBufLeft); + } else { + // move to next timestamp in list + if (mBufferTimestamps.size() > 0) { + mBufferTimestamps.removeAt(0); + nextTimeStamp = &mBufferTimestamps.editItemAt(0); + mBufferSizes.removeAt(0); + currentBufLeft = &mBufferSizes.editItemAt(0); + ALOGV("moved to next time/size: %lld/%d", + *nextTimeStamp, *currentBufLeft); + } + // try to limit output buffer size to match input buffers + // (e.g when an input buffer contained 4 "sub" frames, output + // at most 4 decoded units in the corresponding output buffer) + // This is optional. Remove the next three lines to fill the output + // buffer with as many units as available. + numFrames = i + 1; + numSamples = numFrames * mStreamInfo->frameSize * mStreamInfo->numChannels; + break; + } + } + + ALOGV("getting %d from ringbuffer", numSamples); + int32_t ns = outputDelayRingBufferGetSamples(outBuffer, numSamples); + if (ns != numSamples) { + ALOGE("not a complete frame of samples available"); + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); + return; + } } - outHeader->nFilledLen = mStreamInfo->frameSize * mStreamInfo->numChannels - * sizeof(int16_t); + outHeader->nFilledLen = numSamples * sizeof(int16_t); + if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) { outHeader->nFlags = OMX_BUFFERFLAG_EOS; mEndOfOutput = true; @@ -836,13 +908,13 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { outHeader->nFlags = 0; } - outHeader->nTimeStamp = mAnchorTimes.isEmpty() ? 
0 : mAnchorTimes.itemAt(0); - mAnchorTimes.removeAt(0); + outHeader->nTimeStamp = currentTime; mOutputBufferCount++; outInfo->mOwnedByUs = false; outQueue.erase(outQueue.begin()); outInfo = NULL; + ALOGV("out timestamp %lld / %d", outHeader->nTimeStamp, outHeader->nFilledLen); notifyFillBufferDone(outHeader); outHeader = NULL; } @@ -877,8 +949,10 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { outHeader->nFilledLen = 0; outHeader->nFlags = OMX_BUFFERFLAG_EOS; - outHeader->nTimeStamp = mAnchorTimes.itemAt(0); - mAnchorTimes.removeAt(0); + outHeader->nTimeStamp = mBufferTimestamps.itemAt(0); + mBufferTimestamps.clear(); + mBufferSizes.clear(); + mDecodedSizes.clear(); mOutputBufferCount++; outInfo->mOwnedByUs = false; @@ -899,14 +973,20 @@ void SoftAAC2::onPortFlushCompleted(OMX_U32 portIndex) { // depend on fragments from the last one decoded. // drain all existing data drainDecoder(); - mAnchorTimes.clear(); + mBufferTimestamps.clear(); + mBufferSizes.clear(); + mDecodedSizes.clear(); mLastInHeader = NULL; } else { - while (outputDelayRingBufferSamplesAvailable() > 0) { - int32_t ns = outputDelayRingBufferGetSamples(0, - mStreamInfo->frameSize * mStreamInfo->numChannels); - if (ns != mStreamInfo->frameSize * mStreamInfo->numChannels) { + int avail; + while ((avail = outputDelayRingBufferSamplesAvailable()) > 0) { + if (avail > mStreamInfo->frameSize * mStreamInfo->numChannels) { + avail = mStreamInfo->frameSize * mStreamInfo->numChannels; + } + int32_t ns = outputDelayRingBufferGetSamples(0, avail); + if (ns != avail) { ALOGE("not a complete frame of samples available"); + break; } mOutputBufferCount++; } @@ -953,9 +1033,12 @@ void SoftAAC2::onReset() { mOutputDelayCompensated = 0; mOutputDelayRingBufferWritePos = 0; mOutputDelayRingBufferReadPos = 0; + mOutputDelayRingBufferFilled = 0; mEndOfInput = false; mEndOfOutput = false; - mAnchorTimes.clear(); + mBufferTimestamps.clear(); + mBufferSizes.clear(); + mDecodedSizes.clear(); mLastInHeader = NULL; // 
To make the codec behave the same before and after a reset, we need to invalidate the diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h index 865bd15..c3e4459 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.h +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h @@ -59,8 +59,9 @@ private: size_t mOutputBufferCount; bool mSignalledError; OMX_BUFFERHEADERTYPE *mLastInHeader; - int64_t mCurrentInputTime; - Vector<int64_t> mAnchorTimes; + Vector<int32_t> mBufferSizes; + Vector<int32_t> mDecodedSizes; + Vector<int64_t> mBufferTimestamps; CDrcPresModeWrapper mDrcWrap; @@ -84,10 +85,11 @@ private: short *mOutputDelayRingBuffer; int32_t mOutputDelayRingBufferWritePos; int32_t mOutputDelayRingBufferReadPos; + int32_t mOutputDelayRingBufferFilled; bool outputDelayRingBufferPutSamples(INT_PCM *samples, int numSamples); int32_t outputDelayRingBufferGetSamples(INT_PCM *samples, int numSamples); int32_t outputDelayRingBufferSamplesAvailable(); - int32_t outputDelayRingBufferSamplesLeft(); + int32_t outputDelayRingBufferSpaceLeft(); DISALLOW_EVIL_CONSTRUCTORS(SoftAAC2); }; diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp index b0d0827..f4cba54 100644 --- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp +++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp @@ -67,22 +67,16 @@ SoftHEVC::SoftHEVC( : SoftVideoDecoderOMXComponent(name, componentName, codingType, kProfileLevels, ARRAY_SIZE(kProfileLevels), 320 /* width */, 240 /* height */, callbacks, - appData, component) { + appData, component), + mMemRecords(NULL), + mFlushOutBuffer(NULL), + mOmxColorFormat(OMX_COLOR_FormatYUV420Planar), + mIvColorFormat(IV_YUV_420P), + mNewWidth(mWidth), + mNewHeight(mHeight), + mChangingResolution(false) { initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE); - - mOmxColorFormat = OMX_COLOR_FormatYUV420Planar; - mStride = mWidth; - - if 
(OMX_COLOR_FormatYUV420Planar == mOmxColorFormat) { - mIvColorFormat = IV_YUV_420P; - } else if (OMX_COLOR_FormatYUV420SemiPlanar == mOmxColorFormat) { - mIvColorFormat = IV_YUV_420SP_UV; - } - - mInitWidth = mWidth; - mInitHeight = mHeight; - CHECK_EQ(initDecoder(), (status_t)OK); } @@ -144,7 +138,7 @@ status_t SoftHEVC::setParams(size_t stride) { s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t); s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t); - ALOGD("Set the run-time (dynamic) parameters"); + ALOGV("Set the run-time (dynamic) parameters stride = %u", stride); status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip, (void *)&s_ctl_op); @@ -188,7 +182,7 @@ status_t SoftHEVC::resetDecoder() { } /* Set the run-time (dynamic) parameters */ - setParams(0); + setParams(outputBufferWidth()); /* Set number of cores/threads to be used by the codec */ setNumCores(); @@ -250,23 +244,25 @@ status_t SoftHEVC::initDecoder() { WORD32 i4_level; mNumCores = GetCPUCoreCount(); - mMemRecords = NULL; - mFlushOutBuffer = NULL; /* Initialize number of ref and reorder modes (for HEVC) */ u4_num_reorder_frames = 16; u4_num_ref_frames = 16; u4_share_disp_buf = 0; - if ((mWidth * mHeight) > (1920 * 1088)) { + uint32_t displayStride = outputBufferWidth(); + uint32_t displayHeight = outputBufferHeight(); + uint32_t displaySizeY = displayStride * displayHeight; + + if (displaySizeY > (1920 * 1088)) { i4_level = 50; - } else if ((mWidth * mHeight) > (1280 * 720)) { + } else if (displaySizeY > (1280 * 720)) { i4_level = 40; - } else if ((mWidth * mHeight) > (960 * 540)) { + } else if (displaySizeY > (960 * 540)) { i4_level = 31; - } else if ((mWidth * mHeight) > (640 * 360)) { + } else if (displaySizeY > (640 * 360)) { i4_level = 30; - } else if ((mWidth * mHeight) > (352 * 288)) { + } else if (displaySizeY > (352 * 288)) { i4_level = 21; } else { i4_level = 20; @@ -317,8 +313,8 @@ status_t SoftHEVC::initDecoder() { s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.e_cmd = IV_CMD_FILL_NUM_MEM_REC; 
s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.pv_mem_rec_location = mMemRecords; - s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = mWidth; - s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = mHeight; + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = displayStride; + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = displayHeight; s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_size = sizeof(ivdext_fill_mem_rec_op_t); @@ -363,8 +359,8 @@ status_t SoftHEVC::initDecoder() { s_init_ip.s_ivd_init_ip_t.u4_size = sizeof(ivdext_init_ip_t); s_init_ip.s_ivd_init_ip_t.e_cmd = (IVD_API_COMMAND_TYPE_T)IV_CMD_INIT; s_init_ip.s_ivd_init_ip_t.pv_mem_rec_location = mMemRecords; - s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = mWidth; - s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = mHeight; + s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = displayStride; + s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = displayHeight; s_init_ip.i4_level = i4_level; s_init_ip.u4_num_reorder_frames = u4_num_reorder_frames; @@ -395,7 +391,7 @@ status_t SoftHEVC::initDecoder() { resetPlugin(); /* Set the run time (dynamic) parameters */ - setParams(0); + setParams(displayStride); /* Set number of cores/threads to be used by the codec */ setNumCores(); @@ -404,12 +400,15 @@ status_t SoftHEVC::initDecoder() { logVersion(); /* Allocate internal picture buffer */ - mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, mStride * mHeight * 3 / 2); + uint32_t bufferSize = displaySizeY * 3 / 2; + mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize); if (NULL == mFlushOutBuffer) { - ALOGE("Could not allocate flushOutputBuffer of size %zu", mStride * mHeight * 3 / 2); + ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize); return NO_MEMORY; } + mInitNeeded = false; + mFlushNeeded = false; return OK; } @@ -428,11 +427,17 @@ status_t SoftHEVC::deInitDecoder() { ps_mem_rec++; } ivd_aligned_free(mMemRecords); + mMemRecords = NULL; } if(mFlushOutBuffer) { ivd_aligned_free(mFlushOutBuffer); + 
mFlushOutBuffer = NULL; } + + mInitNeeded = true; + mChangingResolution = false; + return OK; } @@ -449,6 +454,7 @@ status_t SoftHEVC::reInitDecoder() { } return OK; } + void SoftHEVC::onReset() { ALOGD("onReset called"); SoftVideoDecoderOMXComponent::onReset(); @@ -457,12 +463,22 @@ void SoftHEVC::onReset() { resetPlugin(); } +OMX_ERRORTYPE SoftHEVC::internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params) { + const uint32_t oldWidth = mWidth; + const uint32_t oldHeight = mHeight; + OMX_ERRORTYPE ret = SoftVideoDecoderOMXComponent::internalSetParameter(index, params); + if (mWidth != oldWidth || mHeight != oldHeight) { + reInitDecoder(); + } + return ret; +} + void SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip, ivd_video_decode_op_t *ps_dec_op, OMX_BUFFERHEADERTYPE *inHeader, OMX_BUFFERHEADERTYPE *outHeader, - size_t sizeY, size_t timeStampIx) { + size_t sizeY = outputBufferWidth() * outputBufferHeight(); size_t sizeUV; uint8_t *pBuf; @@ -502,8 +518,6 @@ void SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip, return; } void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) { - ALOGD("onPortFlushCompleted on port %d", portIndex); - /* Once the output buffers are flushed, ignore any buffers that are held in decoder */ if (kOutputPortIndex == portIndex) { setFlushMode(); @@ -514,7 +528,7 @@ void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) { IV_API_CALL_STATUS_T status; size_t sizeY, sizeUV; - setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, NULL, mStride * mHeight, 0); + setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, NULL, 0); status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op); @@ -527,8 +541,6 @@ void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) { } void SoftHEVC::onQueueFilled(OMX_U32 portIndex) { - IV_API_CALL_STATUS_T status; - UNUSED(portIndex); if (mOutputPortSettingsChange != NONE) { @@ -548,7 +560,7 @@ void SoftHEVC::onQueueFilled(OMX_U32 portIndex) { setFlushMode(); } - while (outQueue.size() == kNumBuffers) 
{ + while (!outQueue.empty()) { BufferInfo *inInfo; OMX_BUFFERHEADERTYPE *inHeader; @@ -586,6 +598,16 @@ void SoftHEVC::onQueueFilled(OMX_U32 portIndex) { } } + // When there is an init required and the decoder is not in flush mode, + // update output port's definition and reinitialize decoder. + if (mInitNeeded && !mIsInFlush) { + bool portWillReset = false; + handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight); + + CHECK_EQ(reInitDecoder(), (status_t)OK); + return; + } + /* Get a free slot in timestamp array to hold input timestamp */ { size_t i; @@ -608,68 +630,91 @@ void SoftHEVC::onQueueFilled(OMX_U32 portIndex) { WORD32 timeDelay, timeTaken; size_t sizeY, sizeUV; - setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, - mStride * mHeight, timeStampIx); + setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx); GETTIME(&mTimeStart, NULL); /* Compute time elapsed between end of previous decode() * to start of current decode() */ TIME_DIFF(mTimeEnd, mTimeStart, timeDelay); - status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, - (void *)&s_dec_op); + IV_API_CALL_STATUS_T status; + status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op); + // FIXME: Compare |status| to IHEVCD_UNSUPPORTED_DIMENSIONS, which is not one of the + // IV_API_CALL_STATUS_T, seems be wrong. But this is what the decoder returns right now. + // The decoder should be fixed so that |u4_error_code| instead of |status| returns + // IHEVCD_UNSUPPORTED_DIMENSIONS. 
+ bool unsupportedDimensions = + ((IHEVCD_UNSUPPORTED_DIMENSIONS == status) + || (IHEVCD_UNSUPPORTED_DIMENSIONS == s_dec_op.u4_error_code)); + bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF)); GETTIME(&mTimeEnd, NULL); /* Compute time taken for decode() */ TIME_DIFF(mTimeStart, mTimeEnd, timeTaken); - ALOGD("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay, - s_dec_op.u4_num_bytes_consumed); + ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay, + s_dec_op.u4_num_bytes_consumed); + if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) { + mFlushNeeded = true; + } + + if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) { + /* If the input did not contain picture data, then ignore + * the associated timestamp */ + mTimeStampsValid[timeStampIx] = false; + } - /* If width and height are greater than the - * the dimensions used during codec create, then - * delete the current instance and recreate an instance with - * new dimensions */ + // This is needed to handle CTS DecoderTest testCodecResetsHEVCWithoutSurface, + // which is not sending SPS/PPS after port reconfiguration and flush to the codec. 
+ if (unsupportedDimensions && !mFlushNeeded) { + bool portWillReset = false; + handlePortSettingsChange(&portWillReset, s_dec_op.u4_pic_wd, s_dec_op.u4_pic_ht); - if(IHEVCD_UNSUPPORTED_DIMENSIONS == s_dec_op.u4_error_code) { - mInitWidth = s_dec_op.u4_pic_wd; - mInitHeight = s_dec_op.u4_pic_ht; - mStride = mInitWidth; CHECK_EQ(reInitDecoder(), (status_t)OK); - setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, - mStride * mHeight, timeStampIx); + setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx); - status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, - (void *)&s_dec_op); + ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op); + return; } - if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) { - /* If the input did not contain picture data, then ignore - * the associated timestamp */ - mTimeStampsValid[timeStampIx] = false; + + // If the decoder is in the changing resolution mode and there is no output present, + // that means the switching is done and it's ready to reset the decoder and the plugin. 
+ if (mChangingResolution && !s_dec_op.u4_output_present) { + mChangingResolution = false; + resetDecoder(); + resetPlugin(); + continue; + } + + if (unsupportedDimensions || resChanged) { + mChangingResolution = true; + if (mFlushNeeded) { + setFlushMode(); + } + + if (unsupportedDimensions) { + mNewWidth = s_dec_op.u4_pic_wd; + mNewHeight = s_dec_op.u4_pic_ht; + mInitNeeded = true; + } + continue; } - /* If valid height and width are decoded, - * then look at change in resolution */ if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) { uint32_t width = s_dec_op.u4_pic_wd; uint32_t height = s_dec_op.u4_pic_ht; + bool portWillReset = false; + handlePortSettingsChange(&portWillReset, width, height); - if ((width != mWidth) || (height != mHeight)) { - mWidth = width; - mHeight = height; - mStride = mWidth; - - updatePortDefinitions(); - - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; + if (portWillReset) { + resetDecoder(); return; } } if (s_dec_op.u4_output_present) { - outHeader->nFilledLen = (mStride * mHeight * 3) / 2; + outHeader->nFilledLen = (mWidth * mHeight * 3) / 2; outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts]; mTimeStampsValid[s_dec_op.u4_ts] = false; @@ -711,7 +756,7 @@ void SoftHEVC::onQueueFilled(OMX_U32 portIndex) { } } -} // namespace android +} // namespace android android::SoftOMXComponent *createSoftOMXComponent(const char *name, const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData, diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h index 233db0c..a91f528 100644 --- a/media/libstagefright/codecs/hevcdec/SoftHEVC.h +++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h @@ -62,6 +62,7 @@ protected: virtual void onQueueFilled(OMX_U32 portIndex); virtual void onPortFlushCompleted(OMX_U32 portIndex); virtual void onReset(); + virtual OMX_ERRORTYPE internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params); private: // Number of 
input and output buffers enum { @@ -72,12 +73,6 @@ private: iv_mem_rec_t *mMemRecords; // Memory records requested by the codec size_t mNumMemRecords; // Number of memory records requested by the codec - uint32_t mNewWidth; // New width after change in resolution - uint32_t mNewHeight; // New height after change in resolution - uint32_t mInitWidth; // Width used during codec creation - uint32_t mInitHeight; // Height used during codec creation - size_t mStride; // Stride to be used for display buffers - size_t mNumCores; // Number of cores to be uesd by the codec struct timeval mTimeStart; // Time at the start of decode() @@ -98,7 +93,13 @@ private: bool mIsInFlush; // codec is flush mode bool mReceivedEOS; // EOS is receieved on input port - bool mIsAdapting; // plugin in middle of change in resolution + bool mInitNeeded; + uint32_t mNewWidth; + uint32_t mNewHeight; + // The input stream has changed to a different resolution, which is still supported by the + // codec. So the codec is switching to decode the new resolution. 
+ bool mChangingResolution; + bool mFlushNeeded; status_t initDecoder(); status_t deInitDecoder(); @@ -114,7 +115,6 @@ private: ivd_video_decode_op_t *ps_dec_op, OMX_BUFFERHEADERTYPE *inHeader, OMX_BUFFERHEADERTYPE *outHeader, - size_t sizeY, size_t timeStampIx); DISALLOW_EVIL_CONSTRUCTORS (SoftHEVC); diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp index 0d1ab71..1f4b6fd 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp +++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp @@ -134,6 +134,12 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { } uint8_t *bitstream = inHeader->pBuffer + inHeader->nOffset; + uint32_t *start_code = (uint32_t *)bitstream; + bool volHeader = *start_code == 0xB0010000; + if (volHeader) { + PVCleanUpVideoDecoder(mHandle); + mInitialized = false; + } if (!mInitialized) { uint8_t *vol_data[1]; @@ -141,7 +147,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { vol_data[0] = NULL; - if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) { + if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) || volHeader) { vol_data[0] = bitstream; vol_size = inHeader->nFilledLen; } @@ -150,7 +156,8 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { (mMode == MODE_MPEG4) ? MPEG4_MODE : H263_MODE; Bool success = PVInitVideoDecoder( - mHandle, vol_data, &vol_size, 1, mWidth, mHeight, mode); + mHandle, vol_data, &vol_size, 1, + outputBufferWidth(), outputBufferHeight(), mode); if (!success) { ALOGW("PVInitVideoDecoder failed. 
Unsupported content?"); @@ -169,21 +176,26 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { PVSetPostProcType((VideoDecControls *) mHandle, 0); + bool hasFrameData = false; if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) { inInfo->mOwnedByUs = false; inQueue.erase(inQueue.begin()); inInfo = NULL; notifyEmptyBufferDone(inHeader); inHeader = NULL; + } else if (volHeader) { + hasFrameData = true; } mInitialized = true; - if (mode == MPEG4_MODE && portSettingsChanged()) { + if (mode == MPEG4_MODE && handlePortSettingsChange()) { return; } - continue; + if (!hasFrameData) { + continue; + } } if (!mFramesConfigured) { @@ -223,7 +235,9 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { return; } - if (portSettingsChanged()) { + // H263 doesn't have VOL header, the frame size information is in short header, i.e. the + // decoder may detect size change after PVDecodeVideoFrame. + if (handlePortSettingsChange()) { return; } @@ -269,7 +283,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { } } -bool SoftMPEG4::portSettingsChanged() { +bool SoftMPEG4::handlePortSettingsChange() { uint32_t disp_width, disp_height; PVGetVideoDimensions(mHandle, (int32 *)&disp_width, (int32 *)&disp_height); @@ -282,25 +296,24 @@ bool SoftMPEG4::portSettingsChanged() { ALOGV("disp_width = %d, disp_height = %d, buf_width = %d, buf_height = %d", disp_width, disp_height, buf_width, buf_height); - if (mCropWidth != disp_width - || mCropHeight != disp_height) { - mCropLeft = 0; - mCropTop = 0; - mCropWidth = disp_width; - mCropHeight = disp_height; - - notify(OMX_EventPortSettingsChanged, - 1, - OMX_IndexConfigCommonOutputCrop, - NULL); - } + CropSettingsMode cropSettingsMode = kCropUnSet; + if (disp_width != buf_width || disp_height != buf_height) { + cropSettingsMode = kCropSet; - if (buf_width != mWidth || buf_height != mHeight) { - mWidth = buf_width; - mHeight = buf_height; - - updatePortDefinitions(); + if (mCropWidth != disp_width || mCropHeight != disp_height) 
{ + mCropLeft = 0; + mCropTop = 0; + mCropWidth = disp_width; + mCropHeight = disp_height; + cropSettingsMode = kCropChanged; + } + } + bool portWillReset = false; + const bool fakeStride = true; + SoftVideoDecoderOMXComponent::handlePortSettingsChange( + &portWillReset, buf_width, buf_height, cropSettingsMode, fakeStride); + if (portWillReset) { if (mMode == MODE_H263) { PVCleanUpVideoDecoder(mHandle); @@ -309,7 +322,7 @@ bool SoftMPEG4::portSettingsChanged() { vol_data[0] = NULL; if (!PVInitVideoDecoder( - mHandle, vol_data, &vol_size, 1, mWidth, mHeight, + mHandle, vol_data, &vol_size, 1, outputBufferWidth(), outputBufferHeight(), H263_MODE)) { notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); mSignalledError = true; @@ -318,13 +331,9 @@ bool SoftMPEG4::portSettingsChanged() { } mFramesConfigured = false; - - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; - return true; } - return false; + return portWillReset; } void SoftMPEG4::onPortFlushCompleted(OMX_U32 portIndex) { diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h index de14aaf..8a06a00 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h +++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h @@ -67,7 +67,7 @@ private: status_t initDecoder(); virtual void updatePortDefinitions(); - bool portSettingsChanged(); + bool handlePortSettingsChange(); DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4); }; diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp index b3c350f..b03ec8c 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp +++ b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp @@ -1426,7 +1426,7 @@ PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop) video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1); /* otherwise calculate above */ } size = (int32)video->width * 
video->height; - if (video->currVop->predictionType == P_VOP && size > video->videoDecControls->size) + if (currVop->predictionType == P_VOP && size > video->videoDecControls->size) { status = PV_FAIL; goto return_point; diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp index 2f63bdd..828577a 100644 --- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp +++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp @@ -137,29 +137,10 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) { uint32_t width = mImg->d_w; uint32_t height = mImg->d_h; - - if (width != mWidth || height != mHeight) { - mWidth = width; - mHeight = height; - - if (!mIsAdaptive || width > mAdaptiveMaxWidth || height > mAdaptiveMaxHeight) { - if (mIsAdaptive) { - if (width > mAdaptiveMaxWidth) { - mAdaptiveMaxWidth = width; - } - if (height > mAdaptiveMaxHeight) { - mAdaptiveMaxHeight = height; - } - } - updatePortDefinitions(); - notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; - return; - } else { - updatePortDefinitions(); - notify(OMX_EventPortSettingsChanged, kOutputPortIndex, - OMX_IndexConfigCommonOutputCrop, NULL); - } + bool portWillReset = false; + handlePortSettingsChange(&portWillReset, width, height); + if (portWillReset) { + return; } outHeader->nOffset = 0; @@ -167,36 +148,14 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) { outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0; outHeader->nTimeStamp = inHeader->nTimeStamp; - uint32_t buffer_stride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth; - uint32_t buffer_height = mIsAdaptive ? 
mAdaptiveMaxHeight : mHeight; - - const uint8_t *srcLine = (const uint8_t *)mImg->planes[PLANE_Y]; uint8_t *dst = outHeader->pBuffer; - for (size_t i = 0; i < buffer_height; ++i) { - if (i < mImg->d_h) { - memcpy(dst, srcLine, mImg->d_w); - srcLine += mImg->stride[PLANE_Y]; - } - dst += buffer_stride; - } - - srcLine = (const uint8_t *)mImg->planes[PLANE_U]; - for (size_t i = 0; i < buffer_height / 2; ++i) { - if (i < mImg->d_h / 2) { - memcpy(dst, srcLine, mImg->d_w / 2); - srcLine += mImg->stride[PLANE_U]; - } - dst += buffer_stride / 2; - } - - srcLine = (const uint8_t *)mImg->planes[PLANE_V]; - for (size_t i = 0; i < buffer_height / 2; ++i) { - if (i < mImg->d_h / 2) { - memcpy(dst, srcLine, mImg->d_w / 2); - srcLine += mImg->stride[PLANE_V]; - } - dst += buffer_stride / 2; - } + const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y]; + const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U]; + const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V]; + size_t srcYStride = mImg->stride[PLANE_Y]; + size_t srcUStride = mImg->stride[PLANE_U]; + size_t srcVStride = mImg->stride[PLANE_V]; + copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride); mImg = NULL; outInfo->mOwnedByUs = false; diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp index a7bde97..168208f 100644 --- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp +++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp @@ -58,7 +58,6 @@ SoftAVC::SoftAVC( 320 /* width */, 240 /* height */, callbacks, appData, component), mHandle(NULL), mInputBufferCount(0), - mPictureSize(mWidth * mHeight * 3 / 2), mFirstPicture(NULL), mFirstPictureId(-1), mPicId(0), @@ -118,7 +117,7 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { } H264SwDecRet ret = H264SWDEC_PIC_RDY; - bool portSettingsChanged = false; + bool portWillReset = false; while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty()) && 
outQueue.size() == kNumOutputBuffers) { @@ -161,17 +160,14 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { H264SwDecInfo decoderInfo; CHECK(H264SwDecGetInfo(mHandle, &decoderInfo) == H264SWDEC_OK); - if (handlePortSettingChangeEvent(&decoderInfo)) { - portSettingsChanged = true; - } - - if (decoderInfo.croppingFlag && - handleCropRectEvent(&decoderInfo.cropParams)) { - portSettingsChanged = true; - } + SoftVideoDecoderOMXComponent::CropSettingsMode cropSettingsMode = + handleCropParams(decoderInfo); + handlePortSettingsChange( + &portWillReset, decoderInfo.picWidth, decoderInfo.picHeight, + cropSettingsMode); } } else { - if (portSettingsChanged) { + if (portWillReset) { if (H264SwDecNextPicture(mHandle, &decodedPicture, 0) == H264SWDEC_PIC_RDY) { @@ -199,8 +195,7 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { inInfo->mOwnedByUs = false; notifyEmptyBufferDone(inHeader); - if (portSettingsChanged) { - portSettingsChanged = false; + if (portWillReset) { return; } @@ -215,44 +210,34 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { } } -bool SoftAVC::handlePortSettingChangeEvent(const H264SwDecInfo *info) { - if (mWidth != info->picWidth || mHeight != info->picHeight) { - mWidth = info->picWidth; - mHeight = info->picHeight; - mPictureSize = mWidth * mHeight * 3 / 2; - updatePortDefinitions(); - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; - return true; +SoftVideoDecoderOMXComponent::CropSettingsMode SoftAVC::handleCropParams( + const H264SwDecInfo& decInfo) { + if (!decInfo.croppingFlag) { + return kCropUnSet; } - return false; -} - -bool SoftAVC::handleCropRectEvent(const CropParams *crop) { - if (mCropLeft != crop->cropLeftOffset || - mCropTop != crop->cropTopOffset || - mCropWidth != crop->cropOutWidth || - mCropHeight != crop->cropOutHeight) { - mCropLeft = crop->cropLeftOffset; - mCropTop = crop->cropTopOffset; - mCropWidth = crop->cropOutWidth; - mCropHeight = 
crop->cropOutHeight; - - notify(OMX_EventPortSettingsChanged, 1, - OMX_IndexConfigCommonOutputCrop, NULL); - - return true; + const CropParams& crop = decInfo.cropParams; + if (mCropLeft == crop.cropLeftOffset && + mCropTop == crop.cropTopOffset && + mCropWidth == crop.cropOutWidth && + mCropHeight == crop.cropOutHeight) { + return kCropSet; } - return false; + + mCropLeft = crop.cropLeftOffset; + mCropTop = crop.cropTopOffset; + mCropWidth = crop.cropOutWidth; + mCropHeight = crop.cropOutHeight; + return kCropChanged; } void SoftAVC::saveFirstOutputBuffer(int32_t picId, uint8_t *data) { CHECK(mFirstPicture == NULL); mFirstPictureId = picId; - mFirstPicture = new uint8_t[mPictureSize]; - memcpy(mFirstPicture, data, mPictureSize); + uint32_t pictureSize = mWidth * mHeight * 3 / 2; + mFirstPicture = new uint8_t[pictureSize]; + memcpy(mFirstPicture, data, pictureSize); } void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) { @@ -263,9 +248,17 @@ void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) { OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId); outHeader->nTimeStamp = header->nTimeStamp; outHeader->nFlags = header->nFlags; - outHeader->nFilledLen = mPictureSize; - memcpy(outHeader->pBuffer + outHeader->nOffset, - data, mPictureSize); + outHeader->nFilledLen = mWidth * mHeight * 3 / 2; + + uint8_t *dst = outHeader->pBuffer + outHeader->nOffset; + const uint8_t *srcY = data; + const uint8_t *srcU = srcY + mWidth * mHeight; + const uint8_t *srcV = srcU + mWidth * mHeight / 4; + size_t srcYStride = mWidth; + size_t srcUStride = mWidth / 2; + size_t srcVStride = srcUStride; + copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride); + mPicToHeaderMap.removeItem(picId); delete header; outInfo->mOwnedByUs = false; diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h index ee69926..069107d 100644 --- 
a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h +++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h @@ -55,8 +55,6 @@ private: size_t mInputBufferCount; - uint32_t mPictureSize; - uint8_t *mFirstPicture; int32_t mFirstPictureId; @@ -75,8 +73,7 @@ private: void drainAllOutputBuffers(bool eos); void drainOneOutputBuffer(int32_t picId, uint8_t *data); void saveFirstOutputBuffer(int32_t pidId, uint8_t *data); - bool handleCropRectEvent(const CropParams* crop); - bool handlePortSettingChangeEvent(const H264SwDecInfo *info); + CropSettingsMode handleCropParams(const H264SwDecInfo& decInfo); DISALLOW_EVIL_CONSTRUCTORS(SoftAVC); }; diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp index cc98da0..1899b40 100644 --- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp +++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp @@ -65,8 +65,8 @@ void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) { CHECK(format->findInt32("color-format", &colorFormatNew)); int32_t widthNew, heightNew; - CHECK(format->findInt32("width", &widthNew)); - CHECK(format->findInt32("height", &heightNew)); + CHECK(format->findInt32("stride", &widthNew)); + CHECK(format->findInt32("slice-height", &heightNew)); int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew; if (!format->findRect( diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml index c97be28..1cbef39 100644 --- a/media/libstagefright/data/media_codecs_google_video.xml +++ b/media/libstagefright/data/media_codecs_google_video.xml @@ -73,22 +73,22 @@ <Encoders> <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp"> <!-- profiles and levels: ProfileBaseline : Level45 --> - <Limit name="size" min="2x2" max="176x144" /> - <Limit name="alignment" value="2x2" /> + <Limit name="size" min="16x16" max="176x144" /> + <Limit name="alignment" 
value="16x16" /> <Limit name="bitrate" range="1-128000" /> </MediaCodec> <MediaCodec name="OMX.google.h264.encoder" type="video/avc"> <!-- profiles and levels: ProfileBaseline : Level2 --> - <Limit name="size" min="2x2" max="896x896" /> - <Limit name="alignment" value="2x2" /> + <Limit name="size" min="16x16" max="896x896" /> + <Limit name="alignment" value="16x16" /> <Limit name="block-size" value="16x16" /> <Limit name="blocks-per-second" range="1-11880" /> <Limit name="bitrate" range="1-2000000" /> </MediaCodec> <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es"> <!-- profiles and levels: ProfileCore : Level2 --> - <Limit name="size" min="2x2" max="176x144" /> - <Limit name="alignment" value="2x2" /> + <Limit name="size" min="16x16" max="176x144" /> + <Limit name="alignment" value="16x16" /> <Limit name="block-size" value="16x16" /> <Limit name="blocks-per-second" range="12-1485" /> <Limit name="bitrate" range="1-64000" /> diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp index c93c7e8..b214870 100644 --- a/media/libstagefright/foundation/ABuffer.cpp +++ b/media/libstagefright/foundation/ABuffer.cpp @@ -19,11 +19,13 @@ #include "ADebug.h" #include "ALooper.h" #include "AMessage.h" +#include "MediaBufferBase.h" namespace android { ABuffer::ABuffer(size_t capacity) - : mData(malloc(capacity)), + : mMediaBufferBase(NULL), + mData(malloc(capacity)), mCapacity(capacity), mRangeOffset(0), mRangeLength(capacity), @@ -32,7 +34,8 @@ ABuffer::ABuffer(size_t capacity) } ABuffer::ABuffer(void *data, size_t capacity) - : mData(data), + : mMediaBufferBase(NULL), + mData(data), mCapacity(capacity), mRangeOffset(0), mRangeLength(capacity), @@ -59,6 +62,8 @@ ABuffer::~ABuffer() { if (mFarewell != NULL) { mFarewell->post(); } + + setMediaBufferBase(NULL); } void ABuffer::setRange(size_t offset, size_t size) { @@ -80,5 +85,19 @@ sp<AMessage> ABuffer::meta() { return mMeta; } +MediaBufferBase 
*ABuffer::getMediaBufferBase() { + if (mMediaBufferBase != NULL) { + mMediaBufferBase->add_ref(); + } + return mMediaBufferBase; +} + +void ABuffer::setMediaBufferBase(MediaBufferBase *mediaBuffer) { + if (mMediaBufferBase != NULL) { + mMediaBufferBase->release(); + } + mMediaBufferBase = mediaBuffer; +} + } // namespace android diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp index d268aa4..795e8a6 100644 --- a/media/libstagefright/foundation/AMessage.cpp +++ b/media/libstagefright/foundation/AMessage.cpp @@ -14,6 +14,11 @@ * limitations under the License. */ +#define LOG_TAG "AMessage" +//#define LOG_NDEBUG 0 +//#define DUMP_STATS +#include <cutils/log.h> + #include "AMessage.h" #include <ctype.h> @@ -60,12 +65,14 @@ ALooper::handler_id AMessage::target() const { void AMessage::clear() { for (size_t i = 0; i < mNumItems; ++i) { Item *item = &mItems[i]; - freeItem(item); + delete[] item->mName; + item->mName = NULL; + freeItemValue(item); } mNumItems = 0; } -void AMessage::freeItem(Item *item) { +void AMessage::freeItemValue(Item *item) { switch (item->mType) { case kTypeString: { @@ -88,25 +95,85 @@ void AMessage::freeItem(Item *item) { } } -AMessage::Item *AMessage::allocateItem(const char *name) { - name = AAtomizer::Atomize(name); +#ifdef DUMP_STATS +#include <utils/Mutex.h> + +Mutex gLock; +static int32_t gFindItemCalls = 1; +static int32_t gDupCalls = 1; +static int32_t gAverageNumItems = 0; +static int32_t gAverageNumChecks = 0; +static int32_t gAverageNumMemChecks = 0; +static int32_t gAverageDupItems = 0; +static int32_t gLastChecked = -1; + +static void reportStats() { + int32_t time = (ALooper::GetNowUs() / 1000); + if (time / 1000 != gLastChecked / 1000) { + gLastChecked = time; + ALOGI("called findItemIx %zu times (for len=%.1f i=%.1f/%.1f mem) dup %zu times (for len=%.1f)", + gFindItemCalls, + gAverageNumItems / (float)gFindItemCalls, + gAverageNumChecks / (float)gFindItemCalls, + 
gAverageNumMemChecks / (float)gFindItemCalls, + gDupCalls, + gAverageDupItems / (float)gDupCalls); + gFindItemCalls = gDupCalls = 1; + gAverageNumItems = gAverageNumChecks = gAverageNumMemChecks = gAverageDupItems = 0; + gLastChecked = time; + } +} +#endif +inline size_t AMessage::findItemIndex(const char *name, size_t len) const { +#ifdef DUMP_STATS + size_t memchecks = 0; +#endif size_t i = 0; - while (i < mNumItems && mItems[i].mName != name) { - ++i; + for (; i < mNumItems; i++) { + if (len != mItems[i].mNameLength) { + continue; + } +#ifdef DUMP_STATS + ++memchecks; +#endif + if (!memcmp(mItems[i].mName, name, len)) { + break; + } + } +#ifdef DUMP_STATS + { + Mutex::Autolock _l(gLock); + ++gFindItemCalls; + gAverageNumItems += mNumItems; + gAverageNumMemChecks += memchecks; + gAverageNumChecks += i; + reportStats(); } +#endif + return i; +} + +// assumes item's name was uninitialized or NULL +void AMessage::Item::setName(const char *name, size_t len) { + mNameLength = len; + mName = new char[len + 1]; + memcpy((void*)mName, name, len + 1); +} +AMessage::Item *AMessage::allocateItem(const char *name) { + size_t len = strlen(name); + size_t i = findItemIndex(name, len); Item *item; if (i < mNumItems) { item = &mItems[i]; - freeItem(item); + freeItemValue(item); } else { CHECK(mNumItems < kMaxNumItems); i = mNumItems++; item = &mItems[i]; - - item->mName = name; + item->setName(name, len); } return item; @@ -114,31 +181,18 @@ AMessage::Item *AMessage::allocateItem(const char *name) { const AMessage::Item *AMessage::findItem( const char *name, Type type) const { - name = AAtomizer::Atomize(name); - - for (size_t i = 0; i < mNumItems; ++i) { + size_t i = findItemIndex(name, strlen(name)); + if (i < mNumItems) { const Item *item = &mItems[i]; + return item->mType == type ? item : NULL; - if (item->mName == name) { - return item->mType == type ? 
item : NULL; - } } - return NULL; } bool AMessage::contains(const char *name) const { - name = AAtomizer::Atomize(name); - - for (size_t i = 0; i < mNumItems; ++i) { - const Item *item = &mItems[i]; - - if (item->mName == name) { - return true; - } - } - - return false; + size_t i = findItemIndex(name, strlen(name)); + return i < mNumItems; } #define BASIC_TYPE(NAME,FIELDNAME,TYPENAME) \ @@ -297,11 +351,20 @@ sp<AMessage> AMessage::dup() const { sp<AMessage> msg = new AMessage(mWhat, mTarget); msg->mNumItems = mNumItems; +#ifdef DUMP_STATS + { + Mutex::Autolock _l(gLock); + ++gDupCalls; + gAverageDupItems += mNumItems; + reportStats(); + } +#endif + for (size_t i = 0; i < mNumItems; ++i) { const Item *from = &mItems[i]; Item *to = &msg->mItems[i]; - to->mName = from->mName; + to->setName(from->mName, from->mNameLength); to->mType = from->mType; switch (from->mType) { @@ -422,7 +485,7 @@ AString AMessage::debugString(int32_t indent) const { { sp<ABuffer> buffer = static_cast<ABuffer *>(item.u.refValue); - if (buffer != NULL && buffer->size() <= 64) { + if (buffer != NULL && buffer->data() != NULL && buffer->size() <= 64) { tmp = StringPrintf("Buffer %s = {\n", item.mName); hexdump(buffer->data(), buffer->size(), indent + 4, &tmp); appendIndent(&tmp, indent + 2); @@ -472,11 +535,11 @@ sp<AMessage> AMessage::FromParcel(const Parcel &parcel) { sp<AMessage> msg = new AMessage(what); msg->mNumItems = static_cast<size_t>(parcel.readInt32()); - for (size_t i = 0; i < msg->mNumItems; ++i) { Item *item = &msg->mItems[i]; - item->mName = AAtomizer::Atomize(parcel.readCString()); + const char *name = parcel.readCString(); + item->setName(name, strlen(name)); item->mType = static_cast<Type>(parcel.readInt32()); switch (item->mType) { diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp index 7b18348..fba6b09 100644 --- a/media/libstagefright/httplive/LiveSession.cpp +++ b/media/libstagefright/httplive/LiveSession.cpp @@ 
-63,6 +63,7 @@ LiveSession::LiveSession( mSwapMask(0), mCheckBandwidthGeneration(0), mSwitchGeneration(0), + mSubtitleGeneration(0), mLastDequeuedTimeUs(0ll), mRealTimeBaseUs(0ll), mReconfigurationInProgress(false), @@ -81,6 +82,7 @@ LiveSession::LiveSession( mDiscontinuities.add(indexToType(i), new AnotherPacketSource(NULL /* meta */)); mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */)); mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */)); + mBuffering[i] = false; } } @@ -133,15 +135,40 @@ status_t LiveSession::dequeueAccessUnit( sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream); + ssize_t idx = typeToIndex(stream); if (!packetSource->hasBufferAvailable(&finalResult)) { - return finalResult == OK ? -EAGAIN : finalResult; + if (finalResult == OK) { + mBuffering[idx] = true; + return -EAGAIN; + } else { + return finalResult; + } + } + + if (mBuffering[idx]) { + if (mSwitchInProgress + || packetSource->isFinished(0) + || packetSource->getEstimatedDurationUs() > 10000000ll) { + mBuffering[idx] = false; + } + } + + if (mBuffering[idx]) { + return -EAGAIN; } // wait for counterpart sp<AnotherPacketSource> otherSource; - if (stream == STREAMTYPE_AUDIO && (mStreamMask & STREAMTYPE_VIDEO)) { + uint32_t mask = mNewStreamMask & mStreamMask; + uint32_t fetchersMask = 0; + for (size_t i = 0; i < mFetcherInfos.size(); ++i) { + uint32_t fetcherMask = mFetcherInfos.valueAt(i).mFetcher->getStreamTypeMask(); + fetchersMask |= fetcherMask; + } + mask &= fetchersMask; + if (stream == STREAMTYPE_AUDIO && (mask & STREAMTYPE_VIDEO)) { otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO); - } else if (stream == STREAMTYPE_VIDEO && (mStreamMask & STREAMTYPE_AUDIO)) { + } else if (stream == STREAMTYPE_VIDEO && (mask & STREAMTYPE_AUDIO)) { otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO); } if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) { @@ -263,6 +290,11 @@ status_t 
LiveSession::dequeueAccessUnit( mLastDequeuedTimeUs = timeUs; mRealTimeBaseUs = ALooper::GetNowUs() - timeUs; } else if (stream == STREAMTYPE_SUBTITLES) { + int32_t subtitleGeneration; + if ((*accessUnit)->meta()->findInt32("subtitleGeneration", &subtitleGeneration) + && subtitleGeneration != mSubtitleGeneration) { + return -EAGAIN; + }; (*accessUnit)->meta()->setInt32( "trackIndex", mPlaylist->getSelectedIndex()); (*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs); @@ -321,10 +353,6 @@ status_t LiveSession::seekTo(int64_t timeUs) { sp<AMessage> response; status_t err = msg->postAndAwaitResponse(&response); - uint32_t replyID; - CHECK(response == mSeekReply && 0 != mSeekReplyID); - mSeekReply.clear(); - mSeekReplyID = 0; return err; } @@ -350,12 +378,16 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { case kWhatSeek: { - CHECK(msg->senderAwaitsResponse(&mSeekReplyID)); + uint32_t seekReplyID; + CHECK(msg->senderAwaitsResponse(&seekReplyID)); + mSeekReplyID = seekReplyID; + mSeekReply = new AMessage; status_t err = onSeek(msg); - mSeekReply = new AMessage; - mSeekReply->setInt32("err", err); + if (err != OK) { + msg->post(50000); + } break; } @@ -390,7 +422,10 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { if (mSeekReplyID != 0) { CHECK(mSeekReply != NULL); + mSeekReply->setInt32("err", OK); mSeekReply->postReply(mSeekReplyID); + mSeekReplyID = 0; + mSeekReply.clear(); } } } @@ -417,6 +452,23 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { ALOGE("XXX Received error %d from PlaylistFetcher.", err); + // handle EOS on subtitle tracks independently + AString uri; + if (err == ERROR_END_OF_STREAM && msg->findString("uri", &uri)) { + ssize_t i = mFetcherInfos.indexOfKey(uri); + if (i >= 0) { + const sp<PlaylistFetcher> &fetcher = mFetcherInfos.valueAt(i).mFetcher; + if (fetcher != NULL) { + uint32_t type = fetcher->getStreamTypeMask(); + if (type == STREAMTYPE_SUBTITLES) { + mPacketSources.valueFor( + 
STREAMTYPE_SUBTITLES)->signalEOS(err);; + break; + } + } + } + } + if (mInPreparationPhase) { postPrepared(err); } @@ -442,6 +494,10 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { AString uri; CHECK(msg->findString("uri", &uri)); + if (mFetcherInfos.indexOfKey(uri) < 0) { + ALOGE("couldn't find uri"); + break; + } FetcherInfo *info = &mFetcherInfos.editValueFor(uri); info->mIsPrepared = true; @@ -498,7 +554,7 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { break; } - onCheckBandwidth(); + onCheckBandwidth(msg); break; } @@ -531,6 +587,19 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { onSwapped(msg); break; } + + case kWhatCheckSwitchDown: + { + onCheckSwitchDown(); + break; + } + + case kWhatSwitchDown: + { + onSwitchDown(); + break; + } + default: TRESPASS(); break; @@ -554,6 +623,21 @@ LiveSession::StreamType LiveSession::indexToType(int idx) { return (StreamType)(1 << idx); } +// static +ssize_t LiveSession::typeToIndex(int32_t type) { + switch (type) { + case STREAMTYPE_AUDIO: + return 0; + case STREAMTYPE_VIDEO: + return 1; + case STREAMTYPE_SUBTITLES: + return 2; + default: + return -1; + }; + return -1; +} + void LiveSession::onConnect(const sp<AMessage> &msg) { AString url; CHECK(msg->findString("url", &url)); @@ -643,6 +727,9 @@ void LiveSession::finishDisconnect() { // (finishDisconnect, onFinishDisconnect2) cancelBandwidthSwitch(); + // cancel switch down monitor + mSwitchDownMonitor.clear(); + for (size_t i = 0; i < mFetcherInfos.size(); ++i) { mFetcherInfos.valueAt(i).mFetcher->stopAsync(); } @@ -685,7 +772,7 @@ sp<PlaylistFetcher> LiveSession::addFetcher(const char *uri) { notify->setInt32("switchGeneration", mSwitchGeneration); FetcherInfo info; - info.mFetcher = new PlaylistFetcher(notify, this, uri); + info.mFetcher = new PlaylistFetcher(notify, this, uri, mSubtitleGeneration); info.mDurationUs = -1ll; info.mIsPrepared = false; info.mToBeRemoved = false; @@ -919,14 +1006,22 @@ size_t 
LiveSession::getBandwidthIndex() { } } - // Consider only 80% of the available bandwidth usable. - bandwidthBps = (bandwidthBps * 8) / 10; - // Pick the highest bandwidth stream below or equal to estimated bandwidth. index = mBandwidthItems.size() - 1; - while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth - > (size_t)bandwidthBps) { + while (index > 0) { + // consider only 80% of the available bandwidth, but if we are switching up, + // be even more conservative (70%) to avoid overestimating and immediately + // switching back. + size_t adjustedBandwidthBps = bandwidthBps; + if (index > mCurBandwidthIndex) { + adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10; + } else { + adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10; + } + if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) { + break; + } --index; } } @@ -983,15 +1078,34 @@ size_t LiveSession::getBandwidthIndex() { return index; } +int64_t LiveSession::latestMediaSegmentStartTimeUs() { + sp<AMessage> audioMeta = mPacketSources.valueFor(STREAMTYPE_AUDIO)->getLatestDequeuedMeta(); + int64_t minSegmentStartTimeUs = -1, videoSegmentStartTimeUs = -1; + if (audioMeta != NULL) { + audioMeta->findInt64("segmentStartTimeUs", &minSegmentStartTimeUs); + } + + sp<AMessage> videoMeta = mPacketSources.valueFor(STREAMTYPE_VIDEO)->getLatestDequeuedMeta(); + if (videoMeta != NULL + && videoMeta->findInt64("segmentStartTimeUs", &videoSegmentStartTimeUs)) { + if (minSegmentStartTimeUs < 0 || videoSegmentStartTimeUs < minSegmentStartTimeUs) { + minSegmentStartTimeUs = videoSegmentStartTimeUs; + } + + } + return minSegmentStartTimeUs; +} + status_t LiveSession::onSeek(const sp<AMessage> &msg) { int64_t timeUs; CHECK(msg->findInt64("timeUs", &timeUs)); if (!mReconfigurationInProgress) { - changeConfiguration(timeUs, getBandwidthIndex()); + changeConfiguration(timeUs, mCurBandwidthIndex); + return OK; + } else { + return -EWOULDBLOCK; } - - return OK; } status_t LiveSession::getDuration(int64_t 
*durationUs) const { @@ -1035,6 +1149,11 @@ sp<AMessage> LiveSession::getTrackInfo(size_t trackIndex) const { } status_t LiveSession::selectTrack(size_t index, bool select) { + if (mPlaylist == NULL) { + return INVALID_OPERATION; + } + + ++mSubtitleGeneration; status_t err = mPlaylist->selectTrack(index, select); if (err == OK) { sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, id()); @@ -1143,7 +1262,10 @@ void LiveSession::changeConfiguration( if (mSeekReplyID != 0) { CHECK(mSeekReply != NULL); + mSeekReply->setInt32("err", OK); mSeekReply->postReply(mSeekReplyID); + mSeekReplyID = 0; + mSeekReply.clear(); } } } @@ -1228,12 +1350,6 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask)); CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask)); - for (size_t i = 0; i < kMaxStreams; ++i) { - if (streamMask & indexToType(i)) { - CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mUri)); - } - } - int64_t timeUs; int32_t pickTrack; bool switching = false; @@ -1249,7 +1365,20 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { mRealTimeBaseUs = ALooper::GetNowUs() - timeUs; } + for (size_t i = 0; i < kMaxStreams; ++i) { + if (streamMask & indexToType(i)) { + if (switching) { + CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mNewUri)); + } else { + CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mUri)); + } + } + } + mNewStreamMask = streamMask | resumeMask; + if (switching) { + mSwapMask = mStreamMask & ~resumeMask; + } // Of all existing fetchers: // * Resume fetchers that are still needed and assign them original packet sources. @@ -1299,7 +1428,7 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { } AString uri; - uri = mStreams[i].mUri; + uri = switching ? 
mStreams[i].mNewUri : mStreams[i].mUri; sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str()); CHECK(fetcher != NULL); @@ -1310,9 +1439,14 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { int32_t discontinuitySeq = -1; sp<AnotherPacketSource> sources[kMaxStreams]; + if (i == kSubtitleIndex) { + segmentStartTimeUs = latestMediaSegmentStartTimeUs(); + } + // TRICKY: looping from i as earlier streams are already removed from streamMask for (size_t j = i; j < kMaxStreams; ++j) { - if ((streamMask & indexToType(j)) && uri == mStreams[j].mUri) { + const AString &streamUri = switching ? mStreams[j].mNewUri : mStreams[j].mUri; + if ((streamMask & indexToType(j)) && uri == streamUri) { sources[j] = mPacketSources.valueFor(indexToType(j)); if (timeUs >= 0) { @@ -1394,13 +1528,13 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { // All fetchers have now been started, the configuration change // has completed. + cancelCheckBandwidthEvent(); scheduleCheckBandwidthEvent(); ALOGV("XXX configuration change completed."); mReconfigurationInProgress = false; if (switching) { mSwitchInProgress = true; - mSwapMask = streamMask; } else { mStreamMask = mNewStreamMask; } @@ -1419,6 +1553,15 @@ void LiveSession::onSwapped(const sp<AMessage> &msg) { int32_t stream; CHECK(msg->findInt32("stream", &stream)); + + ssize_t idx = typeToIndex(stream); + CHECK(idx >= 0); + if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) { + ALOGW("swapping stream type %d %s to empty stream", stream, mStreams[idx].mUri.c_str()); + } + mStreams[idx].mUri = mStreams[idx].mNewUri; + mStreams[idx].mNewUri.clear(); + mSwapMask &= ~stream; if (mSwapMask != 0) { return; @@ -1430,11 +1573,58 @@ void LiveSession::onSwapped(const sp<AMessage> &msg) { StreamType extraStream = (StreamType) (extraStreams & ~(extraStreams - 1)); swapPacketSource(extraStream); extraStreams &= ~extraStream; + + idx = typeToIndex(extraStream); + CHECK(idx >= 0); + if 
(mStreams[idx].mNewUri.empty()) { + ALOGW("swapping extra stream type %d %s to empty stream", + extraStream, mStreams[idx].mUri.c_str()); + } + mStreams[idx].mUri = mStreams[idx].mNewUri; + mStreams[idx].mNewUri.clear(); } tryToFinishBandwidthSwitch(); } +void LiveSession::onCheckSwitchDown() { + if (mSwitchDownMonitor == NULL) { + return; + } + + for (size_t i = 0; i < kMaxStreams; ++i) { + int32_t targetDuration; + sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(indexToType(i)); + sp<AMessage> meta = packetSource->getLatestDequeuedMeta(); + + if (meta != NULL && meta->findInt32("targetDuration", &targetDuration) ) { + int64_t bufferedDurationUs = packetSource->getEstimatedDurationUs(); + int64_t targetDurationUs = targetDuration * 1000000ll; + + if (bufferedDurationUs < targetDurationUs / 3) { + (new AMessage(kWhatSwitchDown, id()))->post(); + break; + } + } + } + + mSwitchDownMonitor->post(1000000ll); +} + +void LiveSession::onSwitchDown() { + if (mReconfigurationInProgress || mSwitchInProgress || mCurBandwidthIndex == 0) { + return; + } + + ssize_t bandwidthIndex = getBandwidthIndex(); + if (bandwidthIndex < mCurBandwidthIndex) { + changeConfiguration(-1, bandwidthIndex, false); + return; + } + + changeConfiguration(-1, mCurBandwidthIndex - 1, false); +} + // Mark switch done when: // 1. 
all old buffers are swapped out void LiveSession::tryToFinishBandwidthSwitch() { @@ -1472,6 +1662,28 @@ void LiveSession::cancelBandwidthSwitch() { mSwitchGeneration++; mSwitchInProgress = false; mSwapMask = 0; + + for (size_t i = 0; i < mFetcherInfos.size(); ++i) { + FetcherInfo& info = mFetcherInfos.editValueAt(i); + if (info.mToBeRemoved) { + info.mToBeRemoved = false; + } + } + + for (size_t i = 0; i < kMaxStreams; ++i) { + if (!mStreams[i].mNewUri.empty()) { + ssize_t j = mFetcherInfos.indexOfKey(mStreams[i].mNewUri); + if (j < 0) { + mStreams[i].mNewUri.clear(); + continue; + } + + const FetcherInfo &info = mFetcherInfos.valueAt(j); + info.mFetcher->stopAsync(); + mFetcherInfos.removeItemsAt(j); + mStreams[i].mNewUri.clear(); + } + } } bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) { @@ -1492,20 +1704,16 @@ bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) { } } -void LiveSession::onCheckBandwidth() { +void LiveSession::onCheckBandwidth(const sp<AMessage> &msg) { size_t bandwidthIndex = getBandwidthIndex(); if (canSwitchBandwidthTo(bandwidthIndex)) { changeConfiguration(-1ll /* timeUs */, bandwidthIndex); } else { - scheduleCheckBandwidthEvent(); + // Come back and check again 10 seconds later in case there is nothing to do now. + // If we DO change configuration, once that completes it'll schedule a new + // check bandwidth event with an incremented mCheckBandwidthGeneration. + msg->post(10000000ll); } - - // Handling the kWhatCheckBandwidth even here does _not_ automatically - // schedule another one on return, only an explicit call to - // scheduleCheckBandwidthEvent will do that. - // This ensures that only one configuration change is ongoing at any - // one time, once that completes it'll schedule another check bandwidth - // event. 
} void LiveSession::postPrepared(status_t err) { @@ -1522,6 +1730,9 @@ void LiveSession::postPrepared(status_t err) { notify->post(); mInPreparationPhase = false; + + mSwitchDownMonitor = new AMessage(kWhatCheckSwitchDown, id()); + mSwitchDownMonitor->post(); } } // namespace android diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h index 5423f0f..7aacca6 100644 --- a/media/libstagefright/httplive/LiveSession.h +++ b/media/libstagefright/httplive/LiveSession.h @@ -108,6 +108,8 @@ private: kWhatChangeConfiguration3 = 'chC3', kWhatFinishDisconnect2 = 'fin2', kWhatSwapped = 'swap', + kWhatCheckSwitchDown = 'ckSD', + kWhatSwitchDown = 'sDwn', }; struct BandwidthItem { @@ -124,7 +126,7 @@ private: struct StreamItem { const char *mType; - AString mUri; + AString mUri, mNewUri; size_t mCurDiscontinuitySeq; int64_t mLastDequeuedTimeUs; int64_t mLastSampleDurationUs; @@ -151,6 +153,7 @@ private: sp<IMediaHTTPService> mHTTPService; bool mInPreparationPhase; + bool mBuffering[kMaxStreams]; sp<HTTPBase> mHTTPDataSource; KeyedVector<String8, String8> mExtraHeaders; @@ -186,6 +189,7 @@ private: int32_t mCheckBandwidthGeneration; int32_t mSwitchGeneration; + int32_t mSubtitleGeneration; size_t mContinuationCounter; sp<AMessage> mContinuation; @@ -202,6 +206,7 @@ private: bool mFirstTimeUsValid; int64_t mFirstTimeUs; int64_t mLastSeekTimeUs; + sp<AMessage> mSwitchDownMonitor; KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs; KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs; @@ -236,9 +241,11 @@ private: const char *url, uint8_t *curPlaylistHash, bool *unchanged); size_t getBandwidthIndex(); + int64_t latestMediaSegmentStartTimeUs(); static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *); static StreamType indexToType(int idx); + static ssize_t typeToIndex(int32_t type); void changeConfiguration( int64_t timeUs, size_t bandwidthIndex, bool pickTrack = false); @@ -246,6 +253,8 @@ private: void 
onChangeConfiguration2(const sp<AMessage> &msg); void onChangeConfiguration3(const sp<AMessage> &msg); void onSwapped(const sp<AMessage> &msg); + void onCheckSwitchDown(); + void onSwitchDown(); void tryToFinishBandwidthSwitch(); void scheduleCheckBandwidthEvent(); @@ -257,7 +266,7 @@ private: void cancelBandwidthSwitch(); bool canSwitchBandwidthTo(size_t bandwidthIndex); - void onCheckBandwidth(); + void onCheckBandwidth(const sp<AMessage> &msg); void finishDisconnect(); diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp index 82a4c39..30fa868 100644 --- a/media/libstagefright/httplive/PlaylistFetcher.cpp +++ b/media/libstagefright/httplive/PlaylistFetcher.cpp @@ -55,7 +55,8 @@ const int32_t PlaylistFetcher::kNumSkipFrames = 10; PlaylistFetcher::PlaylistFetcher( const sp<AMessage> ¬ify, const sp<LiveSession> &session, - const char *uri) + const char *uri, + int32_t subtitleGeneration) : mNotify(notify), mStartTimeUsNotify(notify->dup()), mSession(session), @@ -73,6 +74,7 @@ PlaylistFetcher::PlaylistFetcher( mPrepared(false), mNextPTSTimeUs(-1ll), mMonitorQueueGeneration(0), + mSubtitleGeneration(subtitleGeneration), mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY), mFirstPTSValid(false), mAbsoluteTimeAnchorUs(0ll), @@ -737,12 +739,6 @@ void PlaylistFetcher::onDownloadNext() { const int32_t lastSeqNumberInPlaylist = firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1; - if (mStartup && mSeqNumber >= 0 - && (mSeqNumber < firstSeqNumberInPlaylist || mSeqNumber > lastSeqNumberInPlaylist)) { - // in case we guessed wrong during reconfiguration, try fetching the latest content. 
- mSeqNumber = lastSeqNumberInPlaylist; - } - if (mDiscontinuitySeq < 0) { mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq(); } @@ -951,7 +947,7 @@ void PlaylistFetcher::onDownloadNext() { } if (err == -EAGAIN) { - // starting sequence number too low + // starting sequence number too low/high mTSParser.clear(); postMonitorQueue(); return; @@ -1015,7 +1011,16 @@ void PlaylistFetcher::onDownloadNext() { // bulk extract non-ts files if (tsBuffer == NULL) { - err = extractAndQueueAccessUnits(buffer, itemMeta); + err = extractAndQueueAccessUnits(buffer, itemMeta); + if (err == -EAGAIN) { + // starting sequence number too low/high + postMonitorQueue(); + return; + } else if (err == ERROR_OUT_OF_RANGE) { + // reached stopping point + stopAsync(/* clear = */false); + return; + } } if (err != OK) { @@ -1023,12 +1028,39 @@ void PlaylistFetcher::onDownloadNext() { return; } - mStartup = false; ++mSeqNumber; postMonitorQueue(); } +int32_t PlaylistFetcher::getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const { + int32_t firstSeqNumberInPlaylist, lastSeqNumberInPlaylist; + if (mPlaylist->meta() == NULL + || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) { + firstSeqNumberInPlaylist = 0; + } + lastSeqNumberInPlaylist = firstSeqNumberInPlaylist + mPlaylist->size() - 1; + + int32_t index = mSeqNumber - firstSeqNumberInPlaylist - 1; + while (index >= 0 && anchorTimeUs > mStartTimeUs) { + sp<AMessage> itemMeta; + CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta)); + + int64_t itemDurationUs; + CHECK(itemMeta->findInt64("durationUs", &itemDurationUs)); + + anchorTimeUs -= itemDurationUs; + --index; + } + + int32_t newSeqNumber = firstSeqNumberInPlaylist + index + 1; + if (newSeqNumber <= lastSeqNumberInPlaylist) { + return newSeqNumber; + } else { + return lastSeqNumberInPlaylist; + } +} + int32_t PlaylistFetcher::getSeqNumberForDiscontinuity(size_t discontinuitySeq) const { int32_t firstSeqNumberInPlaylist; if (mPlaylist->meta() == NULL @@ 
-1198,60 +1230,84 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu if (timeUs < 0) { timeUs = 0; } - } else if (mAdaptive && timeUs > mStartTimeUs) { - int32_t seq; - if (mStartTimeUsNotify != NULL - && !mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) { - mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq); + } + + if (timeUs < mStartTimeUs) { + // buffer up to the closest preceding IDR frame + ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us", + timeUs, mStartTimeUs); + const char *mime; + sp<MetaData> format = source->getFormat(); + bool isAvc = false; + if (format != NULL && format->findCString(kKeyMIMEType, &mime) + && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) { + isAvc = true; + } + if (isAvc && IsIDR(accessUnit)) { + mVideoBuffer->clear(); } - int64_t startTimeUs; - if (mStartTimeUsNotify != NULL - && !mStartTimeUsNotify->findInt64(key, &startTimeUs)) { - mStartTimeUsNotify->setInt64(key, timeUs); - - uint32_t streamMask = 0; - mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask); - streamMask |= mPacketSources.keyAt(i); - mStartTimeUsNotify->setInt32("streamMask", streamMask); - - if (streamMask == mStreamTypeMask) { - mStartTimeUsNotify->post(); - mStartTimeUsNotify.clear(); - } + if (isAvc) { + mVideoBuffer->queueAccessUnit(accessUnit); } + + continue; } + } - if (timeUs < mStartTimeUs) { - if (mAdaptive) { - int32_t targetDuration; - mPlaylist->meta()->findInt32("target-duration", &targetDuration); - int32_t incr = (mStartTimeUs - timeUs) / 1000000 / targetDuration; - if (incr == 0) { - // increment mSeqNumber by at least one - incr = 1; - } - mSeqNumber += incr; - err = -EAGAIN; - break; + CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs)); + if (mStartTimeUsNotify != NULL && timeUs > mStartTimeUs) { + + int32_t targetDurationSecs; + CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs)); + int64_t targetDurationUs = targetDurationSecs * 
1000000ll; + // mStartup + // mStartup is true until we have queued a packet for all the streams + // we are fetching. We queue packets whose timestamps are greater than + // mStartTimeUs. + // mSegmentStartTimeUs >= 0 + // mSegmentStartTimeUs is non-negative when adapting or switching tracks + // timeUs - mStartTimeUs > targetDurationUs: + // This and the 2 above conditions should only happen when adapting in a live + // stream; the old fetcher has already fetched to mStartTimeUs; the new fetcher + // would start fetching after timeUs, which should be greater than mStartTimeUs; + // the old fetcher would then continue fetching data until timeUs. We don't want + // timeUs to be too far ahead of mStartTimeUs because we want the old fetcher to + // stop as early as possible. The definition of being "too far ahead" is + // arbitrary; here we use targetDurationUs as threshold. + if (mStartup && mSegmentStartTimeUs >= 0 + && timeUs - mStartTimeUs > targetDurationUs) { + // we just guessed a starting timestamp that is too high when adapting in a + // live stream; re-adjust based on the actual timestamp extracted from the + // media segment; if we didn't move backward after the re-adjustment + // (newSeqNumber), start at least 1 segment prior. 
+ int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs); + if (newSeqNumber >= mSeqNumber) { + --mSeqNumber; } else { - // buffer up to the closest preceding IDR frame - ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us", - timeUs, mStartTimeUs); - const char *mime; - sp<MetaData> format = source->getFormat(); - bool isAvc = false; - if (format != NULL && format->findCString(kKeyMIMEType, &mime) - && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) { - isAvc = true; - } - if (isAvc && IsIDR(accessUnit)) { - mVideoBuffer->clear(); - } - if (isAvc) { - mVideoBuffer->queueAccessUnit(accessUnit); - } - - continue; + mSeqNumber = newSeqNumber; + } + mStartTimeUsNotify = mNotify->dup(); + mStartTimeUsNotify->setInt32("what", kWhatStartedAt); + return -EAGAIN; + } + + int32_t seq; + if (!mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) { + mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq); + } + int64_t startTimeUs; + if (!mStartTimeUsNotify->findInt64(key, &startTimeUs)) { + mStartTimeUsNotify->setInt64(key, timeUs); + + uint32_t streamMask = 0; + mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask); + streamMask |= mPacketSources.keyAt(i); + mStartTimeUsNotify->setInt32("streamMask", streamMask); + + if (streamMask == mStreamTypeMask) { + mStartup = false; + mStartTimeUsNotify->post(); + mStartTimeUsNotify.clear(); } } } @@ -1353,6 +1409,7 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits( buffer->meta()->setInt64("durationUs", durationUs); buffer->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber)); buffer->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq); + buffer->meta()->setInt32("subtitleGeneration", mSubtitleGeneration); packetSource->queueAccessUnit(buffer); return OK; @@ -1507,14 +1564,52 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits( if (startTimeUs < mStartTimeUs) { continue; } + + if (mStartTimeUsNotify != NULL) { + int32_t targetDurationSecs; + 
CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs)); + int64_t targetDurationUs = targetDurationSecs * 1000000ll; + + // Duplicated logic from how we handle .ts playlists. + if (mStartup && mSegmentStartTimeUs >= 0 + && timeUs - mStartTimeUs > targetDurationUs) { + int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs); + if (newSeqNumber >= mSeqNumber) { + --mSeqNumber; + } else { + mSeqNumber = newSeqNumber; + } + return -EAGAIN; + } + + mStartTimeUsNotify->setInt64("timeUsAudio", timeUs); + mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq); + mStartTimeUsNotify->setInt32("streamMask", LiveSession::STREAMTYPE_AUDIO); + mStartTimeUsNotify->post(); + mStartTimeUsNotify.clear(); + } + } + + if (mStopParams != NULL) { + // Queue discontinuity in original stream. + int32_t discontinuitySeq; + int64_t stopTimeUs; + if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq) + || discontinuitySeq > mDiscontinuitySeq + || !mStopParams->findInt64("timeUsAudio", &stopTimeUs) + || (discontinuitySeq == mDiscontinuitySeq && unitTimeUs >= stopTimeUs)) { + packetSource->queueAccessUnit(mSession->createFormatChangeBuffer()); + mStreamTypeMask = 0; + mPacketSources.clear(); + return ERROR_OUT_OF_RANGE; + } } sp<ABuffer> unit = new ABuffer(aac_frame_length); memcpy(unit->data(), adtsHeader, aac_frame_length); unit->meta()->setInt64("timeUs", unitTimeUs); - unit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber)); - unit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq); + setAccessUnitProperties(unit, packetSource); packetSource->queueAccessUnit(unit); } diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h index daefb26..78c358f 100644 --- a/media/libstagefright/httplive/PlaylistFetcher.h +++ b/media/libstagefright/httplive/PlaylistFetcher.h @@ -49,7 +49,8 @@ struct PlaylistFetcher : public AHandler { PlaylistFetcher( const sp<AMessage> ¬ify, 
const sp<LiveSession> &session, - const char *uri); + const char *uri, + int32_t subtitleGeneration); sp<DataSource> getDataSource(); @@ -69,6 +70,10 @@ struct PlaylistFetcher : public AHandler { void resumeUntilAsync(const sp<AMessage> ¶ms); + uint32_t getStreamTypeMask() const { + return mStreamTypeMask; + } + protected: virtual ~PlaylistFetcher(); virtual void onMessageReceived(const sp<AMessage> &msg); @@ -104,7 +109,12 @@ private: uint32_t mStreamTypeMask; int64_t mStartTimeUs; + + // Start time relative to the beginning of the first segment in the initial + // playlist. It's value is initialized to a non-negative value only when we are + // adapting or switching tracks. int64_t mSegmentStartTimeUs; + ssize_t mDiscontinuitySeq; bool mStartTimeUsRelative; sp<AMessage> mStopParams; // message containing the latest timestamps we should fetch. @@ -124,6 +134,7 @@ private: int64_t mNextPTSTimeUs; int32_t mMonitorQueueGeneration; + const int32_t mSubtitleGeneration; enum RefreshState { INITIAL_MINIMUM_RELOAD_DELAY, diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h index 5db4b4b..4252706 100644 --- a/media/libstagefright/include/NuCachedSource2.h +++ b/media/libstagefright/include/NuCachedSource2.h @@ -37,6 +37,8 @@ struct NuCachedSource2 : public DataSource { virtual ssize_t readAt(off64_t offset, void *data, size_t size); + virtual void disconnect(); + virtual status_t getSize(off64_t *size); virtual uint32_t flags(); @@ -103,6 +105,7 @@ private: off64_t mLastAccessPos; sp<AMessage> mAsyncResult; bool mFetching; + bool mDisconnecting; int64_t mLastFetchTimeUs; int32_t mNumRetriesLeft; diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h index ee553d9..9e97ebd 100644 --- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h +++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h @@ -63,7 +63,23 @@ protected: 
OMX_U32 numOutputBuffers, const char *mimeType); - virtual void updatePortDefinitions(); + virtual void updatePortDefinitions(bool updateCrop = true); + + uint32_t outputBufferWidth(); + uint32_t outputBufferHeight(); + + enum CropSettingsMode { + kCropUnSet = 0, + kCropSet, + kCropChanged, + }; + void handlePortSettingsChange( + bool *portWillReset, uint32_t width, uint32_t height, + CropSettingsMode cropSettingsMode = kCropUnSet, bool fakeStride = false); + + void copyYV12FrameToOutputBuffer( + uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, + size_t srcYStride, size_t srcUStride, size_t srcVStride); enum { kInputPortIndex = 0, diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp index 010063f..a03f6f9 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp @@ -42,7 +42,8 @@ AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta) mLastQueuedTimeUs(0), mEOSResult(OK), mLatestEnqueuedMeta(NULL), - mLatestDequeuedMeta(NULL) { + mLatestDequeuedMeta(NULL), + mQueuedDiscontinuityCount(0) { setFormat(meta); } @@ -122,6 +123,7 @@ status_t AnotherPacketSource::dequeueAccessUnit(sp<ABuffer> *buffer) { mFormat.clear(); } + --mQueuedDiscontinuityCount; return INFO_DISCONTINUITY; } @@ -210,6 +212,11 @@ void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) { mBuffers.push_back(buffer); mCondition.signal(); + int32_t discontinuity; + if (buffer->meta()->findInt32("discontinuity", &discontinuity)) { + ++mQueuedDiscontinuityCount; + } + if (mLatestEnqueuedMeta == NULL) { mLatestEnqueuedMeta = buffer->meta(); } else { @@ -226,6 +233,7 @@ void AnotherPacketSource::clear() { mBuffers.clear(); mEOSResult = OK; + mQueuedDiscontinuityCount = 0; mFormat = NULL; mLatestEnqueuedMeta = NULL; @@ -246,11 +254,6 @@ void AnotherPacketSource::queueDiscontinuity( int32_t oldDiscontinuityType; if 
(!oldBuffer->meta()->findInt32( "discontinuity", &oldDiscontinuityType)) { - MediaBuffer *mbuf = NULL; - oldBuffer->meta()->findPointer("mediaBuffer", (void**)&mbuf); - if (mbuf != NULL) { - mbuf->release(); - } it = mBuffers.erase(it); continue; } @@ -262,6 +265,7 @@ void AnotherPacketSource::queueDiscontinuity( mEOSResult = OK; mLastQueuedTimeUs = 0; mLatestEnqueuedMeta = NULL; + ++mQueuedDiscontinuityCount; sp<ABuffer> buffer = new ABuffer(0); buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type)); @@ -291,7 +295,10 @@ bool AnotherPacketSource::hasBufferAvailable(status_t *finalResult) { int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) { Mutex::Autolock autoLock(mLock); + return getBufferedDurationUs_l(finalResult); +} +int64_t AnotherPacketSource::getBufferedDurationUs_l(status_t *finalResult) { *finalResult = mEOSResult; if (mBuffers.empty()) { @@ -300,6 +307,7 @@ int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) { int64_t time1 = -1; int64_t time2 = -1; + int64_t durationUs = 0; List<sp<ABuffer> >::iterator it = mBuffers.begin(); while (it != mBuffers.end()) { @@ -307,20 +315,64 @@ int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) { int64_t timeUs; if (buffer->meta()->findInt64("timeUs", &timeUs)) { - if (time1 < 0) { + if (time1 < 0 || timeUs < time1) { time1 = timeUs; } - time2 = timeUs; + if (time2 < 0 || timeUs > time2) { + time2 = timeUs; + } } else { // This is a discontinuity, reset everything. + durationUs += time2 - time1; time1 = time2 = -1; } ++it; } - return time2 - time1; + return durationUs + (time2 - time1); +} + +// A cheaper but less precise version of getBufferedDurationUs that we would like to use in +// LiveSession::dequeueAccessUnit to trigger downwards adaptation. 
+int64_t AnotherPacketSource::getEstimatedDurationUs() { + Mutex::Autolock autoLock(mLock); + if (mBuffers.empty()) { + return 0; + } + + if (mQueuedDiscontinuityCount > 0) { + status_t finalResult; + return getBufferedDurationUs_l(&finalResult); + } + + List<sp<ABuffer> >::iterator it = mBuffers.begin(); + sp<ABuffer> buffer = *it; + + int64_t startTimeUs; + buffer->meta()->findInt64("timeUs", &startTimeUs); + if (startTimeUs < 0) { + return 0; + } + + it = mBuffers.end(); + --it; + buffer = *it; + + int64_t endTimeUs; + buffer->meta()->findInt64("timeUs", &endTimeUs); + if (endTimeUs < 0) { + return 0; + } + + int64_t diffUs; + if (endTimeUs > startTimeUs) { + diffUs = endTimeUs - startTimeUs; + } else { + diffUs = startTimeUs - endTimeUs; + } + return diffUs; } status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) { diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h index 0c717d7..809a858 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h @@ -49,6 +49,8 @@ struct AnotherPacketSource : public MediaSource { // presentation timestamps since the last discontinuity (if any). 
int64_t getBufferedDurationUs(status_t *finalResult); + int64_t getEstimatedDurationUs(); + status_t nextBufferTime(int64_t *timeUs); void queueAccessUnit(const sp<ABuffer> &buffer); @@ -83,7 +85,10 @@ private: sp<AMessage> mLatestEnqueuedMeta; sp<AMessage> mLatestDequeuedMeta; + size_t mQueuedDiscontinuityCount; + bool wasFormatChange(int32_t discontinuityType) const; + int64_t getBufferedDurationUs_l(status_t *finalResult); DISALLOW_EVIL_CONSTRUCTORS(AnotherPacketSource); }; diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp index 3c8f03e..ef1cd3d 100644 --- a/media/libstagefright/mpeg2ts/ESQueue.cpp +++ b/media/libstagefright/mpeg2ts/ESQueue.cpp @@ -604,6 +604,8 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() { // having to interpolate. // The final AAC frame may well extend into the next RangeInfo but // that's ok. + // TODO: the logic commented above is skipped because codec cannot take + // arbitrary sized input buffers; size_t offset = 0; while (offset < info.mLength) { if (offset + 7 > mBuffer->size()) { @@ -668,9 +670,12 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() { size_t headerSize = protection_absent ? 7 : 9; offset += aac_frame_length; + // TODO: move back to concatenation when codec can support arbitrary input buffers. 
+ // For now only queue a single buffer + break; } - int64_t timeUs = fetchTimestamp(offset); + int64_t timeUs = fetchTimestampAAC(offset); sp<ABuffer> accessUnit = new ABuffer(offset); memcpy(accessUnit->data(), mBuffer->data(), offset); @@ -717,6 +722,45 @@ int64_t ElementaryStreamQueue::fetchTimestamp(size_t size) { return timeUs; } +// TODO: avoid interpolating timestamps once codec supports arbitrary sized input buffers +int64_t ElementaryStreamQueue::fetchTimestampAAC(size_t size) { + int64_t timeUs = -1; + bool first = true; + + size_t samplesize = size; + while (size > 0) { + CHECK(!mRangeInfos.empty()); + + RangeInfo *info = &*mRangeInfos.begin(); + + if (first) { + timeUs = info->mTimestampUs; + first = false; + } + + if (info->mLength > size) { + int32_t sampleRate; + CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate)); + info->mLength -= size; + size_t numSamples = 1024 * size / samplesize; + info->mTimestampUs += numSamples * 1000000ll / sampleRate; + size = 0; + } else { + size -= info->mLength; + + mRangeInfos.erase(mRangeInfos.begin()); + info = NULL; + } + + } + + if (timeUs == 0ll) { + ALOGV("Returning 0 timestamp"); + } + + return timeUs; +} + struct NALPosition { size_t nalOffset; size_t nalSize; diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h index a2cca77..7c81ff0 100644 --- a/media/libstagefright/mpeg2ts/ESQueue.h +++ b/media/libstagefright/mpeg2ts/ESQueue.h @@ -77,6 +77,7 @@ private: // consume a logical (compressed) access unit of size "size", // returns its timestamp in us (or -1 if no time information). 
int64_t fetchTimestamp(size_t size); + int64_t fetchTimestampAAC(size_t size); DISALLOW_EVIL_CONSTRUCTORS(ElementaryStreamQueue); }; diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp index 69b572e..2f83610 100644 --- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp +++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp @@ -123,16 +123,18 @@ void SoftVideoDecoderOMXComponent::initPorts( updatePortDefinitions(); } -void SoftVideoDecoderOMXComponent::updatePortDefinitions() { +void SoftVideoDecoderOMXComponent::updatePortDefinitions(bool updateCrop) { OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef; def->format.video.nFrameWidth = mWidth; def->format.video.nFrameHeight = mHeight; def->format.video.nStride = def->format.video.nFrameWidth; def->format.video.nSliceHeight = def->format.video.nFrameHeight; + def->nBufferSize = def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2; + def = &editPortInfo(kOutputPortIndex)->mDef; - def->format.video.nFrameWidth = mIsAdaptive ? mAdaptiveMaxWidth : mWidth; - def->format.video.nFrameHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight; + def->format.video.nFrameWidth = outputBufferWidth(); + def->format.video.nFrameHeight = outputBufferHeight(); def->format.video.nStride = def->format.video.nFrameWidth; def->format.video.nSliceHeight = def->format.video.nFrameHeight; @@ -140,10 +142,105 @@ void SoftVideoDecoderOMXComponent::updatePortDefinitions() { (def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3) / 2; - mCropLeft = 0; - mCropTop = 0; - mCropWidth = mWidth; - mCropHeight = mHeight; + if (updateCrop) { + mCropLeft = 0; + mCropTop = 0; + mCropWidth = mWidth; + mCropHeight = mHeight; + } +} + + +uint32_t SoftVideoDecoderOMXComponent::outputBufferWidth() { + return mIsAdaptive ? 
mAdaptiveMaxWidth : mWidth; +} + +uint32_t SoftVideoDecoderOMXComponent::outputBufferHeight() { + return mIsAdaptive ? mAdaptiveMaxHeight : mHeight; +} + +void SoftVideoDecoderOMXComponent::handlePortSettingsChange( + bool *portWillReset, uint32_t width, uint32_t height, + CropSettingsMode cropSettingsMode, bool fakeStride) { + *portWillReset = false; + bool sizeChanged = (width != mWidth || height != mHeight); + bool updateCrop = (cropSettingsMode == kCropUnSet); + bool cropChanged = (cropSettingsMode == kCropChanged); + bool strideChanged = false; + if (fakeStride) { + OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef; + if (def->format.video.nStride != width || def->format.video.nSliceHeight != height) { + strideChanged = true; + } + } + + if (sizeChanged || cropChanged || strideChanged) { + mWidth = width; + mHeight = height; + + if ((sizeChanged && !mIsAdaptive) + || width > mAdaptiveMaxWidth + || height > mAdaptiveMaxHeight) { + if (mIsAdaptive) { + if (width > mAdaptiveMaxWidth) { + mAdaptiveMaxWidth = width; + } + if (height > mAdaptiveMaxHeight) { + mAdaptiveMaxHeight = height; + } + } + updatePortDefinitions(updateCrop); + notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL); + mOutputPortSettingsChange = AWAITING_DISABLED; + *portWillReset = true; + } else { + updatePortDefinitions(updateCrop); + + if (fakeStride) { + // MAJOR HACK that is not pretty, it's just to fool the renderer to read the correct + // data. + // Some software decoders (e.g. SoftMPEG4) fill decoded frame directly to output + // buffer without considering the output buffer stride and slice height. So this is + // used to signal how the buffer is arranged. The alternative is to re-arrange the + // output buffer in SoftMPEG4, but that results in memcopies. 
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef; + def->format.video.nStride = mWidth; + def->format.video.nSliceHeight = mHeight; + } + + notify(OMX_EventPortSettingsChanged, kOutputPortIndex, + OMX_IndexConfigCommonOutputCrop, NULL); + } + } +} + +void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer( + uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, + size_t srcYStride, size_t srcUStride, size_t srcVStride) { + size_t dstYStride = outputBufferWidth(); + size_t dstUVStride = dstYStride / 2; + size_t dstHeight = outputBufferHeight(); + uint8_t *dstStart = dst; + + for (size_t i = 0; i < mHeight; ++i) { + memcpy(dst, srcY, mWidth); + srcY += srcYStride; + dst += dstYStride; + } + + dst = dstStart + dstYStride * dstHeight; + for (size_t i = 0; i < mHeight / 2; ++i) { + memcpy(dst, srcU, mWidth / 2); + srcU += srcUStride; + dst += dstUVStride; + } + + dst = dstStart + (5 * dstYStride * dstHeight) / 4; + for (size_t i = 0; i < mHeight / 2; ++i) { + memcpy(dst, srcV, mWidth / 2); + srcV += srcVStride; + dst += dstUVStride; + } } OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter( @@ -255,6 +352,40 @@ OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter( return OMX_ErrorNone; } + case OMX_IndexParamPortDefinition: + { + OMX_PARAM_PORTDEFINITIONTYPE *newParams = + (OMX_PARAM_PORTDEFINITIONTYPE *)params; + OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &newParams->format.video; + OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(newParams->nPortIndex)->mDef; + + uint32_t oldWidth = def->format.video.nFrameWidth; + uint32_t oldHeight = def->format.video.nFrameHeight; + uint32_t newWidth = video_def->nFrameWidth; + uint32_t newHeight = video_def->nFrameHeight; + if (newWidth != oldWidth || newHeight != oldHeight) { + bool outputPort = (newParams->nPortIndex == kOutputPortIndex); + def->format.video.nFrameWidth = + (mIsAdaptive && outputPort) ? 
mAdaptiveMaxWidth : newWidth; + def->format.video.nFrameHeight = + (mIsAdaptive && outputPort) ? mAdaptiveMaxHeight : newHeight; + def->format.video.nStride = def->format.video.nFrameWidth; + def->format.video.nSliceHeight = def->format.video.nFrameHeight; + def->nBufferSize = + def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2; + if (outputPort) { + mWidth = newWidth; + mHeight = newHeight; + mCropLeft = 0; + mCropTop = 0; + mCropWidth = newWidth; + mCropHeight = newHeight; + } + newParams->nBufferSize = def->nBufferSize; + } + return SimpleSoftOMXComponent::internalSetParameter(index, params); + } + default: return SimpleSoftOMXComponent::internalSetParameter(index, params); } diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp index a9b3330..98498e9 100644 --- a/media/libstagefright/rtsp/ASessionDescription.cpp +++ b/media/libstagefright/rtsp/ASessionDescription.cpp @@ -319,6 +319,11 @@ bool ASessionDescription::parseNTPRange( s = end + 1; // skip the dash. + if (*s == '\0') { + *npt2 = FLT_MAX; // open ended. 
+ return true; + } + if (!strncmp("now", s, 3)) { return false; // no absolute end time available } diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h index f3dfc59..423a420 100644 --- a/media/libstagefright/rtsp/MyHandler.h +++ b/media/libstagefright/rtsp/MyHandler.h @@ -254,7 +254,9 @@ struct MyHandler : public AHandler { static void addSDES(int s, const sp<ABuffer> &buffer) { struct sockaddr_in addr; socklen_t addrSize = sizeof(addr); - CHECK_EQ(0, getsockname(s, (sockaddr *)&addr, &addrSize)); + if (getsockname(s, (sockaddr *)&addr, &addrSize) != 0) { + inet_aton("0.0.0.0", &(addr.sin_addr)); + } uint8_t *data = buffer->data() + buffer->size(); data[0] = 0x80 | 1; diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk index 903af49..99b480ad 100644 --- a/media/libstagefright/tests/Android.mk +++ b/media/libstagefright/tests/Android.mk @@ -9,7 +9,7 @@ LOCAL_MODULE := SurfaceMediaSource_test LOCAL_MODULE_TAGS := tests LOCAL_SRC_FILES := \ - SurfaceMediaSource_test.cpp \ + SurfaceMediaSource_test.cpp \ DummyRecorder.cpp \ LOCAL_SHARED_LIBRARIES := \ @@ -33,10 +33,10 @@ LOCAL_STATIC_LIBRARIES := \ libgtest_main \ LOCAL_C_INCLUDES := \ - bionic \ - bionic/libstdc++/include \ - external/gtest/include \ - external/stlport/stlport \ + bionic \ + bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ frameworks/av/media/libstagefright \ frameworks/av/media/libstagefright/include \ $(TOP)/frameworks/native/include/media/openmax \ @@ -47,6 +47,41 @@ include $(BUILD_EXECUTABLE) endif + +include $(CLEAR_VARS) + +LOCAL_MODULE := Utils_test + +LOCAL_MODULE_TAGS := tests + +LOCAL_SRC_FILES := \ + Utils_test.cpp \ + +LOCAL_SHARED_LIBRARIES := \ + libcutils \ + liblog \ + libmedia \ + libstagefright \ + libstagefright_foundation \ + libstagefright_omx \ + libstlport \ + +LOCAL_STATIC_LIBRARIES := \ + libgtest \ + libgtest_main \ + +LOCAL_C_INCLUDES := \ + bionic \ + 
bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ + frameworks/av/include \ + frameworks/av/media/libstagefright \ + frameworks/av/media/libstagefright/include \ + $(TOP)/frameworks/native/include/media/openmax \ + +include $(BUILD_EXECUTABLE) + # Include subdirectory makefiles # ============================================================ diff --git a/media/libstagefright/tests/Utils_test.cpp b/media/libstagefright/tests/Utils_test.cpp new file mode 100644 index 0000000..f2825dd --- /dev/null +++ b/media/libstagefright/tests/Utils_test.cpp @@ -0,0 +1,101 @@ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "Utils_test" + +#include <gtest/gtest.h> +#include <utils/String8.h> +#include <utils/Errors.h> +#include <fcntl.h> +#include <unistd.h> + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AUtils.h> +#include <media/stagefright/Utils.h> + +namespace android { + +class UtilsTest : public ::testing::Test { +}; + +TEST_F(UtilsTest, TestFourCC) { + ASSERT_EQ(FOURCC('s', 't', 'm' , 'u'), 'stmu'); +} + +TEST_F(UtilsTest, TestMathTemplates) { + ASSERT_EQ(divRound(-10, -4), 3); + ASSERT_EQ(divRound(-11, -4), 3); + ASSERT_EQ(divRound(-12, -4), 3); + ASSERT_EQ(divRound(-13, -4), 3); + ASSERT_EQ(divRound(-14, -4), 4); + + ASSERT_EQ(divRound(10, -4), -3); + ASSERT_EQ(divRound(11, -4), -3); + ASSERT_EQ(divRound(12, -4), -3); + ASSERT_EQ(divRound(13, -4), -3); + ASSERT_EQ(divRound(14, -4), -4); + + ASSERT_EQ(divRound(-10, 4), -3); + ASSERT_EQ(divRound(-11, 4), -3); + ASSERT_EQ(divRound(-12, 4), -3); + ASSERT_EQ(divRound(-13, 4), -3); + ASSERT_EQ(divRound(-14, 4), -4); + + ASSERT_EQ(divRound(10, 4), 3); + ASSERT_EQ(divRound(11, 4), 3); + ASSERT_EQ(divRound(12, 4), 3); + ASSERT_EQ(divRound(13, 4), 3); + ASSERT_EQ(divRound(14, 4), 4); + + ASSERT_EQ(divUp(-11, -4), 3); + ASSERT_EQ(divUp(-12, -4), 3); + ASSERT_EQ(divUp(-13, -4), 4); + + ASSERT_EQ(divUp(11, -4), -2); + ASSERT_EQ(divUp(12, -4), -3); + ASSERT_EQ(divUp(13, -4), -3); + + ASSERT_EQ(divUp(-11, 4), -2); + ASSERT_EQ(divUp(-12, 4), -3); + ASSERT_EQ(divUp(-13, 4), -3); + + ASSERT_EQ(divUp(11, 4), 3); + ASSERT_EQ(divUp(12, 4), 3); + ASSERT_EQ(divUp(13, 4), 4); + + ASSERT_EQ(abs(5L), 5L); + ASSERT_EQ(abs(-25), 25); + + ASSERT_EQ(min(5.6f, 6.0f), 5.6f); + ASSERT_EQ(min(6.0f, 5.6f), 5.6f); + ASSERT_EQ(min(-4.3, 8.6), -4.3); + ASSERT_EQ(min(8.6, -4.3), -4.3); + + ASSERT_EQ(max(5.6f, 6.0f), 6.0f); + ASSERT_EQ(max(6.0f, 5.6f), 6.0f); + ASSERT_EQ(max(-4.3, 8.6), 8.6); + ASSERT_EQ(max(8.6, -4.3), 8.6); + + ASSERT_EQ(periodicError(124, 100), 24); + 
ASSERT_EQ(periodicError(288, 100), 12); + ASSERT_EQ(periodicError(-345, 100), 45); + ASSERT_EQ(periodicError(-493, 100), 7); + ASSERT_EQ(periodicError(-550, 100), 50); + ASSERT_EQ(periodicError(-600, 100), 0); +} + +} // namespace android diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp index 753b3ec..2834a66 100644 --- a/media/libstagefright/wifi-display/source/Converter.cpp +++ b/media/libstagefright/wifi-display/source/Converter.cpp @@ -74,19 +74,6 @@ Converter::Converter( } } -static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) { - void *mbuf; - if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf) - && mbuf != NULL) { - ALOGV("releasing mbuf %p", mbuf); - - accessUnit->meta()->setPointer("mediaBuffer", NULL); - - static_cast<MediaBuffer *>(mbuf)->release(); - mbuf = NULL; - } -} - void Converter::releaseEncoder() { if (mEncoder == NULL) { return; @@ -95,18 +82,7 @@ void Converter::releaseEncoder() { mEncoder->release(); mEncoder.clear(); - while (!mInputBufferQueue.empty()) { - sp<ABuffer> accessUnit = *mInputBufferQueue.begin(); - mInputBufferQueue.erase(mInputBufferQueue.begin()); - - ReleaseMediaBufferReference(accessUnit); - } - - for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) { - sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i); - ReleaseMediaBufferReference(accessUnit); - } - + mInputBufferQueue.clear(); mEncoderInputBuffers.clear(); mEncoderOutputBuffers.clear(); } @@ -328,7 +304,7 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) { sp<ABuffer> accessUnit; CHECK(msg->findBuffer("accessUnit", &accessUnit)); - ReleaseMediaBufferReference(accessUnit); + accessUnit->setMediaBufferBase(NULL); } break; } @@ -351,15 +327,16 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) { ALOGI("dropping frame."); } - ReleaseMediaBufferReference(accessUnit); + accessUnit->setMediaBufferBase(NULL); break; } #if 0 - void *mbuf; - if 
(accessUnit->meta()->findPointer("mediaBuffer", &mbuf) - && mbuf != NULL) { + MediaBuffer *mbuf = + (MediaBuffer *)(accessUnit->getMediaBufferBase()); + if (mbuf != NULL) { ALOGI("queueing mbuf %p", mbuf); + mbuf->release(); } #endif @@ -647,13 +624,13 @@ status_t Converter::feedEncoderInputBuffers() { buffer->data(), buffer->size()); - void *mediaBuffer; - if (buffer->meta()->findPointer("mediaBuffer", &mediaBuffer) - && mediaBuffer != NULL) { - mEncoderInputBuffers.itemAt(bufferIndex)->meta() - ->setPointer("mediaBuffer", mediaBuffer); + MediaBuffer *mediaBuffer = + (MediaBuffer *)(buffer->getMediaBufferBase()); + if (mediaBuffer != NULL) { + mEncoderInputBuffers.itemAt(bufferIndex)->setMediaBufferBase( + mediaBuffer); - buffer->meta()->setPointer("mediaBuffer", NULL); + buffer->setMediaBufferBase(NULL); } } else { flags = MediaCodec::BUFFER_FLAG_EOS; diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp index 7e8891d..86b918f 100644 --- a/media/libstagefright/wifi-display/source/MediaPuller.cpp +++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp @@ -179,7 +179,7 @@ void MediaPuller::onMessageReceived(const sp<AMessage> &msg) { } else { // video encoder will release MediaBuffer when done // with underlying data. - accessUnit->meta()->setPointer("mediaBuffer", mbuf); + accessUnit->setMediaBufferBase(mbuf); } sp<AMessage> notify = mNotify->dup(); |