diff options
37 files changed, 970 insertions, 365 deletions
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h index a3cc396..72e51f9 100644 --- a/include/media/AudioTrack.h +++ b/include/media/AudioTrack.h @@ -430,7 +430,7 @@ public: * - NO_ERROR: successful operation * - BAD_VALUE: position is NULL */ - status_t getPosition(uint32_t *position) const; + status_t getPosition(uint32_t *position); /* For static buffer mode only, this returns the current playback position in frames * relative to start of buffer. It is analogous to the position units used by @@ -581,6 +581,7 @@ public: * if you need a high resolution mapping between frame position and presentation time, * consider implementing that at application level, based on the low resolution timestamps. * Returns NO_ERROR if timestamp is valid. + * The timestamp parameter is undefined on return, if status is not NO_ERROR. */ status_t getTimestamp(AudioTimestamp& timestamp); @@ -639,7 +640,7 @@ protected: // caller must hold lock on mLock for all _l methods - status_t createTrack_l(size_t epoch); + status_t createTrack_l(); // can only be called when mState != STATE_ACTIVE void flush_l(); @@ -659,6 +660,9 @@ protected: bool isDirect_l() const { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; } + // increment mPosition by the delta of mServer, and return new value of mPosition + uint32_t updateAndGetPosition_l(); + // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0 sp<IAudioTrack> mAudioTrack; sp<IMemory> mCblkMemory; @@ -731,6 +735,18 @@ protected: bool mMarkerReached; uint32_t mNewPosition; // in frames uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS + uint32_t mServer; // in frames, last known mProxy->getPosition() + // which is count of frames consumed by server, + // reset by new IAudioTrack, + // whether it is reset by stop() is TBD + uint32_t mPosition; // in frames, like mServer except continues + // monotonically after new IAudioTrack, + // and could be easily widened to uint64_t + uint32_t mReleased; 
// in frames, count of frames released to server + // but not necessarily consumed by server, + // reset by stop() but continues monotonically + // after new IAudioTrack to restore mPosition, + // and could be easily widened to uint64_t audio_output_flags_t mFlags; // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD. diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h index 5c8a484..619ac78 100644 --- a/include/media/IAudioTrack.h +++ b/include/media/IAudioTrack.h @@ -88,7 +88,7 @@ public: /* Send parameters to the audio hardware */ virtual status_t setParameters(const String8& keyValuePairs) = 0; - /* Return NO_ERROR if timestamp is valid */ + /* Return NO_ERROR if timestamp is valid. timestamp is undefined otherwise. */ virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0; /* Signal the playback thread for a change in control block */ diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h index be0c15b..d422576 100644 --- a/include/media/nbaio/NBAIO.h +++ b/include/media/nbaio/NBAIO.h @@ -227,7 +227,7 @@ public: // Returns NO_ERROR if a timestamp is available. The timestamp includes the total number // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC - // as of this presentation count. + // as of this presentation count. The timestamp parameter is undefined if error is returned. 
virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; } protected: diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index d87e6f5..ff7da83 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -398,7 +398,7 @@ status_t AudioTrack::set( } // create the IAudioTrack - status = createTrack_l(0 /*epoch*/); + status = createTrack_l(); if (status != NO_ERROR) { if (mAudioTrackThread != 0) { @@ -417,6 +417,9 @@ status_t AudioTrack::set( mMarkerReached = false; mNewPosition = 0; mUpdatePeriod = 0; + mServer = 0; + mPosition = 0; + mReleased = 0; AudioSystem::acquireAudioSessionId(mSessionId, mClientPid); mSequence = 1; mObservedSequence = mSequence; @@ -443,14 +446,16 @@ status_t AudioTrack::start() } else { mState = STATE_ACTIVE; } + (void) updateAndGetPosition_l(); if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) { // reset current position as seen by client to 0 - mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition()); + mPosition = 0; + mReleased = 0; // force refresh of remaining frames by processAudioBuffer() as last // write before stop could be partial. mRefreshRemaining = true; } - mNewPosition = mProxy->getPosition() + mUpdatePeriod; + mNewPosition = mPosition + mUpdatePeriod; int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags); sp<AudioTrackThread> t = mAudioTrackThread; @@ -709,7 +714,7 @@ void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount) { // FIXME If setting a loop also sets position to start of loop, then // this is correct. Otherwise it should be removed. - mNewPosition = mProxy->getPosition() + mUpdatePeriod; + mNewPosition = updateAndGetPosition_l() + mUpdatePeriod; mLoopPeriod = loopCount != 0 ? 
loopEnd - loopStart : 0; mStaticProxy->setLoop(loopStart, loopEnd, loopCount); } @@ -751,7 +756,7 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) } AutoMutex lock(mLock); - mNewPosition = mProxy->getPosition() + updatePeriod; + mNewPosition = updateAndGetPosition_l() + updatePeriod; mUpdatePeriod = updatePeriod; return NO_ERROR; @@ -791,7 +796,7 @@ status_t AudioTrack::setPosition(uint32_t position) if (mState == STATE_ACTIVE) { return INVALID_OPERATION; } - mNewPosition = mProxy->getPosition() + mUpdatePeriod; + mNewPosition = updateAndGetPosition_l() + mUpdatePeriod; mLoopPeriod = 0; // FIXME Check whether loops and setting position are incompatible in old code. // If we use setLoop for both purposes we lose the capability to set the position while looping. @@ -800,7 +805,7 @@ status_t AudioTrack::setPosition(uint32_t position) return NO_ERROR; } -status_t AudioTrack::getPosition(uint32_t *position) const +status_t AudioTrack::getPosition(uint32_t *position) { if (position == NULL) { return BAD_VALUE; @@ -823,8 +828,8 @@ status_t AudioTrack::getPosition(uint32_t *position) const *position = dspFrames; } else { // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes - *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 : - mProxy->getPosition(); + *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 
+ 0 : updateAndGetPosition_l(); } return NO_ERROR; } @@ -881,7 +886,7 @@ status_t AudioTrack::attachAuxEffect(int effectId) // ------------------------------------------------------------------------- // must be called with mLock held -status_t AudioTrack::createTrack_l(size_t epoch) +status_t AudioTrack::createTrack_l() { status_t status; const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); @@ -1184,7 +1189,6 @@ status_t AudioTrack::createTrack_l(size_t epoch) mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY); mProxy->setSendLevel(mSendLevel); mProxy->setSampleRate(mSampleRate); - mProxy->setEpoch(epoch); mProxy->setMinimum(mNotificationFramesAct); mDeathNotifier = new DeathNotifier(this); @@ -1319,6 +1323,7 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer) buffer.mRaw = audioBuffer->raw; AutoMutex lock(mLock); + mReleased += stepCount; mInUnderrun = false; mProxy->releaseBuffer(&buffer); @@ -1531,7 +1536,7 @@ nsecs_t AudioTrack::processAudioBuffer() } // Get current position of server - size_t position = mProxy->getPosition(); + size_t position = updateAndGetPosition_l(); // Manage marker callback bool markerReached = false; @@ -1796,14 +1801,18 @@ status_t AudioTrack::restoreTrack_l(const char *from) return DEAD_OBJECT; } - // if the new IAudioTrack is created, createTrack_l() will modify the + // save the old static buffer position + size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0; + + // If a new IAudioTrack is successfully created, createTrack_l() will modify the // following member variables: mAudioTrack, mCblkMemory and mCblk. - // It will also delete the strong references on previous IAudioTrack and IMemory + // It will also delete the strong references on previous IAudioTrack and IMemory. + // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact. 
+ result = createTrack_l(); // take the frames that will be lost by track recreation into account in saved position - size_t position = mProxy->getPosition() + mProxy->getFramesFilled(); - size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0; - result = createTrack_l(position /*epoch*/); + (void) updateAndGetPosition_l(); + mPosition = mReleased; if (result == NO_ERROR) { // continue playback from last known position, but @@ -1838,6 +1847,27 @@ status_t AudioTrack::restoreTrack_l(const char *from) return result; } +uint32_t AudioTrack::updateAndGetPosition_l() +{ + // This is the sole place to read server consumed frames + uint32_t newServer = mProxy->getPosition(); + int32_t delta = newServer - mServer; + mServer = newServer; + // TODO There is controversy about whether there can be "negative jitter" in server position. + // This should be investigated further, and if possible, it should be addressed. + // A more definite failure mode is infrequent polling by client. + // One could call (void)getPosition_l() in releaseBuffer(), + // so mReleased and mPosition are always lock-step as best possible. + // That should ensure delta never goes negative for infrequent polling + // unless the server has more than 2^31 frames in its buffer, + // in which case the use of uint32_t for these counters has bigger issues. + if (delta < 0) { + ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta); + delta = 0; + } + return mPosition += (uint32_t) delta; +} + status_t AudioTrack::setParameters(const String8& keyValuePairs) { AutoMutex lock(mLock); @@ -1854,9 +1884,34 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp) if (mState != STATE_ACTIVE && mState != STATE_PAUSED) { return INVALID_OPERATION; } + // The presented frame count must always lag behind the consumed frame count. + // To avoid a race, read the presented frames first. This ensures that presented <= consumed. 
status_t status = mAudioTrack->getTimestamp(timestamp); if (status == NO_ERROR) { - timestamp.mPosition += mProxy->getEpoch(); + // Update the mapping between local consumed (mPosition) and server consumed (mServer) + (void) updateAndGetPosition_l(); + // Server consumed (mServer) and presented both use the same server time base, + // and server consumed is always >= presented. + // The delta between these represents the number of frames in the buffer pipeline. + // If this delta between these is greater than the client position, it means that + // actually presented is still stuck at the starting line (figuratively speaking), + // waiting for the first frame to go by. So we can't report a valid timestamp yet. + if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) { + return INVALID_OPERATION; + } + // Convert timestamp position from server time base to client time base. + // TODO The following code should work OK now because timestamp.mPosition is 32-bit. + // But if we change it to 64-bit then this could fail. + // If (mPosition - mServer) can be negative then should use: + // (int32_t)(mPosition - mServer) + timestamp.mPosition += mPosition - mServer; + // Immediately after a call to getPosition_l(), mPosition and + // mServer both represent the same frame position. mPosition is + // in client's point of view, and mServer is in server's point of + // view. So the difference between them is the "fudge factor" + // between client and server views due to stop() and/or new + // IAudioTrack. And timestamp.mPosition is initially in server's + // point of view, so we need to apply the same fudge factor to it. 
} return status; } diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp index 8e1987a..d8ed836 100644 --- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp +++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp @@ -54,7 +54,8 @@ NuPlayer::GenericSource::GenericSource( mDrmManagerClient(NULL), mMetaDataSize(-1ll), mBitrate(-1ll), - mPollBufferingGeneration(0) { + mPollBufferingGeneration(0), + mPendingReadBufferTypes(0) { resetDataSource(); DataSource::RegisterDefaultSniffers(); } @@ -169,6 +170,8 @@ status_t NuPlayer::GenericSource::initFromDataSource() { if (mAudioTrack.mSource == NULL) { mAudioTrack.mIndex = i; mAudioTrack.mSource = track; + mAudioTrack.mPackets = + new AnotherPacketSource(mAudioTrack.mSource->getFormat()); if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) { mAudioIsVorbis = true; @@ -180,6 +183,8 @@ status_t NuPlayer::GenericSource::initFromDataSource() { if (mVideoTrack.mSource == NULL) { mVideoTrack.mIndex = i; mVideoTrack.mSource = track; + mVideoTrack.mPackets = + new AnotherPacketSource(mVideoTrack.mSource->getFormat()); // check if the source requires secure buffers int32_t secure; @@ -427,16 +432,12 @@ void NuPlayer::GenericSource::start() { if (mAudioTrack.mSource != NULL) { CHECK_EQ(mAudioTrack.mSource->start(), (status_t)OK); - mAudioTrack.mPackets = - new AnotherPacketSource(mAudioTrack.mSource->getFormat()); postReadBuffer(MEDIA_TRACK_TYPE_AUDIO); } if (mVideoTrack.mSource != NULL) { CHECK_EQ(mVideoTrack.mSource->start(), (status_t)OK); - mVideoTrack.mPackets = - new AnotherPacketSource(mVideoTrack.mSource->getFormat()); postReadBuffer(MEDIA_TRACK_TYPE_VIDEO); } @@ -1148,15 +1149,27 @@ sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer( } void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) { - sp<AMessage> msg = new AMessage(kWhatReadBuffer, id()); - msg->setInt32("trackType", trackType); - msg->post(); + Mutex::Autolock 
_l(mReadBufferLock); + + if ((mPendingReadBufferTypes & (1 << trackType)) == 0) { + mPendingReadBufferTypes |= (1 << trackType); + sp<AMessage> msg = new AMessage(kWhatReadBuffer, id()); + msg->setInt32("trackType", trackType); + msg->post(); + } } void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) { int32_t tmpType; CHECK(msg->findInt32("trackType", &tmpType)); media_track_type trackType = (media_track_type)tmpType; + { + // only protect the variable change, as readBuffer may + // take considerable time. This may result in one extra + // read being processed, but that is benign. + Mutex::Autolock _l(mReadBufferLock); + mPendingReadBufferTypes &= ~(1 << trackType); + } readBuffer(trackType); } diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h index 50ff98a..c70c48e 100644 --- a/media/libmediaplayerservice/nuplayer/GenericSource.h +++ b/media/libmediaplayerservice/nuplayer/GenericSource.h @@ -131,6 +131,8 @@ private: off64_t mMetaDataSize; int64_t mBitrate; int32_t mPollBufferingGeneration; + uint32_t mPendingReadBufferTypes; + mutable Mutex mReadBufferLock; sp<ALooper> mLooper; diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp index 8b4dd6f..4569c1c 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#define LOG_TAG "SoftAAC2" //#define LOG_NDEBUG 0 +#define LOG_TAG "SoftAAC2" #include <utils/Log.h> #include "SoftAAC2.h" @@ -68,7 +68,6 @@ SoftAAC2::SoftAAC2( mOutputBufferCount(0), mSignalledError(false), mLastInHeader(NULL), - mCurrentInputTime(0), mOutputPortSettingsChange(NONE) { initPorts(); CHECK_EQ(initDecoder(), (status_t)OK); @@ -610,9 +609,24 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL); return; } + + // insert buffer size and time stamp + mBufferSizes.add(inBufferLength[0]); + if (mLastInHeader != inHeader) { + mBufferTimestamps.add(inHeader->nTimeStamp); + mLastInHeader = inHeader; + } else { + int64_t currentTime = mBufferTimestamps.top(); + currentTime += mStreamInfo->aacSamplesPerFrame * + 1000000ll / mStreamInfo->sampleRate; + mBufferTimestamps.add(currentTime); + } } else { inBuffer[0] = inHeader->pBuffer + inHeader->nOffset; inBufferLength[0] = inHeader->nFilledLen; + mLastInHeader = inHeader; + mBufferTimestamps.add(inHeader->nTimeStamp); + mBufferSizes.add(inHeader->nFilledLen); } // Fill and decode @@ -621,136 +635,136 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { INT prevSampleRate = mStreamInfo->sampleRate; INT prevNumChannels = mStreamInfo->numChannels; - if (inHeader != mLastInHeader) { - mLastInHeader = inHeader; - mCurrentInputTime = inHeader->nTimeStamp; - } else { - if (mStreamInfo->sampleRate) { - mCurrentInputTime += mStreamInfo->aacSamplesPerFrame * - 1000000ll / mStreamInfo->sampleRate; - } else { - ALOGW("no sample rate yet"); - } - } - mAnchorTimes.add(mCurrentInputTime); aacDecoder_Fill(mAACDecoder, inBuffer, inBufferLength, bytesValid); - // run DRC check - mDrcWrap.submitStreamData(mStreamInfo); - mDrcWrap.update(); + // run DRC check + mDrcWrap.submitStreamData(mStreamInfo); + mDrcWrap.update(); - AAC_DECODER_ERROR decoderErr = - aacDecoder_DecodeFrame(mAACDecoder, - tmpOutBuffer, - 2048 * MAX_CHANNEL_COUNT, - 0 /* flags */); 
+ UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; + inHeader->nFilledLen -= inBufferUsedLength; + inHeader->nOffset += inBufferUsedLength; - if (decoderErr != AAC_DEC_OK) { - ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr); - } - - if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { - ALOGE("AAC_DEC_NOT_ENOUGH_BITS should never happen"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); - return; - } + AAC_DECODER_ERROR decoderErr; + do { + if (outputDelayRingBufferSamplesLeft() < + (mStreamInfo->frameSize * mStreamInfo->numChannels)) { + ALOGV("skipping decode: not enough space left in ringbuffer"); + break; + } - if (bytesValid[0] != 0) { - ALOGE("bytesValid[0] != 0 should never happen"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); - return; - } + int numconsumed = mStreamInfo->numTotalBytes + mStreamInfo->numBadBytes; + decoderErr = aacDecoder_DecodeFrame(mAACDecoder, + tmpOutBuffer, + 2048 * MAX_CHANNEL_COUNT, + 0 /* flags */); - size_t numOutBytes = - mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; + numconsumed = (mStreamInfo->numTotalBytes + mStreamInfo->numBadBytes) - numconsumed; + if (numconsumed != 0) { + mDecodedSizes.add(numconsumed); + } - if (decoderErr == AAC_DEC_OK) { - if (!outputDelayRingBufferPutSamples(tmpOutBuffer, - mStreamInfo->frameSize * mStreamInfo->numChannels)) { - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); - return; + if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) { + break; } - UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0]; - inHeader->nFilledLen -= inBufferUsedLength; - inHeader->nOffset += inBufferUsedLength; - } else { - ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr); - memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow + if (decoderErr != AAC_DEC_OK) { + ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr); + 
} - if (!outputDelayRingBufferPutSamples(tmpOutBuffer, - mStreamInfo->frameSize * mStreamInfo->numChannels)) { + if (bytesValid[0] != 0) { + ALOGE("bytesValid[0] != 0 should never happen"); mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); + notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); return; } - // Discard input buffer. - inHeader->nFilledLen = 0; - - aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); - - // fall through - } - - /* - * AAC+/eAAC+ streams can be signalled in two ways: either explicitly - * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual - * rate system and the sampling rate in the final output is actually - * doubled compared with the core AAC decoder sampling rate. - * - * Explicit signalling is done by explicitly defining SBR audio object - * type in the bitstream. Implicit signalling is done by embedding - * SBR content in AAC extension payload specific to SBR, and hence - * requires an AAC decoder to perform pre-checks on actual audio frames. - * - * Thus, we could not say for sure whether a stream is - * AAC+/eAAC+ until the first data frame is decoded. 
- */ - if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1 - if (mStreamInfo->sampleRate != prevSampleRate || - mStreamInfo->numChannels != prevNumChannels) { - ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels", - prevSampleRate, mStreamInfo->sampleRate, - prevNumChannels, mStreamInfo->numChannels); + size_t numOutBytes = + mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels; - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; + if (decoderErr == AAC_DEC_OK) { + if (!outputDelayRingBufferPutSamples(tmpOutBuffer, + mStreamInfo->frameSize * mStreamInfo->numChannels)) { + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); + return; + } + } else { + ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr); + + memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - mInputBufferCount++; - inQueue.erase(inQueue.begin()); - mLastInHeader = NULL; - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; + if (!outputDelayRingBufferPutSamples(tmpOutBuffer, + mStreamInfo->frameSize * mStreamInfo->numChannels)) { + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); + return; } + + // Discard input buffer. + inHeader->nFilledLen = 0; + + aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1); + + // fall through + } + + /* + * AAC+/eAAC+ streams can be signalled in two ways: either explicitly + * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual + * rate system and the sampling rate in the final output is actually + * doubled compared with the core AAC decoder sampling rate. + * + * Explicit signalling is done by explicitly defining SBR audio object + * type in the bitstream. 
Implicit signalling is done by embedding + * SBR content in AAC extension payload specific to SBR, and hence + * requires an AAC decoder to perform pre-checks on actual audio frames. + * + * Thus, we could not say for sure whether a stream is + * AAC+/eAAC+ until the first data frame is decoded. + */ + if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1 + if (mStreamInfo->sampleRate != prevSampleRate || + mStreamInfo->numChannels != prevNumChannels) { + ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels", + prevSampleRate, mStreamInfo->sampleRate, + prevNumChannels, mStreamInfo->numChannels); + + notify(OMX_EventPortSettingsChanged, 1, 0, NULL); + mOutputPortSettingsChange = AWAITING_DISABLED; + + if (inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + mInputBufferCount++; + inQueue.erase(inQueue.begin()); + mLastInHeader = NULL; + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } + return; + } + } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) { + ALOGW("Invalid AAC stream"); + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); return; } - } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) { - ALOGW("Invalid AAC stream"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); - return; - } - if (inHeader->nFilledLen == 0) { - inInfo->mOwnedByUs = false; - mInputBufferCount++; - inQueue.erase(inQueue.begin()); - mLastInHeader = NULL; - inInfo = NULL; - notifyEmptyBufferDone(inHeader); - inHeader = NULL; - } else { - ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen); - } + if (inHeader && inHeader->nFilledLen == 0) { + inInfo->mOwnedByUs = false; + mInputBufferCount++; + inQueue.erase(inQueue.begin()); + mLastInHeader = NULL; + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } else { + ALOGV("inHeader->nFilledLen = %d", inHeader ? 
inHeader->nFilledLen : 0); + } + } while (decoderErr == AAC_DEC_OK); } int32_t outputDelay = mStreamInfo->outputDelay * mStreamInfo->numChannels; @@ -809,8 +823,9 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset); + int samplesize = mStreamInfo->numChannels * sizeof(int16_t); if (outHeader->nOffset - + mStreamInfo->frameSize * mStreamInfo->numChannels * sizeof(int16_t) + + mStreamInfo->frameSize * samplesize > outHeader->nAllocLen) { ALOGE("buffer overflow"); mSignalledError = true; @@ -818,17 +833,67 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { return; } - int32_t ns = outputDelayRingBufferGetSamples(outBuffer, - mStreamInfo->frameSize * mStreamInfo->numChannels); // TODO: check for overflow - if (ns != mStreamInfo->frameSize * mStreamInfo->numChannels) { - ALOGE("not a complete frame of samples available"); - mSignalledError = true; - notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); - return; + + int available = outputDelayRingBufferSamplesAvailable(); + int numSamples = outHeader->nAllocLen / sizeof(int16_t); + if (numSamples > available) { + numSamples = available; + } + int64_t currentTime = 0; + if (available) { + + int numFrames = numSamples / (mStreamInfo->frameSize * mStreamInfo->numChannels); + numSamples = numFrames * (mStreamInfo->frameSize * mStreamInfo->numChannels); + + ALOGV("%d samples available (%d), or %d frames", + numSamples, available, numFrames); + int64_t *nextTimeStamp = &mBufferTimestamps.editItemAt(0); + currentTime = *nextTimeStamp; + int32_t *currentBufLeft = &mBufferSizes.editItemAt(0); + for (int i = 0; i < numFrames; i++) { + int32_t decodedSize = mDecodedSizes.itemAt(0); + mDecodedSizes.removeAt(0); + ALOGV("decoded %d of %d", decodedSize, *currentBufLeft); + if (*currentBufLeft > decodedSize) { + // adjust/interpolate next time stamp + *currentBufLeft -= decodedSize; + *nextTimeStamp += 
mStreamInfo->aacSamplesPerFrame * + 1000000ll / mStreamInfo->sampleRate; + ALOGV("adjusted nextTimeStamp/size to %lld/%d", + *nextTimeStamp, *currentBufLeft); + } else { + // move to next timestamp in list + if (mBufferTimestamps.size() > 0) { + mBufferTimestamps.removeAt(0); + nextTimeStamp = &mBufferTimestamps.editItemAt(0); + mBufferSizes.removeAt(0); + currentBufLeft = &mBufferSizes.editItemAt(0); + ALOGV("moved to next time/size: %lld/%d", + *nextTimeStamp, *currentBufLeft); + } + // try to limit output buffer size to match input buffers + // (e.g when an input buffer contained 4 "sub" frames, output + // at most 4 decoded units in the corresponding output buffer) + // This is optional. Remove the next three lines to fill the output + // buffer with as many units as available. + numFrames = i + 1; + numSamples = numFrames * mStreamInfo->frameSize * mStreamInfo->numChannels; + break; + } + } + + ALOGV("getting %d from ringbuffer", numSamples); + int32_t ns = outputDelayRingBufferGetSamples(outBuffer, numSamples); + if (ns != numSamples) { + ALOGE("not a complete frame of samples available"); + mSignalledError = true; + notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); + return; + } } - outHeader->nFilledLen = mStreamInfo->frameSize * mStreamInfo->numChannels - * sizeof(int16_t); + outHeader->nFilledLen = numSamples * sizeof(int16_t); + if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) { outHeader->nFlags = OMX_BUFFERFLAG_EOS; mEndOfOutput = true; @@ -836,13 +901,13 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { outHeader->nFlags = 0; } - outHeader->nTimeStamp = mAnchorTimes.isEmpty() ? 
0 : mAnchorTimes.itemAt(0); - mAnchorTimes.removeAt(0); + outHeader->nTimeStamp = currentTime; mOutputBufferCount++; outInfo->mOwnedByUs = false; outQueue.erase(outQueue.begin()); outInfo = NULL; + ALOGV("out timestamp %lld / %d", outHeader->nTimeStamp, outHeader->nFilledLen); notifyFillBufferDone(outHeader); outHeader = NULL; } @@ -877,8 +942,10 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) { outHeader->nFilledLen = 0; outHeader->nFlags = OMX_BUFFERFLAG_EOS; - outHeader->nTimeStamp = mAnchorTimes.itemAt(0); - mAnchorTimes.removeAt(0); + outHeader->nTimeStamp = mBufferTimestamps.itemAt(0); + mBufferTimestamps.clear(); + mBufferSizes.clear(); + mDecodedSizes.clear(); mOutputBufferCount++; outInfo->mOwnedByUs = false; @@ -899,7 +966,9 @@ void SoftAAC2::onPortFlushCompleted(OMX_U32 portIndex) { // depend on fragments from the last one decoded. // drain all existing data drainDecoder(); - mAnchorTimes.clear(); + mBufferTimestamps.clear(); + mBufferSizes.clear(); + mDecodedSizes.clear(); mLastInHeader = NULL; } else { while (outputDelayRingBufferSamplesAvailable() > 0) { @@ -955,7 +1024,9 @@ void SoftAAC2::onReset() { mOutputDelayRingBufferReadPos = 0; mEndOfInput = false; mEndOfOutput = false; - mAnchorTimes.clear(); + mBufferTimestamps.clear(); + mBufferSizes.clear(); + mDecodedSizes.clear(); mLastInHeader = NULL; // To make the codec behave the same before and after a reset, we need to invalidate the diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h index 865bd15..9fcb598 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC2.h +++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h @@ -59,8 +59,9 @@ private: size_t mOutputBufferCount; bool mSignalledError; OMX_BUFFERHEADERTYPE *mLastInHeader; - int64_t mCurrentInputTime; - Vector<int64_t> mAnchorTimes; + Vector<int32_t> mBufferSizes; + Vector<int32_t> mDecodedSizes; + Vector<int64_t> mBufferTimestamps; CDrcPresModeWrapper mDrcWrap; diff --git 
a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp index 0d1ab71..5b2ab84 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp +++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp @@ -134,6 +134,12 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { } uint8_t *bitstream = inHeader->pBuffer + inHeader->nOffset; + uint32_t *start_code = (uint32_t *)bitstream; + bool volHeader = *start_code == 0xB0010000; + if (volHeader) { + PVCleanUpVideoDecoder(mHandle); + mInitialized = false; + } if (!mInitialized) { uint8_t *vol_data[1]; @@ -141,7 +147,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { vol_data[0] = NULL; - if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) { + if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) || volHeader) { vol_data[0] = bitstream; vol_size = inHeader->nFilledLen; } @@ -169,21 +175,26 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { PVSetPostProcType((VideoDecControls *) mHandle, 0); + bool hasFrameData = false; if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) { inInfo->mOwnedByUs = false; inQueue.erase(inQueue.begin()); inInfo = NULL; notifyEmptyBufferDone(inHeader); inHeader = NULL; + } else if (volHeader) { + hasFrameData = true; } mInitialized = true; - if (mode == MPEG4_MODE && portSettingsChanged()) { + if (mode == MPEG4_MODE && handlePortSettingsChange()) { return; } - continue; + if (!hasFrameData) { + continue; + } } if (!mFramesConfigured) { @@ -223,7 +234,9 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { return; } - if (portSettingsChanged()) { + // H263 doesn't have VOL header, the frame size information is in short header, i.e. the + // decoder may detect size change after PVDecodeVideoFrame. 
+ if (handlePortSettingsChange()) { return; } @@ -269,7 +282,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) { } } -bool SoftMPEG4::portSettingsChanged() { +bool SoftMPEG4::handlePortSettingsChange() { uint32_t disp_width, disp_height; PVGetVideoDimensions(mHandle, (int32 *)&disp_width, (int32 *)&disp_height); @@ -282,25 +295,20 @@ bool SoftMPEG4::portSettingsChanged() { ALOGV("disp_width = %d, disp_height = %d, buf_width = %d, buf_height = %d", disp_width, disp_height, buf_width, buf_height); - if (mCropWidth != disp_width - || mCropHeight != disp_height) { + bool cropChanged = false; + if (mCropWidth != disp_width || mCropHeight != disp_height) { mCropLeft = 0; mCropTop = 0; mCropWidth = disp_width; mCropHeight = disp_height; - - notify(OMX_EventPortSettingsChanged, - 1, - OMX_IndexConfigCommonOutputCrop, - NULL); + cropChanged = true; } - if (buf_width != mWidth || buf_height != mHeight) { - mWidth = buf_width; - mHeight = buf_height; - - updatePortDefinitions(); - + bool portWillReset = false; + const bool fakeStride = true; + SoftVideoDecoderOMXComponent::handlePortSettingsChange( + &portWillReset, buf_width, buf_height, cropChanged, fakeStride); + if (portWillReset) { if (mMode == MODE_H263) { PVCleanUpVideoDecoder(mHandle); @@ -318,13 +326,9 @@ bool SoftMPEG4::portSettingsChanged() { } mFramesConfigured = false; - - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; - return true; } - return false; + return portWillReset; } void SoftMPEG4::onPortFlushCompleted(OMX_U32 portIndex) { diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h index de14aaf..8a06a00 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h +++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h @@ -67,7 +67,7 @@ private: status_t initDecoder(); virtual void updatePortDefinitions(); - bool portSettingsChanged(); + bool handlePortSettingsChange(); 
DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4); }; diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp index b3c350f..b03ec8c 100644 --- a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp +++ b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp @@ -1426,7 +1426,7 @@ PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop) video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1); /* otherwise calculate above */ } size = (int32)video->width * video->height; - if (video->currVop->predictionType == P_VOP && size > video->videoDecControls->size) + if (currVop->predictionType == P_VOP && size > video->videoDecControls->size) { status = PV_FAIL; goto return_point; diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp index 2f63bdd..828577a 100644 --- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp +++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp @@ -137,29 +137,10 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) { uint32_t width = mImg->d_w; uint32_t height = mImg->d_h; - - if (width != mWidth || height != mHeight) { - mWidth = width; - mHeight = height; - - if (!mIsAdaptive || width > mAdaptiveMaxWidth || height > mAdaptiveMaxHeight) { - if (mIsAdaptive) { - if (width > mAdaptiveMaxWidth) { - mAdaptiveMaxWidth = width; - } - if (height > mAdaptiveMaxHeight) { - mAdaptiveMaxHeight = height; - } - } - updatePortDefinitions(); - notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; - return; - } else { - updatePortDefinitions(); - notify(OMX_EventPortSettingsChanged, kOutputPortIndex, - OMX_IndexConfigCommonOutputCrop, NULL); - } + bool portWillReset = false; + handlePortSettingsChange(&portWillReset, width, height); + if (portWillReset) { + return; } outHeader->nOffset = 0; @@ -167,36 +148,14 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) { outHeader->nFlags = 
EOSseen ? OMX_BUFFERFLAG_EOS : 0; outHeader->nTimeStamp = inHeader->nTimeStamp; - uint32_t buffer_stride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth; - uint32_t buffer_height = mIsAdaptive ? mAdaptiveMaxHeight : mHeight; - - const uint8_t *srcLine = (const uint8_t *)mImg->planes[PLANE_Y]; uint8_t *dst = outHeader->pBuffer; - for (size_t i = 0; i < buffer_height; ++i) { - if (i < mImg->d_h) { - memcpy(dst, srcLine, mImg->d_w); - srcLine += mImg->stride[PLANE_Y]; - } - dst += buffer_stride; - } - - srcLine = (const uint8_t *)mImg->planes[PLANE_U]; - for (size_t i = 0; i < buffer_height / 2; ++i) { - if (i < mImg->d_h / 2) { - memcpy(dst, srcLine, mImg->d_w / 2); - srcLine += mImg->stride[PLANE_U]; - } - dst += buffer_stride / 2; - } - - srcLine = (const uint8_t *)mImg->planes[PLANE_V]; - for (size_t i = 0; i < buffer_height / 2; ++i) { - if (i < mImg->d_h / 2) { - memcpy(dst, srcLine, mImg->d_w / 2); - srcLine += mImg->stride[PLANE_V]; - } - dst += buffer_stride / 2; - } + const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y]; + const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U]; + const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V]; + size_t srcYStride = mImg->stride[PLANE_Y]; + size_t srcUStride = mImg->stride[PLANE_U]; + size_t srcVStride = mImg->stride[PLANE_V]; + copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride); mImg = NULL; outInfo->mOwnedByUs = false; diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp index a7bde97..cf3c3e3 100644 --- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp +++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp @@ -58,7 +58,6 @@ SoftAVC::SoftAVC( 320 /* width */, 240 /* height */, callbacks, appData, component), mHandle(NULL), mInputBufferCount(0), - mPictureSize(mWidth * mHeight * 3 / 2), mFirstPicture(NULL), mFirstPictureId(-1), mPicId(0), @@ -118,7 +117,7 @@ void SoftAVC::onQueueFilled(OMX_U32 
/* portIndex */) { } H264SwDecRet ret = H264SWDEC_PIC_RDY; - bool portSettingsChanged = false; + bool portWillReset = false; while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty()) && outQueue.size() == kNumOutputBuffers) { @@ -161,17 +160,13 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { H264SwDecInfo decoderInfo; CHECK(H264SwDecGetInfo(mHandle, &decoderInfo) == H264SWDEC_OK); - if (handlePortSettingChangeEvent(&decoderInfo)) { - portSettingsChanged = true; - } - - if (decoderInfo.croppingFlag && - handleCropRectEvent(&decoderInfo.cropParams)) { - portSettingsChanged = true; - } + bool cropChanged = handleCropChange(decoderInfo); + handlePortSettingsChange( + &portWillReset, decoderInfo.picWidth, decoderInfo.picHeight, + cropChanged); } } else { - if (portSettingsChanged) { + if (portWillReset) { if (H264SwDecNextPicture(mHandle, &decodedPicture, 0) == H264SWDEC_PIC_RDY) { @@ -199,8 +194,7 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { inInfo->mOwnedByUs = false; notifyEmptyBufferDone(inHeader); - if (portSettingsChanged) { - portSettingsChanged = false; + if (portWillReset) { return; } @@ -215,44 +209,33 @@ void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) { } } -bool SoftAVC::handlePortSettingChangeEvent(const H264SwDecInfo *info) { - if (mWidth != info->picWidth || mHeight != info->picHeight) { - mWidth = info->picWidth; - mHeight = info->picHeight; - mPictureSize = mWidth * mHeight * 3 / 2; - updatePortDefinitions(); - notify(OMX_EventPortSettingsChanged, 1, 0, NULL); - mOutputPortSettingsChange = AWAITING_DISABLED; - return true; +bool SoftAVC::handleCropChange(const H264SwDecInfo& decInfo) { + if (!decInfo.croppingFlag) { + return false; } - return false; -} - -bool SoftAVC::handleCropRectEvent(const CropParams *crop) { - if (mCropLeft != crop->cropLeftOffset || - mCropTop != crop->cropTopOffset || - mCropWidth != crop->cropOutWidth || - mCropHeight != crop->cropOutHeight) { - mCropLeft = crop->cropLeftOffset; - mCropTop = 
crop->cropTopOffset; - mCropWidth = crop->cropOutWidth; - mCropHeight = crop->cropOutHeight; - - notify(OMX_EventPortSettingsChanged, 1, - OMX_IndexConfigCommonOutputCrop, NULL); - - return true; + const CropParams& crop = decInfo.cropParams; + if (mCropLeft == crop.cropLeftOffset && + mCropTop == crop.cropTopOffset && + mCropWidth == crop.cropOutWidth && + mCropHeight == crop.cropOutHeight) { + return false; } - return false; + + mCropLeft = crop.cropLeftOffset; + mCropTop = crop.cropTopOffset; + mCropWidth = crop.cropOutWidth; + mCropHeight = crop.cropOutHeight; + return true; } void SoftAVC::saveFirstOutputBuffer(int32_t picId, uint8_t *data) { CHECK(mFirstPicture == NULL); mFirstPictureId = picId; - mFirstPicture = new uint8_t[mPictureSize]; - memcpy(mFirstPicture, data, mPictureSize); + uint32_t pictureSize = mWidth * mHeight * 3 / 2; + mFirstPicture = new uint8_t[pictureSize]; + memcpy(mFirstPicture, data, pictureSize); } void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) { @@ -263,9 +246,17 @@ void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) { OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId); outHeader->nTimeStamp = header->nTimeStamp; outHeader->nFlags = header->nFlags; - outHeader->nFilledLen = mPictureSize; - memcpy(outHeader->pBuffer + outHeader->nOffset, - data, mPictureSize); + outHeader->nFilledLen = mWidth * mHeight * 3 / 2; + + uint8_t *dst = outHeader->pBuffer + outHeader->nOffset; + const uint8_t *srcY = data; + const uint8_t *srcU = srcY + mWidth * mHeight; + const uint8_t *srcV = srcU + mWidth * mHeight / 4; + size_t srcYStride = mWidth; + size_t srcUStride = mWidth / 2; + size_t srcVStride = srcUStride; + copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride); + mPicToHeaderMap.removeItem(picId); delete header; outInfo->mOwnedByUs = false; diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h index 
ee69926..253a406 100644 --- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h +++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h @@ -55,8 +55,6 @@ private: size_t mInputBufferCount; - uint32_t mPictureSize; - uint8_t *mFirstPicture; int32_t mFirstPictureId; @@ -75,8 +73,7 @@ private: void drainAllOutputBuffers(bool eos); void drainOneOutputBuffer(int32_t picId, uint8_t *data); void saveFirstOutputBuffer(int32_t pidId, uint8_t *data); - bool handleCropRectEvent(const CropParams* crop); - bool handlePortSettingChangeEvent(const H264SwDecInfo *info); + bool handleCropChange(const H264SwDecInfo& decInfo); DISALLOW_EVIL_CONSTRUCTORS(SoftAVC); }; diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp index cc98da0..1899b40 100644 --- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp +++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp @@ -65,8 +65,8 @@ void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) { CHECK(format->findInt32("color-format", &colorFormatNew)); int32_t widthNew, heightNew; - CHECK(format->findInt32("width", &widthNew)); - CHECK(format->findInt32("height", &heightNew)); + CHECK(format->findInt32("stride", &widthNew)); + CHECK(format->findInt32("slice-height", &heightNew)); int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew; if (!format->findRect( diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp index 7b18348..3720085 100644 --- a/media/libstagefright/httplive/LiveSession.cpp +++ b/media/libstagefright/httplive/LiveSession.cpp @@ -81,6 +81,7 @@ LiveSession::LiveSession( mDiscontinuities.add(indexToType(i), new AnotherPacketSource(NULL /* meta */)); mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */)); mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */)); + mBuffering[i] = false; } } @@ -133,8 +134,26 @@ status_t 
LiveSession::dequeueAccessUnit( sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream); + ssize_t idx = typeToIndex(stream); if (!packetSource->hasBufferAvailable(&finalResult)) { - return finalResult == OK ? -EAGAIN : finalResult; + if (finalResult == OK) { + mBuffering[idx] = true; + return -EAGAIN; + } else { + return finalResult; + } + } + + if (mBuffering[idx]) { + if (mSwitchInProgress + || packetSource->isFinished(0) + || packetSource->getEstimatedDurationUs() > 10000000ll) { + mBuffering[idx] = false; + } + } + + if (mBuffering[idx]) { + return -EAGAIN; } // wait for counterpart @@ -498,7 +517,7 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { break; } - onCheckBandwidth(); + onCheckBandwidth(msg); break; } @@ -531,6 +550,19 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) { onSwapped(msg); break; } + + case kWhatCheckSwitchDown: + { + onCheckSwitchDown(); + break; + } + + case kWhatSwitchDown: + { + onSwitchDown(); + break; + } + default: TRESPASS(); break; @@ -554,6 +586,21 @@ LiveSession::StreamType LiveSession::indexToType(int idx) { return (StreamType)(1 << idx); } +// static +ssize_t LiveSession::typeToIndex(int32_t type) { + switch (type) { + case STREAMTYPE_AUDIO: + return 0; + case STREAMTYPE_VIDEO: + return 1; + case STREAMTYPE_SUBTITLES: + return 2; + default: + return -1; + }; + return -1; +} + void LiveSession::onConnect(const sp<AMessage> &msg) { AString url; CHECK(msg->findString("url", &url)); @@ -643,6 +690,9 @@ void LiveSession::finishDisconnect() { // (finishDisconnect, onFinishDisconnect2) cancelBandwidthSwitch(); + // cancel switch down monitor + mSwitchDownMonitor.clear(); + for (size_t i = 0; i < mFetcherInfos.size(); ++i) { mFetcherInfos.valueAt(i).mFetcher->stopAsync(); } @@ -919,14 +969,22 @@ size_t LiveSession::getBandwidthIndex() { } } - // Consider only 80% of the available bandwidth usable. 
- bandwidthBps = (bandwidthBps * 8) / 10; - // Pick the highest bandwidth stream below or equal to estimated bandwidth. index = mBandwidthItems.size() - 1; - while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth - > (size_t)bandwidthBps) { + while (index > 0) { + // consider only 80% of the available bandwidth, but if we are switching up, + // be even more conservative (70%) to avoid overestimating and immediately + // switching back. + size_t adjustedBandwidthBps = bandwidthBps; + if (index > mCurBandwidthIndex) { + adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10; + } else { + adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10; + } + if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) { + break; + } --index; } } @@ -1228,12 +1286,6 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask)); CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask)); - for (size_t i = 0; i < kMaxStreams; ++i) { - if (streamMask & indexToType(i)) { - CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mUri)); - } - } - int64_t timeUs; int32_t pickTrack; bool switching = false; @@ -1249,7 +1301,20 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { mRealTimeBaseUs = ALooper::GetNowUs() - timeUs; } + for (size_t i = 0; i < kMaxStreams; ++i) { + if (streamMask & indexToType(i)) { + if (switching) { + CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mNewUri)); + } else { + CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mUri)); + } + } + } + mNewStreamMask = streamMask | resumeMask; + if (switching) { + mSwapMask = mStreamMask & ~resumeMask; + } // Of all existing fetchers: // * Resume fetchers that are still needed and assign them original packet sources. @@ -1299,7 +1364,7 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { } AString uri; - uri = mStreams[i].mUri; + uri = switching ? 
mStreams[i].mNewUri : mStreams[i].mUri; sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str()); CHECK(fetcher != NULL); @@ -1312,7 +1377,8 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { // TRICKY: looping from i as earlier streams are already removed from streamMask for (size_t j = i; j < kMaxStreams; ++j) { - if ((streamMask & indexToType(j)) && uri == mStreams[j].mUri) { + const AString &streamUri = switching ? mStreams[j].mNewUri : mStreams[j].mUri; + if ((streamMask & indexToType(j)) && uri == streamUri) { sources[j] = mPacketSources.valueFor(indexToType(j)); if (timeUs >= 0) { @@ -1394,13 +1460,13 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) { // All fetchers have now been started, the configuration change // has completed. + cancelCheckBandwidthEvent(); scheduleCheckBandwidthEvent(); ALOGV("XXX configuration change completed."); mReconfigurationInProgress = false; if (switching) { mSwitchInProgress = true; - mSwapMask = streamMask; } else { mStreamMask = mNewStreamMask; } @@ -1419,6 +1485,15 @@ void LiveSession::onSwapped(const sp<AMessage> &msg) { int32_t stream; CHECK(msg->findInt32("stream", &stream)); + + ssize_t idx = typeToIndex(stream); + CHECK(idx >= 0); + if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) { + ALOGW("swapping stream type %d %s to empty stream", stream, mStreams[idx].mUri.c_str()); + } + mStreams[idx].mUri = mStreams[idx].mNewUri; + mStreams[idx].mNewUri.clear(); + mSwapMask &= ~stream; if (mSwapMask != 0) { return; @@ -1430,11 +1505,58 @@ void LiveSession::onSwapped(const sp<AMessage> &msg) { StreamType extraStream = (StreamType) (extraStreams & ~(extraStreams - 1)); swapPacketSource(extraStream); extraStreams &= ~extraStream; + + idx = typeToIndex(extraStream); + CHECK(idx >= 0); + if (mStreams[idx].mNewUri.empty()) { + ALOGW("swapping extra stream type %d %s to empty stream", + extraStream, mStreams[idx].mUri.c_str()); + } + mStreams[idx].mUri = mStreams[idx].mNewUri; + 
mStreams[idx].mNewUri.clear(); } tryToFinishBandwidthSwitch(); } +void LiveSession::onCheckSwitchDown() { + if (mSwitchDownMonitor == NULL) { + return; + } + + for (size_t i = 0; i < kMaxStreams; ++i) { + int32_t targetDuration; + sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(indexToType(i)); + sp<AMessage> meta = packetSource->getLatestDequeuedMeta(); + + if (meta != NULL && meta->findInt32("targetDuration", &targetDuration) ) { + int64_t bufferedDurationUs = packetSource->getEstimatedDurationUs(); + int64_t targetDurationUs = targetDuration * 1000000ll; + + if (bufferedDurationUs < targetDurationUs / 3) { + (new AMessage(kWhatSwitchDown, id()))->post(); + break; + } + } + } + + mSwitchDownMonitor->post(1000000ll); +} + +void LiveSession::onSwitchDown() { + if (mReconfigurationInProgress || mSwitchInProgress || mCurBandwidthIndex == 0) { + return; + } + + ssize_t bandwidthIndex = getBandwidthIndex(); + if (bandwidthIndex < mCurBandwidthIndex) { + changeConfiguration(-1, bandwidthIndex, false); + return; + } + + changeConfiguration(-1, mCurBandwidthIndex - 1, false); +} + // Mark switch done when: // 1. 
all old buffers are swapped out void LiveSession::tryToFinishBandwidthSwitch() { @@ -1472,6 +1594,28 @@ void LiveSession::cancelBandwidthSwitch() { mSwitchGeneration++; mSwitchInProgress = false; mSwapMask = 0; + + for (size_t i = 0; i < mFetcherInfos.size(); ++i) { + FetcherInfo& info = mFetcherInfos.editValueAt(i); + if (info.mToBeRemoved) { + info.mToBeRemoved = false; + } + } + + for (size_t i = 0; i < kMaxStreams; ++i) { + if (!mStreams[i].mNewUri.empty()) { + ssize_t j = mFetcherInfos.indexOfKey(mStreams[i].mNewUri); + if (j < 0) { + mStreams[i].mNewUri.clear(); + continue; + } + + const FetcherInfo &info = mFetcherInfos.valueAt(j); + info.mFetcher->stopAsync(); + mFetcherInfos.removeItemsAt(j); + mStreams[i].mNewUri.clear(); + } + } } bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) { @@ -1492,20 +1636,16 @@ bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) { } } -void LiveSession::onCheckBandwidth() { +void LiveSession::onCheckBandwidth(const sp<AMessage> &msg) { size_t bandwidthIndex = getBandwidthIndex(); if (canSwitchBandwidthTo(bandwidthIndex)) { changeConfiguration(-1ll /* timeUs */, bandwidthIndex); } else { - scheduleCheckBandwidthEvent(); + // Come back and check again 10 seconds later in case there is nothing to do now. + // If we DO change configuration, once that completes it'll schedule a new + // check bandwidth event with an incremented mCheckBandwidthGeneration. + msg->post(10000000ll); } - - // Handling the kWhatCheckBandwidth even here does _not_ automatically - // schedule another one on return, only an explicit call to - // scheduleCheckBandwidthEvent will do that. - // This ensures that only one configuration change is ongoing at any - // one time, once that completes it'll schedule another check bandwidth - // event. 
} void LiveSession::postPrepared(status_t err) { @@ -1522,6 +1662,9 @@ void LiveSession::postPrepared(status_t err) { notify->post(); mInPreparationPhase = false; + + mSwitchDownMonitor = new AMessage(kWhatCheckSwitchDown, id()); + mSwitchDownMonitor->post(); } } // namespace android diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h index 5423f0f..6be86cf 100644 --- a/media/libstagefright/httplive/LiveSession.h +++ b/media/libstagefright/httplive/LiveSession.h @@ -108,6 +108,8 @@ private: kWhatChangeConfiguration3 = 'chC3', kWhatFinishDisconnect2 = 'fin2', kWhatSwapped = 'swap', + kWhatCheckSwitchDown = 'ckSD', + kWhatSwitchDown = 'sDwn', }; struct BandwidthItem { @@ -124,7 +126,7 @@ private: struct StreamItem { const char *mType; - AString mUri; + AString mUri, mNewUri; size_t mCurDiscontinuitySeq; int64_t mLastDequeuedTimeUs; int64_t mLastSampleDurationUs; @@ -151,6 +153,7 @@ private: sp<IMediaHTTPService> mHTTPService; bool mInPreparationPhase; + bool mBuffering[kMaxStreams]; sp<HTTPBase> mHTTPDataSource; KeyedVector<String8, String8> mExtraHeaders; @@ -202,6 +205,7 @@ private: bool mFirstTimeUsValid; int64_t mFirstTimeUs; int64_t mLastSeekTimeUs; + sp<AMessage> mSwitchDownMonitor; KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs; KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs; @@ -239,6 +243,7 @@ private: static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *); static StreamType indexToType(int idx); + static ssize_t typeToIndex(int32_t type); void changeConfiguration( int64_t timeUs, size_t bandwidthIndex, bool pickTrack = false); @@ -246,6 +251,8 @@ private: void onChangeConfiguration2(const sp<AMessage> &msg); void onChangeConfiguration3(const sp<AMessage> &msg); void onSwapped(const sp<AMessage> &msg); + void onCheckSwitchDown(); + void onSwitchDown(); void tryToFinishBandwidthSwitch(); void scheduleCheckBandwidthEvent(); @@ -257,7 +264,7 @@ private: void 
cancelBandwidthSwitch(); bool canSwitchBandwidthTo(size_t bandwidthIndex); - void onCheckBandwidth(); + void onCheckBandwidth(const sp<AMessage> &msg); void finishDisconnect(); diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp index 82a4c39..3ef0f06 100644 --- a/media/libstagefright/httplive/PlaylistFetcher.cpp +++ b/media/libstagefright/httplive/PlaylistFetcher.cpp @@ -737,12 +737,6 @@ void PlaylistFetcher::onDownloadNext() { const int32_t lastSeqNumberInPlaylist = firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1; - if (mStartup && mSeqNumber >= 0 - && (mSeqNumber < firstSeqNumberInPlaylist || mSeqNumber > lastSeqNumberInPlaylist)) { - // in case we guessed wrong during reconfiguration, try fetching the latest content. - mSeqNumber = lastSeqNumberInPlaylist; - } - if (mDiscontinuitySeq < 0) { mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq(); } diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h index ee553d9..8cb8ed7 100644 --- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h +++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h @@ -63,7 +63,15 @@ protected: OMX_U32 numOutputBuffers, const char *mimeType); - virtual void updatePortDefinitions(); + virtual void updatePortDefinitions(bool updateCrop = true); + + void handlePortSettingsChange( + bool *portWillReset, uint32_t width, uint32_t height, + bool cropChanged = false, bool fakeStride = false); + + void copyYV12FrameToOutputBuffer( + uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, + size_t srcYStride, size_t srcUStride, size_t srcVStride); enum { kInputPortIndex = 0, diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp index 010063f..c74c3e7 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp +++ 
b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp @@ -42,7 +42,8 @@ AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta) mLastQueuedTimeUs(0), mEOSResult(OK), mLatestEnqueuedMeta(NULL), - mLatestDequeuedMeta(NULL) { + mLatestDequeuedMeta(NULL), + mQueuedDiscontinuityCount(0) { setFormat(meta); } @@ -122,6 +123,7 @@ status_t AnotherPacketSource::dequeueAccessUnit(sp<ABuffer> *buffer) { mFormat.clear(); } + --mQueuedDiscontinuityCount; return INFO_DISCONTINUITY; } @@ -210,6 +212,11 @@ void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) { mBuffers.push_back(buffer); mCondition.signal(); + int32_t discontinuity; + if (buffer->meta()->findInt32("discontinuity", &discontinuity)) { + ++mQueuedDiscontinuityCount; + } + if (mLatestEnqueuedMeta == NULL) { mLatestEnqueuedMeta = buffer->meta(); } else { @@ -226,6 +233,7 @@ void AnotherPacketSource::clear() { mBuffers.clear(); mEOSResult = OK; + mQueuedDiscontinuityCount = 0; mFormat = NULL; mLatestEnqueuedMeta = NULL; @@ -262,6 +270,7 @@ void AnotherPacketSource::queueDiscontinuity( mEOSResult = OK; mLastQueuedTimeUs = 0; mLatestEnqueuedMeta = NULL; + ++mQueuedDiscontinuityCount; sp<ABuffer> buffer = new ABuffer(0); buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type)); @@ -291,7 +300,10 @@ bool AnotherPacketSource::hasBufferAvailable(status_t *finalResult) { int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) { Mutex::Autolock autoLock(mLock); + return getBufferedDurationUs_l(finalResult); +} +int64_t AnotherPacketSource::getBufferedDurationUs_l(status_t *finalResult) { *finalResult = mEOSResult; if (mBuffers.empty()) { @@ -300,6 +312,7 @@ int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) { int64_t time1 = -1; int64_t time2 = -1; + int64_t durationUs = 0; List<sp<ABuffer> >::iterator it = mBuffers.begin(); while (it != mBuffers.end()) { @@ -307,20 +320,64 @@ int64_t AnotherPacketSource::getBufferedDurationUs(status_t 
*finalResult) { int64_t timeUs; if (buffer->meta()->findInt64("timeUs", &timeUs)) { - if (time1 < 0) { + if (time1 < 0 || timeUs < time1) { time1 = timeUs; } - time2 = timeUs; + if (time2 < 0 || timeUs > time2) { + time2 = timeUs; + } } else { // This is a discontinuity, reset everything. + durationUs += time2 - time1; time1 = time2 = -1; } ++it; } - return time2 - time1; + return durationUs + (time2 - time1); +} + +// A cheaper but less precise version of getBufferedDurationUs that we would like to use in +// LiveSession::dequeueAccessUnit to trigger downwards adaptation. +int64_t AnotherPacketSource::getEstimatedDurationUs() { + Mutex::Autolock autoLock(mLock); + if (mBuffers.empty()) { + return 0; + } + + if (mQueuedDiscontinuityCount > 0) { + status_t finalResult; + return getBufferedDurationUs_l(&finalResult); + } + + List<sp<ABuffer> >::iterator it = mBuffers.begin(); + sp<ABuffer> buffer = *it; + + int64_t startTimeUs; + buffer->meta()->findInt64("timeUs", &startTimeUs); + if (startTimeUs < 0) { + return 0; + } + + it = mBuffers.end(); + --it; + buffer = *it; + + int64_t endTimeUs; + buffer->meta()->findInt64("timeUs", &endTimeUs); + if (endTimeUs < 0) { + return 0; + } + + int64_t diffUs; + if (endTimeUs > startTimeUs) { + diffUs = endTimeUs - startTimeUs; + } else { + diffUs = startTimeUs - endTimeUs; + } + return diffUs; } status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) { diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h index 0c717d7..809a858 100644 --- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h +++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h @@ -49,6 +49,8 @@ struct AnotherPacketSource : public MediaSource { // presentation timestamps since the last discontinuity (if any). 
int64_t getBufferedDurationUs(status_t *finalResult); + int64_t getEstimatedDurationUs(); + status_t nextBufferTime(int64_t *timeUs); void queueAccessUnit(const sp<ABuffer> &buffer); @@ -83,7 +85,10 @@ private: sp<AMessage> mLatestEnqueuedMeta; sp<AMessage> mLatestDequeuedMeta; + size_t mQueuedDiscontinuityCount; + bool wasFormatChange(int32_t discontinuityType) const; + int64_t getBufferedDurationUs_l(status_t *finalResult); DISALLOW_EVIL_CONSTRUCTORS(AnotherPacketSource); }; diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp index 69b572e..741ac96 100644 --- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp +++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp @@ -123,13 +123,15 @@ void SoftVideoDecoderOMXComponent::initPorts( updatePortDefinitions(); } -void SoftVideoDecoderOMXComponent::updatePortDefinitions() { +void SoftVideoDecoderOMXComponent::updatePortDefinitions(bool updateCrop) { OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef; def->format.video.nFrameWidth = mWidth; def->format.video.nFrameHeight = mHeight; def->format.video.nStride = def->format.video.nFrameWidth; def->format.video.nSliceHeight = def->format.video.nFrameHeight; + def->nBufferSize = def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2; + def = &editPortInfo(kOutputPortIndex)->mDef; def->format.video.nFrameWidth = mIsAdaptive ? mAdaptiveMaxWidth : mWidth; def->format.video.nFrameHeight = mIsAdaptive ? 
mAdaptiveMaxHeight : mHeight; @@ -140,10 +142,90 @@ void SoftVideoDecoderOMXComponent::updatePortDefinitions() { (def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3) / 2; - mCropLeft = 0; - mCropTop = 0; - mCropWidth = mWidth; - mCropHeight = mHeight; + if (updateCrop) { + mCropLeft = 0; + mCropTop = 0; + mCropWidth = mWidth; + mCropHeight = mHeight; + } +} + +void SoftVideoDecoderOMXComponent::handlePortSettingsChange( + bool *portWillReset, uint32_t width, uint32_t height, bool cropChanged, bool fakeStride) { + *portWillReset = false; + bool sizeChanged = (width != mWidth || height != mHeight); + + if (sizeChanged || cropChanged) { + mWidth = width; + mHeight = height; + + bool updateCrop = !cropChanged; + if ((sizeChanged && !mIsAdaptive) + || width > mAdaptiveMaxWidth + || height > mAdaptiveMaxHeight) { + if (mIsAdaptive) { + if (width > mAdaptiveMaxWidth) { + mAdaptiveMaxWidth = width; + } + if (height > mAdaptiveMaxHeight) { + mAdaptiveMaxHeight = height; + } + } + updatePortDefinitions(updateCrop); + notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL); + mOutputPortSettingsChange = AWAITING_DISABLED; + *portWillReset = true; + } else { + updatePortDefinitions(updateCrop); + + if (fakeStride) { + // MAJOR HACK that is not pretty, it's just to fool the renderer to read the correct + // data. + // Some software decoders (e.g. SoftMPEG4) fill decoded frame directly to output + // buffer without considering the output buffer stride and slice height. So this is + // used to signal how the buffer is arranged. The alternative is to re-arrange the + // output buffer in SoftMPEG4, but that results in memcopies. 
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef; + def->format.video.nStride = mWidth; + def->format.video.nSliceHeight = mHeight; + } + + notify(OMX_EventPortSettingsChanged, kOutputPortIndex, + OMX_IndexConfigCommonOutputCrop, NULL); + } + } +} + +void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer( + uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, + size_t srcYStride, size_t srcUStride, size_t srcVStride) { + size_t dstYStride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth; + size_t dstUVStride = dstYStride / 2; + size_t dstHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight; + + for (size_t i = 0; i < dstHeight; ++i) { + if (i < mHeight) { + memcpy(dst, srcY, mWidth); + srcY += srcYStride; + } + dst += dstYStride; + } + + for (size_t i = 0; i < dstHeight / 2; ++i) { + if (i < mHeight / 2) { + memcpy(dst, srcU, mWidth / 2); + srcU += srcUStride; + } + dst += dstUVStride; + } + + for (size_t i = 0; i < dstHeight / 2; ++i) { + if (i < mHeight / 2) { + memcpy(dst, srcV, mWidth / 2); + srcV += srcVStride; + } + dst += dstUVStride; + } } OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter( diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp index 22c4e04..abdbc5c 100644 --- a/services/audiopolicy/AudioPolicyManager.cpp +++ b/services/audiopolicy/AudioPolicyManager.cpp @@ -2294,14 +2294,14 @@ status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch, } sp<DeviceDescriptor> srcDeviceDesc = mAvailableInputDevices.getDeviceFromId(patch->sources[0].id); + if (srcDeviceDesc == 0) { + return BAD_VALUE; + } //update source and sink with our own data as the data passed in the patch may // be incomplete. 
struct audio_patch newPatch = *patch; srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]); - if (srcDeviceDesc == 0) { - return BAD_VALUE; - } for (size_t i = 0; i < patch->num_sinks; i++) { if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) { diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index 36a93b2..10038c5 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -921,6 +921,13 @@ void Camera2Client::stopPreviewL() { "stop preview: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); } + { + // Ideally we should recover the override after recording stopped, but + // right now recording stream will live until here, so we are forced to + // recover here. TODO: find a better way to handle that (b/17495165) + SharedParameters::Lock l(mParameters); + l.mParameters.recoverOverriddenJpegSize(); + } // no break case Parameters::WAITING_FOR_PREVIEW_WINDOW: { SharedParameters::Lock l(mParameters); @@ -1075,34 +1082,65 @@ status_t Camera2Client::startRecordingL(Parameters ¶ms, bool restart) { // and we can't fail record start without stagefright asserting. 
params.previewCallbackFlags = 0; - res = updateProcessorStream< - StreamingProcessor, - &StreamingProcessor::updateRecordingStream>(mStreamingProcessor, - params); + bool recordingStreamNeedsUpdate; + res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate); if (res != OK) { - ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)", - __FUNCTION__, mCameraId, strerror(-res), res); + ALOGE("%s: Camera %d: Can't query recording stream", + __FUNCTION__, mCameraId); return res; } + if (recordingStreamNeedsUpdate) { + // Need to stop stream here in case updateRecordingStream fails + // Right now camera device cannot handle configureStream failure gracefully + // when device is streaming + res = mStreamingProcessor->stopStream(); + if (res != OK) { + ALOGE("%s: Camera %d: Can't stop streaming to update record stream", + __FUNCTION__, mCameraId); + return res; + } + res = mDevice->waitUntilDrained(); + if (res != OK) { + ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } + res = updateProcessorStream< + StreamingProcessor, + &StreamingProcessor::updateRecordingStream>(mStreamingProcessor, + params); + + // updateRecordingStream might trigger a configureStream call and device might fail + // configureStream due to jpeg size > video size. Try again with jpeg size overridden + // to video size. + // TODO: This may not be needed after we add stop streaming above. Remove that if + // it's the case. 
+ if (res == BAD_VALUE) { + overrideVideoSnapshotSize(params); + res = updateProcessorStream< + StreamingProcessor, + &StreamingProcessor::updateRecordingStream>(mStreamingProcessor, + params); + } + if (res != OK) { + ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + return res; + } + } + Vector<int32_t> outputStreams; outputStreams.push(getPreviewStreamId()); outputStreams.push(getRecordingStreamId()); res = mStreamingProcessor->startStream(StreamingProcessor::RECORD, outputStreams); - // try to reconfigure jpeg to video size if configureStreams failed - if (res == BAD_VALUE) { - ALOGV("%s: Camera %d: configure still size to video size before recording" - , __FUNCTION__, mCameraId); - params.overrideJpegSizeByVideoSize(); - res = updateProcessorStream(mJpegProcessor, params); - if (res != OK) { - ALOGE("%s: Camera %d: Can't configure still image size to video size: %s (%d)", - __FUNCTION__, mCameraId, strerror(-res), res); - return res; - } + // startStream might trigger a configureStream call and device might fail + // configureStream due to jpeg size > video size. Try again with jpeg size overridden + // to video size. 
+ if (res == BAD_VALUE) { + overrideVideoSnapshotSize(params); res = mStreamingProcessor->startStream(StreamingProcessor::RECORD, outputStreams); } @@ -1146,7 +1184,6 @@ void Camera2Client::stopRecording() { mCameraService->playSound(CameraService::SOUND_RECORDING); - l.mParameters.recoverOverriddenJpegSize(); res = startPreviewL(l.mParameters, true); if (res != OK) { ALOGE("%s: Camera %d: Unable to return to preview", @@ -1291,6 +1328,9 @@ status_t Camera2Client::cancelAutoFocus() { return OK; } + if (l.mParameters.zslMode) { + mZslProcessor->clearZslQueue(); + } } syncWithDevice(); @@ -1379,8 +1419,14 @@ status_t Camera2Client::setParameters(const String8& params) { SharedParameters::Lock l(mParameters); + Parameters::focusMode_t focusModeBefore = l.mParameters.focusMode; res = l.mParameters.set(params); if (res != OK) return res; + Parameters::focusMode_t focusModeAfter = l.mParameters.focusMode; + + if (l.mParameters.zslMode && focusModeAfter != focusModeBefore) { + mZslProcessor->clearZslQueue(); + } res = updateRequests(l.mParameters); @@ -1914,6 +1960,18 @@ status_t Camera2Client::updateProcessorStream(sp<ProcessorT> processor, return res; } +status_t Camera2Client::overrideVideoSnapshotSize(Parameters ¶ms) { + ALOGV("%s: Camera %d: configure still size to video size before recording" + , __FUNCTION__, mCameraId); + params.overrideJpegSizeByVideoSize(); + status_t res = updateProcessorStream(mJpegProcessor, params); + if (res != OK) { + ALOGE("%s: Camera %d: Can't override video snapshot size to video size: %s (%d)", + __FUNCTION__, mCameraId, strerror(-res), res); + } + return res; +} + const char* Camera2Client::kAutofocusLabel = "autofocus"; const char* Camera2Client::kTakepictureLabel = "take_picture"; diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h index f5c3a30..d68bb29 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.h +++ 
b/services/camera/libcameraservice/api1/Camera2Client.h @@ -208,6 +208,9 @@ private: // Wait until the camera device has received the latest control settings status_t syncWithDevice(); + + // Video snapshot jpeg size overriding helper function + status_t overrideVideoSnapshotSize(Parameters ¶ms); }; }; // namespace android diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp index 33bdaa3..1a4d9a6 100644 --- a/services/camera/libcameraservice/api1/CameraClient.cpp +++ b/services/camera/libcameraservice/api1/CameraClient.cpp @@ -122,6 +122,16 @@ status_t CameraClient::dump(int fd, const Vector<String16>& args) { mClientPid); len = (len > SIZE - 1) ? SIZE - 1 : len; write(fd, buffer, len); + + len = snprintf(buffer, SIZE, "Latest set parameters:\n"); + len = (len > SIZE - 1) ? SIZE - 1 : len; + write(fd, buffer, len); + + mLatestSetParameters.dump(fd, args); + + const char *enddump = "\n\n"; + write(fd, enddump, strlen(enddump)); + return mHardware->dump(fd, args); } @@ -550,6 +560,7 @@ status_t CameraClient::setParameters(const String8& params) { status_t result = checkPidAndHardware(); if (result != NO_ERROR) return result; + mLatestSetParameters = CameraParameters(params); CameraParameters p(params); return mHardware->setParameters(p); } diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h index 6779f5e..63a9d0f 100644 --- a/services/camera/libcameraservice/api1/CameraClient.h +++ b/services/camera/libcameraservice/api1/CameraClient.h @@ -142,6 +142,9 @@ private: // of the original one), we allocate mPreviewBuffer and reuse it if possible. sp<MemoryHeapBase> mPreviewBuffer; + // Debugging information + CameraParameters mLatestSetParameters; + // We need to avoid the deadlock when the incoming command thread and // the CameraHardwareInterface callback thread both want to grab mLock. 
// An extra flag is used to tell the callback thread that it should stop diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp index 8d00590..ed9137f 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.cpp +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -76,9 +76,29 @@ status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) { res = getFilteredSizes(MAX_VIDEO_SIZE, &availableVideoSizes); if (res != OK) return res; - // TODO: Pick more intelligently - previewWidth = availablePreviewSizes[0].width; - previewHeight = availablePreviewSizes[0].height; + // Select initial preview and video size that's under the initial bound and + // on the list of both preview and recording sizes + previewWidth = 0; + previewHeight = 0; + for (size_t i = 0 ; i < availablePreviewSizes.size(); i++) { + int newWidth = availablePreviewSizes[i].width; + int newHeight = availablePreviewSizes[i].height; + if (newWidth >= previewWidth && newHeight >= previewHeight && + newWidth <= MAX_INITIAL_PREVIEW_WIDTH && + newHeight <= MAX_INITIAL_PREVIEW_HEIGHT) { + for (size_t j = 0; j < availableVideoSizes.size(); j++) { + if (availableVideoSizes[j].width == newWidth && + availableVideoSizes[j].height == newHeight) { + previewWidth = newWidth; + previewHeight = newHeight; + } + } + } + } + if (previewWidth == 0) { + ALOGE("%s: No initial preview size can be found!", __FUNCTION__); + return BAD_VALUE; + } videoWidth = previewWidth; videoHeight = previewHeight; diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h index 5e6e6ab..815cc55 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.h +++ b/services/camera/libcameraservice/api1/client2/Parameters.h @@ -179,8 +179,13 @@ struct Parameters { // Number of zoom steps to simulate static const unsigned int 
NUM_ZOOM_STEPS = 100;
     // Max preview size allowed
+    // This is set to a 1:1 value to allow for any aspect ratio that has
+    // a max long side of 1920 pixels
     static const unsigned int MAX_PREVIEW_WIDTH = 1920;
-    static const unsigned int MAX_PREVIEW_HEIGHT = 1080;
+    static const unsigned int MAX_PREVIEW_HEIGHT = 1920;
+    // Initial max preview/recording size bound
+    static const int MAX_INITIAL_PREVIEW_WIDTH = 1920;
+    static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
     // Aspect ratio tolerance
     static const float ASPECT_RATIO_TOLERANCE = 0.001;
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index ab0af0d..9e7fff8 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -318,6 +318,46 @@ status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
     return OK;
 }
 
+// Returns (via *needsUpdate) whether the existing recording stream must be
+// recreated for the requested video size in params. BUGFIX vs. original
+// patch: *needsUpdate was unconditionally overwritten with false after the
+// size-mismatch check, so callers never saw true unless no stream existed.
+status_t StreamingProcessor::recordingStreamNeedsUpdate(
+        const Parameters &params, bool *needsUpdate) {
+    status_t res;
+
+    if (needsUpdate == 0) {
+        ALOGE("%s: Camera %d: invalid argument", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    if (mRecordingStreamId == NO_STREAM) {
+        *needsUpdate = true;
+        return OK;
+    }
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    uint32_t currentWidth, currentHeight;
+    res = device->getStreamInfo(mRecordingStreamId,
+            &currentWidth, &currentHeight, 0);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Error querying recording output stream info: "
+                "%s (%d)", __FUNCTION__, mId,
+                strerror(-res), res);
+        return res;
+    }
+
+    if (mRecordingConsumer == 0 || currentWidth != (uint32_t)params.videoWidth ||
+            currentHeight != (uint32_t)params.videoHeight) {
+        *needsUpdate = true;
+    } else {
+        *needsUpdate = false;
+    }
+    return res;
+}
+
 status_t
StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { ATRACE_CALL(); status_t res; diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h index 833bb8f..8466af4 100644 --- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h +++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h @@ -54,6 +54,9 @@ class StreamingProcessor: status_t setRecordingBufferCount(size_t count); status_t updateRecordingRequest(const Parameters ¶ms); + // If needsUpdate is set to true, a updateRecordingStream call with params will recreate + // recording stream + status_t recordingStreamNeedsUpdate(const Parameters ¶ms, bool *needsUpdate); status_t updateRecordingStream(const Parameters ¶ms); status_t deleteRecordingStream(); int getRecordingStreamId() const; diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp index 2d31275..fa65b74 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp @@ -44,6 +44,7 @@ ZslProcessor3::ZslProcessor3( sp<Camera2Client> client, wp<CaptureSequencer> sequencer): Thread(false), + mLatestClearedBufferTimestamp(0), mState(RUNNING), mClient(client), mSequencer(sequencer), @@ -107,7 +108,6 @@ void ZslProcessor3::onResultAvailable(const CaptureResult &result) { ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__); return; } - (void)timestamp; entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT); if (entry.count == 0) { @@ -120,6 +120,9 @@ void ZslProcessor3::onResultAvailable(const CaptureResult &result) { if (mState != RUNNING) return; + // Corresponding buffer has been cleared. 
No need to push into mFrameList
+    if (timestamp <= mLatestClearedBufferTimestamp) return;
+
     mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
     mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
 }
@@ -392,7 +395,7 @@ status_t ZslProcessor3::clearZslQueueLocked() {
     if (mZslStream != 0) {
         // clear result metadata list first.
         clearZslResultQueueLocked();
-        return mZslStream->clearInputRingBuffer();
+        return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
     }
     return OK;
 }
@@ -454,6 +457,22 @@ void ZslProcessor3::dumpZslQueue(int fd) const {
     }
 }
 
+// Returns true for AF modes in which the lens position is fixed (no focuser
+// sweep), so AF-state checks can be skipped when picking a ZSL candidate.
+// BUGFIX vs. original patch: removed unreachable `break;` after `return false;`.
+bool ZslProcessor3::isFixedFocusMode(uint8_t afMode) const {
+    switch (afMode) {
+        case ANDROID_CONTROL_AF_MODE_AUTO:
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+        case ANDROID_CONTROL_AF_MODE_MACRO:
+            return false;
+        case ANDROID_CONTROL_AF_MODE_OFF:
+        case ANDROID_CONTROL_AF_MODE_EDOF:
+            return true;
+        default:
+            ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
+            return false;
+    }
+}
+
 nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
     /**
      * Find the smallest timestamp we know about so far
@@ -499,8 +519,16 @@ nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
                 continue;
             }
 
-            // Check AF state if device has focuser
-            if (mHasFocuser) {
+            entry = frame.find(ANDROID_CONTROL_AF_MODE);
+            if (entry.count == 0) {
+                ALOGW("%s: ZSL queue frame has no AF mode field!",
+                        __FUNCTION__);
+                continue;
+            }
+            uint8_t afMode = entry.data.u8[0];
+
+            // Check AF state if device has focuser and focus mode isn't fixed
+            if (mHasFocuser && !isFixedFocusMode(afMode)) {
                 // Make sure the candidate frame has good focus.
entry = frame.find(ANDROID_CONTROL_AF_STATE); if (entry.count == 0) { diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h index daa352b..2975f7c 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h @@ -82,6 +82,7 @@ class ZslProcessor3 : private: static const nsecs_t kWaitDuration = 10000000; // 10 ms + nsecs_t mLatestClearedBufferTimestamp; enum { RUNNING, @@ -132,6 +133,8 @@ class ZslProcessor3 : void dumpZslQueue(int id) const; nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const; + + bool isFixedFocusMode(uint8_t afMode) const; }; diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp index 92bf81b..81330ea 100644 --- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp @@ -315,20 +315,24 @@ status_t Camera3ZslStream::enqueueInputBufferByTimestamp( return OK; } -status_t Camera3ZslStream::clearInputRingBuffer() { +status_t Camera3ZslStream::clearInputRingBuffer(nsecs_t* latestTimestamp) { Mutex::Autolock l(mLock); - return clearInputRingBufferLocked(); + return clearInputRingBufferLocked(latestTimestamp); } -status_t Camera3ZslStream::clearInputRingBufferLocked() { +status_t Camera3ZslStream::clearInputRingBufferLocked(nsecs_t* latestTimestamp) { + + if (latestTimestamp) { + *latestTimestamp = mProducer->getLatestTimestamp(); + } mInputBufferQueue.clear(); return mProducer->clear(); } status_t Camera3ZslStream::disconnectLocked() { - clearInputRingBufferLocked(); + clearInputRingBufferLocked(NULL); return Camera3OutputStream::disconnectLocked(); } diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h index d89c38d..5323a49 100644 --- 
a/services/camera/libcameraservice/device3/Camera3ZslStream.h +++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h @@ -59,8 +59,10 @@ class Camera3ZslStream : /** * Clears the buffers that can be used by enqueueInputBufferByTimestamp + * latestTimestamp will be filled with the largest timestamp of buffers + * being cleared, 0 if there is no buffer being clear. */ - status_t clearInputRingBuffer(); + status_t clearInputRingBuffer(nsecs_t* latestTimestamp); protected: @@ -100,7 +102,7 @@ class Camera3ZslStream : // Disconnet the Camera3ZslStream specific bufferQueues. virtual status_t disconnectLocked(); - status_t clearInputRingBufferLocked(); + status_t clearInputRingBufferLocked(nsecs_t* latestTimestamp); }; // class Camera3ZslStream diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp index e4ec5fd..f8562ec 100644 --- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp +++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp @@ -41,7 +41,8 @@ RingBufferConsumer::RingBufferConsumer(const sp<IGraphicBufferConsumer>& consume uint32_t consumerUsage, int bufferCount) : ConsumerBase(consumer), - mBufferCount(bufferCount) + mBufferCount(bufferCount), + mLatestTimestamp(0) { mConsumer->setConsumerUsageBits(consumerUsage); mConsumer->setMaxAcquiredBufferCount(bufferCount); @@ -152,6 +153,14 @@ status_t RingBufferConsumer::clear() { return OK; } +nsecs_t RingBufferConsumer::getLatestTimestamp() { + Mutex::Autolock _l(mMutex); + if (mBufferItemList.size() == 0) { + return 0; + } + return mLatestTimestamp; +} + void RingBufferConsumer::pinBufferLocked(const BufferItem& item) { List<RingBufferItem>::iterator it, end; @@ -302,6 +311,13 @@ void RingBufferConsumer::onFrameAvailable() { item.mTimestamp, mBufferItemList.size(), mBufferCount); + if (item.mTimestamp < mLatestTimestamp) { + BI_LOGE("Timestamp decreases from %" PRId64 " to %" PRId64, + mLatestTimestamp, 
item.mTimestamp); + } + + mLatestTimestamp = item.mTimestamp; + item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer; } // end of mMutex lock diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h index a03736d..da97a11 100644 --- a/services/camera/libcameraservice/gui/RingBufferConsumer.h +++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h @@ -159,6 +159,9 @@ class RingBufferConsumer : public ConsumerBase, // Release all the non-pinned buffers in the ring buffer status_t clear(); + // Return 0 if RingBuffer is empty, otherwise return timestamp of latest buffer. + nsecs_t getLatestTimestamp(); + private: // Override ConsumerBase::onFrameAvailable @@ -180,6 +183,9 @@ class RingBufferConsumer : public ConsumerBase, // List of acquired buffers in our ring buffer List<RingBufferItem> mBufferItemList; const int mBufferCount; + + // Timestamp of latest buffer + nsecs_t mLatestTimestamp; }; } // namespace android