-rw-r--r--  include/media/MediaPlayerInterface.h |  1
-rw-r--r--  include/media/stagefright/Utils.h |  3
-rwxr-xr-x  libvideoeditor/lvpp/VideoEditorPlayer.h |  1
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.h |  8
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 28
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.h | 12
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp | 48
-rw-r--r--  media/libstagefright/MPEG4Extractor.cpp | 26
-rw-r--r--  media/libstagefright/MetaData.cpp |  3
-rw-r--r--  media/libstagefright/OMXCodec.cpp |  8
-rw-r--r--  media/libstagefright/Utils.cpp |  5
-rw-r--r--  media/libstagefright/mpeg2ts/ATSParser.cpp |  5
-rw-r--r--  services/audioflinger/Threads.cpp | 59
-rw-r--r--  services/audioflinger/Threads.h |  2
-rw-r--r--  services/camera/libcameraservice/api1/client2/Parameters.cpp | 51
-rw-r--r--  services/camera/libcameraservice/api1/client2/Parameters.h |  2
16 files changed, 154 insertions, 108 deletions
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 3b151ef..cc244f0 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -99,6 +99,7 @@ public:
         virtual status_t getPosition(uint32_t *position) const = 0;
         virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
         virtual int getSessionId() const = 0;
+        virtual audio_stream_type_t getAudioStreamType() const = 0;

         // If no callback is specified, use the "write" API below to submit
         // audio data.
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index c24f612..bbad271 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -57,7 +57,8 @@ status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime);
 status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta);

 // Check whether the stream defined by meta can be offloaded to hardware
-bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming);
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
+                      bool isStreaming, audio_stream_type_t streamType);

 }  // namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index ab6d731..5862c08 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -62,6 +62,7 @@ class VideoEditorPlayer : public MediaPlayerInterface {
         virtual void pause();
         virtual void close();
         void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
+        virtual audio_stream_type_t getAudioStreamType() const { return mStreamType; }

         void setVolume(float left, float right);
         virtual status_t dump(int fd,const Vector<String16>& args) const;
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 05d44d4..a486cb5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -100,7 +100,10 @@ class MediaPlayerService : public BnMediaPlayerService
         virtual void flush();
         virtual void pause();
         virtual void close();
-        void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
+        void setAudioStreamType(audio_stream_type_t streamType) {
+            mStreamType = streamType; }
+        virtual audio_stream_type_t getAudioStreamType() const { return mStreamType; }
+
         void setVolume(float left, float right);
         virtual status_t setPlaybackRatePermille(int32_t ratePermille);
         status_t setAuxEffectSendLevel(float level);
@@ -207,6 +210,9 @@ class MediaPlayerService : public BnMediaPlayerService
         virtual void pause() {}
         virtual void close() {}
         void setAudioStreamType(audio_stream_type_t streamType) {}
+        // stream type is not used for AudioCache
+        virtual audio_stream_type_t getAudioStreamType() const { return AUDIO_STREAM_DEFAULT; }
+
         void setVolume(float left, float right) {}
         virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
         uint32_t sampleRate() const { return mSampleRate; }
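
The four header changes above add a getAudioStreamType() accessor to the audio-sink interfaces so a player can ask which stream type the sink was configured with instead of hard-coding music. A minimal standalone sketch of the fallback pattern the player code relies on; the enum values and the AudioSink type here are illustrative stand-ins, not the full AOSP declarations:

    #include <cstdio>

    // Illustrative stand-ins for the AOSP audio_stream_type_t values and sink interface.
    enum audio_stream_type_t { AUDIO_STREAM_DEFAULT = -1, AUDIO_STREAM_MUSIC = 3 };

    struct AudioSink {
        virtual ~AudioSink() {}
        virtual audio_stream_type_t getAudioStreamType() const = 0;
    };

    // Use the sink's configured stream type when a sink is attached,
    // otherwise fall back to music, as AwesomePlayer::initAudioDecoder() now does.
    audio_stream_type_t pickStreamType(const AudioSink *sink) {
        return sink != nullptr ? sink->getAudioStreamType() : AUDIO_STREAM_MUSIC;
    }

    int main() {
        printf("no sink -> stream type %d\n", pickStreamType(nullptr));  // prints 3 (music)
        return 0;
    }

The AudioCache variant returns AUDIO_STREAM_DEFAULT because, as its comment notes, the stream type is never consulted on that path.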
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index f9d9020..78dad19 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -973,7 +973,7 @@ status_t StagefrightRecorder::startRTPRecording() {
         return err;
     }

-    err = setupVideoEncoder(mediaSource, mVideoBitRate, &source);
+    err = setupVideoEncoder(mediaSource, &source);
     if (err != OK) {
         return err;
     }
@@ -1017,7 +1017,7 @@ status_t StagefrightRecorder::startMPEG2TSRecording() {
        }

        sp<MediaSource> encoder;
-        err = setupVideoEncoder(mediaSource, mVideoBitRate, &encoder);
+        err = setupVideoEncoder(mediaSource, &encoder);

        if (err != OK) {
            return err;
@@ -1383,12 +1383,11 @@ status_t StagefrightRecorder::setupCameraSource(

 status_t StagefrightRecorder::setupVideoEncoder(
         sp<MediaSource> cameraSource,
-        int32_t videoBitRate,
         sp<MediaSource> *source) {
     source->clear();

     sp<MetaData> enc_meta = new MetaData;
-    enc_meta->setInt32(kKeyBitRate, videoBitRate);
+    enc_meta->setInt32(kKeyBitRate, mVideoBitRate);
     enc_meta->setInt32(kKeyFrameRate, mFrameRate);

     switch (mVideoEncoder) {
@@ -1495,16 +1494,11 @@ status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
     return OK;
 }

-status_t StagefrightRecorder::setupMPEG4Recording(
-        int outputFd,
-        int32_t videoWidth, int32_t videoHeight,
-        int32_t videoBitRate,
-        int32_t *totalBitRate,
-        sp<MediaWriter> *mediaWriter) {
-    mediaWriter->clear();
+status_t StagefrightRecorder::setupMPEG4Recording(int32_t *totalBitRate) {
+    mWriter.clear();
     *totalBitRate = 0;
     status_t err = OK;
-    sp<MediaWriter> writer = new MPEG4Writer(outputFd);
+    sp<MediaWriter> writer = new MPEG4Writer(mOutputFd);

     if (mVideoSource < VIDEO_SOURCE_LIST_END) {

@@ -1515,13 +1509,13 @@ status_t StagefrightRecorder::setupMPEG4Recording(
         }

         sp<MediaSource> encoder;
-        err = setupVideoEncoder(mediaSource, videoBitRate, &encoder);
+        err = setupVideoEncoder(mediaSource, &encoder);
         if (err != OK) {
             return err;
         }

         writer->addSource(encoder);
-        *totalBitRate += videoBitRate;
+        *totalBitRate += mVideoBitRate;
     }

     // Audio source is added at the end if it exists.
@@ -1555,7 +1549,7 @@ status_t StagefrightRecorder::setupMPEG4Recording(
     }

     writer->setListener(mListener);
-    *mediaWriter = writer;
+    mWriter = writer;
     return OK;
 }

@@ -1578,9 +1572,7 @@ void StagefrightRecorder::setupMPEG4MetaData(int64_t startTimeUs, int32_t totalB
 status_t StagefrightRecorder::startMPEG4Recording() {
     int32_t totalBitRate;

-    status_t err = setupMPEG4Recording(
-            mOutputFd, mVideoWidth, mVideoHeight,
-            mVideoBitRate, &totalBitRate, &mWriter);
+    status_t err = setupMPEG4Recording(&totalBitRate);
     if (err != OK) {
         return err;
     }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 31f09e0..bc43488 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -124,12 +124,7 @@ private:
     // frame buffers will be queued and dequeued
     sp<SurfaceMediaSource> mSurfaceMediaSource;

-    status_t setupMPEG4Recording(
-        int outputFd,
-        int32_t videoWidth, int32_t videoHeight,
-        int32_t videoBitRate,
-        int32_t *totalBitRate,
-        sp<MediaWriter> *mediaWriter);
+    status_t setupMPEG4Recording(int32_t *totalBitRate);
     void setupMPEG4MetaData(int64_t startTimeUs, int32_t totalBitRate, sp<MetaData> *meta);
     status_t startMPEG4Recording();
@@ -151,10 +146,7 @@ private:
     status_t setupSurfaceMediaSource();

    status_t setupAudioEncoder(const sp<MediaWriter>& writer);
-    status_t setupVideoEncoder(
-            sp<MediaSource> cameraSource,
-            int32_t videoBitRate,
-            sp<MediaSource> *source);
+    status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaSource> *source);

     // Encoding parameter handling utilities
     status_t setParameter(const String8 &key, const String8 &value);
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index e1f6563..130207d 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -1202,8 +1202,7 @@ void AwesomePlayer::initRenderer_l() {
     setVideoScalingMode_l(mVideoScalingMode);
     if (USE_SURFACE_ALLOC
             && !strncmp(component, "OMX.", 4)
-            && strncmp(component, "OMX.google.", 11)
-            && strcmp(component, "OMX.Nvidia.mpeg2v.decode")) {
+            && strncmp(component, "OMX.google.", 11)) {
         // Hardware decoders avoid the CPU color conversion by decoding
         // directly to ANativeBuffers, so we must use a renderer that
         // just pushes those buffers to the ANativeWindow.
@@ -1500,7 +1499,13 @@ status_t AwesomePlayer::initAudioDecoder() {
     // This doesn't guarantee that the hardware has a free stream
     // but it avoids us attempting to open (and re-open) an offload
     // stream to hardware that doesn't have the necessary codec
-    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL), isStreamingHTTP());
+    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+    if (mAudioSink != NULL) {
+        streamType = mAudioSink->getAudioStreamType();
+    }
+
+    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL),
+                                     isStreamingHTTP(), streamType);

     if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
         ALOGV("createAudioPlayer: bypass OMX (raw)");
@@ -1932,7 +1937,7 @@ void AwesomePlayer::onVideoEvent() {
                 ++mStats.mNumVideoFramesDropped;
             }

-            postVideoEvent_l();
+            postVideoEvent_l(0);
             return;
         }
     }
@@ -1972,6 +1977,41 @@ void AwesomePlayer::onVideoEvent() {
         return;
     }

+    /* get next frame time */
+    if (wasSeeking == NO_SEEK) {
+        MediaSource::ReadOptions options;
+        for (;;) {
+            status_t err = mVideoSource->read(&mVideoBuffer, &options);
+            if (err != OK) {
+                // deal with any errors next time
+                CHECK(mVideoBuffer == NULL);
+                postVideoEvent_l(0);
+                return;
+            }
+
+            if (mVideoBuffer->range_length() != 0) {
+                break;
+            }
+
+            // Some decoders, notably the PV AVC software decoder
+            // return spurious empty buffers that we just want to ignore.
+
+            mVideoBuffer->release();
+            mVideoBuffer = NULL;
+        }
+
+        {
+            Mutex::Autolock autoLock(mStatsLock);
+            ++mStats.mNumVideoFramesDecoded;
+        }
+
+        int64_t nextTimeUs;
+        CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &nextTimeUs));
+        int64_t delayUs = nextTimeUs - ts->getRealTimeUs() + mTimeSourceDeltaUs;
+        postVideoEvent_l(delayUs > 10000 ? 10000 : delayUs < 0 ? 0 : delayUs);
+        return;
+    }
+
     postVideoEvent_l();
 }
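
Besides passing the sink's stream type into canOffloadStream(), the AwesomePlayer change decodes the next frame ahead of time and schedules the next video event from that frame's timestamp, clamped so the wait is never negative and never more than 10 ms. A standalone sketch of that clamp; the helper name is illustrative, not AOSP code:

    #include <cstdint>
    #include <cstdio>

    // Clamp the computed wait until the next video event to [0, 10000] microseconds,
    // mirroring the expression now handed to postVideoEvent_l() in onVideoEvent().
    int64_t clampVideoEventDelayUs(int64_t delayUs) {
        return delayUs > 10000 ? 10000 : (delayUs < 0 ? 0 : delayUs);
    }

    int main() {
        printf("%lld %lld %lld\n",
               (long long) clampVideoEventDelayUs(-5000),    // 0: the frame is already late
               (long long) clampVideoEventDelayUs(4000),     // 4000: wait until its timestamp
               (long long) clampVideoEventDelayUs(50000));   // 10000: re-check at most every 10 ms
        return 0;
    }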
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index cbc169b..8cf5026 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -1379,19 +1379,33 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
             } else {
                 // No size was specified. Pick a conservatively large size.
                 int32_t width, height;
-                if (mLastTrack->meta->findInt32(kKeyWidth, &width) &&
-                        mLastTrack->meta->findInt32(kKeyHeight, &height)) {
-                    mLastTrack->meta->setInt32(kKeyMaxInputSize, width * height * 3 / 2);
-                } else {
+                if (!mLastTrack->meta->findInt32(kKeyWidth, &width) ||
+                        !mLastTrack->meta->findInt32(kKeyHeight, &height)) {
                     ALOGE("No width or height, assuming worst case 1080p");
-                    mLastTrack->meta->setInt32(kKeyMaxInputSize, 3110400);
+                    width = 1920;
+                    height = 1080;
+                }
+
+                const char *mime;
+                CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
+                if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+                    // AVC requires compression ratio of at least 2, and uses
+                    // macroblocks
+                    max_size = ((width + 15) / 16) * ((height + 15) / 16) * 192;
+                } else {
+                    // For all other formats there is no minimum compression
+                    // ratio. Use compression ratio of 1.
+                    max_size = width * height * 3 / 2;
                 }
+                mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size);
             }
             *offset += chunk_size;

-            // Calculate average frame rate.
+            // NOTE: setting another piece of metadata invalidates any pointers (such as the
+            // mimetype) previously obtained, so don't cache them.
             const char *mime;
             CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
+            // Calculate average frame rate.
             if (!strncasecmp("video/", mime, 6)) {
                 size_t nSamples = mLastTrack->sampleTable->countSamples();
                 int64_t durationUs;
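
The new max-input-size estimate budgets AVC at half of a raw YUV420 frame, counted in 16x16 macroblocks (384 bytes of YUV420 pixels per macroblock, divided by the minimum 2:1 compression), and every other codec at one full uncompressed frame. A standalone sketch of the arithmetic, with an illustrative helper name:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Worst-case compressed frame size: AVC is bounded by a 2:1 minimum compression
    // ratio over 16x16 macroblocks (384 bytes of YUV420 per macroblock / 2 = 192);
    // other codecs are budgeted one uncompressed YUV420 frame.
    int32_t maxInputSize(const char *mime, int32_t width, int32_t height) {
        if (strcmp(mime, "video/avc") == 0) {
            return ((width + 15) / 16) * ((height + 15) / 16) * 192;
        }
        return width * height * 3 / 2;
    }

    int main() {
        printf("avc 1080p:   %d bytes\n", maxInputSize("video/avc", 1920, 1080));      // 1566720
        printf("other 1080p: %d bytes\n", maxInputSize("video/mp4v-es", 1920, 1080));  // 3110400
        return 0;
    }

The non-AVC 1080p result, 3,110,400 bytes, is exactly the worst-case constant the old code hard-coded.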
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index ae6ae2d..7b60afc 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -89,6 +89,9 @@ bool MetaData::setRect(
     return setData(key, TYPE_RECT, &r, sizeof(r));
 }

+/**
+ * Note that the returned pointer becomes invalid when additional metadata is set.
+ */
 bool MetaData::findCString(uint32_t key, const char **value) {
     uint32_t type;
     const void *data;
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 063ab49..625922f 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -361,12 +361,7 @@ sp<MediaSource> OMXCodec::Create(
         observer->setCodec(codec);

         err = codec->configureCodec(meta);
-
         if (err == OK) {
-            if (!strcmp("OMX.Nvidia.mpeg2v.decode", componentName)) {
-                codec->mFlags |= kOnlySubmitOneInputBufferAtOneTime;
-            }
-
             return codec;
         }

@@ -1359,8 +1354,7 @@ OMXCodec::OMXCodec(
       mLeftOverBuffer(NULL),
       mPaused(false),
       mNativeWindow(
-              (!strncmp(componentName, "OMX.google.", 11)
-              || !strcmp(componentName, "OMX.Nvidia.mpeg2v.decode"))
+              (!strncmp(componentName, "OMX.google.", 11))
                         ? NULL : nativeWindow) {
     mPortStatus[kPortIndexInput] = ENABLED;
     mPortStatus[kPortIndexOutput] = ENABLED;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4db8e80..9041c21 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -540,7 +540,8 @@ const struct mime_conv_t* p = &mimeLookup[0];
     return BAD_VALUE;
 }

-bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming)
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
+                      bool isStreaming, audio_stream_type_t streamType)
 {
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -594,7 +595,7 @@ bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming)

     info.bit_rate = brate;

-    info.stream_type = AUDIO_STREAM_MUSIC;
+    info.stream_type = streamType;
     info.has_video = hasVideo;
     info.is_streaming = isStreaming;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index f87b9da..cb57a2f 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -1199,7 +1199,10 @@ status_t ATSParser::parseTS(ABitReader *br) {
     unsigned sync_byte = br->getBits(8);
     CHECK_EQ(sync_byte, 0x47u);

-    MY_LOGV("transport_error_indicator = %u", br->getBits(1));
+    if (br->getBits(1)) {  // transport_error_indicator
+        // silently ignore.
+        return OK;
+    }

     unsigned payload_unit_start_indicator = br->getBits(1);
     ALOGV("payload_unit_start_indicator = %u", payload_unit_start_indicator);
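
The ATSParser change drops transport packets whose transport_error_indicator bit is set instead of merely logging the bit and parsing on. A standalone sketch of that header check on a raw 188-byte TS packet; the real parser reads these fields through ABitReader and asserts on the sync byte, so the helper below is only an illustration of the bit layout:

    #include <cstdint>
    #include <cstdio>

    // An MPEG-TS packet starts with a 0x47 sync byte; bit 7 of the next byte is
    // transport_error_indicator. A set error bit means the packet should be dropped.
    bool shouldDropTsPacket(const uint8_t packet[188]) {
        return packet[0] != 0x47 || (packet[1] & 0x80) != 0;
    }

    int main() {
        uint8_t good[188]    = { 0x47, 0x40 };  // payload_unit_start_indicator set, no error
        uint8_t errored[188] = { 0x47, 0x80 };  // transport_error_indicator set
        printf("%d %d\n", shouldDropTsPacket(good), shouldDropTsPacket(errored));  // 0 1
        return 0;
    }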
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 6987dbd..4e23129 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3946,8 +3946,7 @@ AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
     :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD),
         mHwPaused(false),
         mFlushPending(false),
-        mPausedBytesRemaining(0),
-        mPreviousTrack(NULL)
+        mPausedBytesRemaining(0)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
     mStandby = true;
@@ -3994,23 +3993,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
         sp<Track> l = mLatestActiveTrack.promote();
         bool last = l.get() == track;

-        if (mPreviousTrack != NULL) {
-            if (track != mPreviousTrack) {
-                // Flush any data still being written from last track
-                mBytesRemaining = 0;
-                if (mPausedBytesRemaining) {
-                    // Last track was paused so we also need to flush saved
-                    // mixbuffer state and invalidate track so that it will
-                    // re-submit that unwritten data when it is next resumed
-                    mPausedBytesRemaining = 0;
-                    // Invalidate is a bit drastic - would be more efficient
-                    // to have a flag to tell client that some of the
-                    // previously written data was lost
-                    mPreviousTrack->invalidate();
-                }
-            }
-        }
-        mPreviousTrack = track;
         if (track->isPausing()) {
             track->setPaused();
             if (last) {
@@ -4058,6 +4040,31 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
             }

             if (last) {
+                sp<Track> previousTrack = mPreviousTrack.promote();
+                if (previousTrack != 0) {
+                    if (track != previousTrack.get()) {
+                        // Flush any data still being written from last track
+                        mBytesRemaining = 0;
+                        if (mPausedBytesRemaining) {
+                            // Last track was paused so we also need to flush saved
+                            // mixbuffer state and invalidate track so that it will
+                            // re-submit that unwritten data when it is next resumed
+                            mPausedBytesRemaining = 0;
+                            // Invalidate is a bit drastic - would be more efficient
+                            // to have a flag to tell client that some of the
+                            // previously written data was lost
+                            previousTrack->invalidate();
+                        }
+                        // flush data already sent to the DSP if changing audio session as audio
+                        // comes from a different source. Also invalidate previous track to force a
+                        // seek when resuming.
+                        if (previousTrack->sessionId() != track->sessionId()) {
+                            previousTrack->invalidate();
+                            mFlushPending = true;
+                        }
+                    }
+                }
+                mPreviousTrack = track;
                 // reset retry count
                 track->mRetryCount = kMaxTrackRetriesOffload;
                 mActiveTrack = t;
@@ -4076,14 +4083,18 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
                 track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
                 // do not drain if no data was ever sent to HAL (mStandby == true)
                 if (last && !mStandby) {
-                    sleepTime = 0;
-                    standbyTime = systemTime() + standbyDelay;
-                    mixerStatus = MIXER_DRAIN_TRACK;
-                    mDrainSequence += 2;
+                    // do not modify drain sequence if we are already draining. This happens
+                    // when resuming from pause after drain.
+                    if ((mDrainSequence & 1) == 0) {
+                        sleepTime = 0;
+                        standbyTime = systemTime() + standbyDelay;
+                        mixerStatus = MIXER_DRAIN_TRACK;
+                        mDrainSequence += 2;
+                    }
                    if (mHwPaused) {
                         // It is possible to move from PAUSED to STOPPING_1 without
                         // a resume so we must ensure hardware is running
-                        mOutput->stream->resume(mOutput->stream);
+                        doHwResume = true;
                         mHwPaused = false;
                     }
                 }
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 43e335d..d31009e 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -764,7 +764,7 @@ private:
     bool        mFlushPending;
     size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
     size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
-    Track       *mPreviousTrack;        // used to detect track switch
+    wp<Track>   mPreviousTrack;         // used to detect track switch
 };

 class AsyncCallbackThread : public Thread {
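
Holding mPreviousTrack as a wp<Track> means the offload thread must promote() it before use and simply skips the flush/invalidate work when the previous track has already been destroyed, instead of touching a dangling raw pointer. Android's sp<>/wp<> types are not reproduced here; the following is only a rough standalone analogy using std::shared_ptr/std::weak_ptr:

    #include <cstdio>
    #include <memory>

    // Analogy for the Threads.h change: keep only a weak reference to the previous
    // track, promote it before use, and skip the work if the track is already gone.
    struct Track {
        int sessionId;
    };

    int main() {
        std::weak_ptr<Track> previous;
        {
            auto track = std::make_shared<Track>();
            track->sessionId = 42;
            previous = track;                      // remember the last active track
            if (auto p = previous.lock()) {        // promote() in sp<>/wp<> terms
                printf("previous track alive, session %d\n", p->sessionId);
            }
        }                                          // the track is destroyed here
        if (previous.lock() == nullptr) {
            printf("previous track already released; nothing to invalidate\n");
        }
        return 0;
    }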
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 1e425ba..08af566 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -183,8 +183,7 @@ status_t Parameters::initialize(const CameraMetadata *info) {
     // still have to do something sane for them

     // NOTE: Not scaled like FPS range values are.
-    previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
-    lastSetPreviewFps = previewFps;
+    int previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
     params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE,
             previewFps);

@@ -1134,13 +1133,22 @@ status_t Parameters::set(const String8& paramString) {
     // PREVIEW_FPS_RANGE
     bool fpsRangeChanged = false;
+    int32_t lastSetFpsRange[2];
+
+    params.getPreviewFpsRange(&lastSetFpsRange[0], &lastSetFpsRange[1]);
+    lastSetFpsRange[0] /= kFpsToApiScale;
+    lastSetFpsRange[1] /= kFpsToApiScale;
+
     newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0],
             &validatedParams.previewFpsRange[1]);
     validatedParams.previewFpsRange[0] /= kFpsToApiScale;
     validatedParams.previewFpsRange[1] /= kFpsToApiScale;

-    if (validatedParams.previewFpsRange[0] != previewFpsRange[0] ||
-            validatedParams.previewFpsRange[1] != previewFpsRange[1]) {
+    // Compare the FPS range value from the last set() to the current set()
+    // to determine if the client has changed it
+    if (validatedParams.previewFpsRange[0] != lastSetFpsRange[0] ||
+            validatedParams.previewFpsRange[1] != lastSetFpsRange[1]) {
+        fpsRangeChanged = true;
         camera_metadata_ro_entry_t availablePreviewFpsRanges =
             staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
@@ -1158,16 +1166,6 @@ status_t Parameters::set(const String8& paramString) {
                     validatedParams.previewFpsRange[1]);
             return BAD_VALUE;
         }
-        validatedParams.previewFps =
-            fpsFromRange(validatedParams.previewFpsRange[0],
-                         validatedParams.previewFpsRange[1]);
-
-        // Update our last-seen single preview FPS, needed for disambiguating
-        // when the application is intending to use the deprecated single-FPS
-        // setting vs. the range FPS setting
-        validatedParams.lastSetPreviewFps = newParams.getPreviewFrameRate();
-
-        newParams.setPreviewFrameRate(validatedParams.previewFps);
     }

     // PREVIEW_FORMAT
@@ -1205,12 +1203,11 @@ status_t Parameters::set(const String8& paramString) {
     // PREVIEW_FRAME_RATE Deprecated, only use if the preview fps range is
     // unchanged this time. The single-value FPS is the same as the minimum of
     // the range. To detect whether the application has changed the value of
-    // previewFps, compare against their last-set preview FPS instead of the
-    // single FPS we may have synthesized from a range FPS set.
+    // previewFps, compare against their last-set preview FPS.
     if (!fpsRangeChanged) {
-        validatedParams.previewFps = newParams.getPreviewFrameRate();
-        if (validatedParams.previewFps != lastSetPreviewFps ||
-                recordingHintChanged) {
+        int previewFps = newParams.getPreviewFrameRate();
+        int lastSetPreviewFps = params.getPreviewFrameRate();
+        if (previewFps != lastSetPreviewFps || recordingHintChanged) {
             camera_metadata_ro_entry_t availableFrameRates =
                 staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES);
             /**
@@ -1223,8 +1220,8 @@ status_t Parameters::set(const String8& paramString) {
              * Either way, in case of multiple ranges, break the tie by
              * selecting the smaller range.
              */
-            int targetFps = validatedParams.previewFps;
-            // all ranges which have targetFps
+
+            // all ranges which have previewFps
             Vector<Range> candidateRanges;
             for (i = 0; i < availableFrameRates.count; i+=2) {
                 Range r = {
@@ -1232,13 +1229,13 @@ status_t Parameters::set(const String8& paramString) {
                     availableFrameRates.data.i32[i],
                    availableFrameRates.data.i32[i+1] };

-                if (r.min <= targetFps && targetFps <= r.max) {
+                if (r.min <= previewFps && previewFps <= r.max) {
                     candidateRanges.push(r);
                 }
             }
             if (candidateRanges.isEmpty()) {
                 ALOGE("%s: Requested preview frame rate %d is not supported",
-                        __FUNCTION__, validatedParams.previewFps);
+                        __FUNCTION__, previewFps);
                 return BAD_VALUE;
             }
             // most applicable range with targetFps
@@ -1277,14 +1274,6 @@ status_t Parameters::set(const String8& paramString) {
                     validatedParams.previewFpsRange[1],
                     validatedParams.recordingHint);
         }
-        newParams.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
-                String8::format("%d,%d",
-                        validatedParams.previewFpsRange[0] * kFpsToApiScale,
-                        validatedParams.previewFpsRange[1] * kFpsToApiScale));
-        // Update our last-seen single preview FPS, needed for disambiguating
-        // when the application is intending to use the deprecated single-FPS
-        // setting vs. the range FPS setting
-        validatedParams.lastSetPreviewFps = validatedParams.previewFps;
     }

     // PICTURE_SIZE
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 93ab113..32dbd42 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -46,8 +46,6 @@ struct Parameters {
     int previewWidth, previewHeight;
     int32_t previewFpsRange[2];
-    int lastSetPreviewFps; // the last single FPS value seen in a set call
-    int previewFps; // deprecated, here only for tracking changes
     int previewFormat;

     int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
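
The camera change keys the "did the client change the FPS range" decision off the values stored by the previous set() call, after undoing the API scaling (kFpsToApiScale, which is 1000 in Parameters.cpp, so "15000,30000" means 15-30 fps), rather than off a cached previewFps that may have been synthesized from a range. A standalone sketch of that comparison; the function and variable names are illustrative:

    #include <cstdio>

    // API-level fps range values arrive scaled; a range change is detected by
    // comparing the newly requested range against whatever the previous set() stored.
    static const int kFpsToApiScale = 1000;

    bool fpsRangeChanged(const int lastSetScaled[2], const int newScaled[2]) {
        return newScaled[0] / kFpsToApiScale != lastSetScaled[0] / kFpsToApiScale ||
               newScaled[1] / kFpsToApiScale != lastSetScaled[1] / kFpsToApiScale;
    }

    int main() {
        const int last[2]  = { 15000, 30000 };
        const int same[2]  = { 15000, 30000 };
        const int wider[2] = {  7000, 30000 };
        printf("%d %d\n", fpsRangeChanged(last, same), fpsRangeChanged(last, wider));  // 0 1
        return 0;
    }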