summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--camera/Android.mk1
-rw-r--r--camera/ICameraService.cpp3
-rw-r--r--camera/ICameraServiceProxy.cpp55
-rw-r--r--include/camera/ICameraServiceProxy.h52
-rw-r--r--include/media/AudioSystem.h3
-rw-r--r--include/media/AudioTrack.h17
-rw-r--r--include/media/IAudioFlinger.h3
-rw-r--r--include/media/IOMX.h7
-rw-r--r--include/media/MediaPlayerInterface.h3
-rw-r--r--include/media/MediaProfiles.h79
-rw-r--r--include/media/stagefright/MediaCodecList.h2
-rw-r--r--media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp56
-rw-r--r--media/libeffects/lvm/wrapper/Bundle/EffectBundle.h12
-rw-r--r--media/libmedia/Android.mk4
-rw-r--r--media/libmedia/AudioSystem.cpp7
-rw-r--r--media/libmedia/AudioTrack.cpp16
-rw-r--r--media/libmedia/IAudioFlinger.cpp14
-rw-r--r--media/libmedia/IOMX.cpp12
-rw-r--r--media/libmedia/IResourceManagerService.cpp4
-rw-r--r--media/libmedia/JetPlayer.cpp8
-rw-r--r--media/libmedia/MediaProfiles.cpp126
-rw-r--r--media/libmediaplayerservice/Android.mk2
-rw-r--r--media/libmediaplayerservice/MediaPlayerService.cpp19
-rw-r--r--media/libmediaplayerservice/MediaPlayerService.h3
-rw-r--r--media/libmediaplayerservice/nuplayer/GenericSource.cpp34
-rw-r--r--media/libmediaplayerservice/nuplayer/GenericSource.h5
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayer.cpp61
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayer.h3
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp6
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp14
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp31
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h20
-rw-r--r--media/libmediaplayerservice/nuplayer/NuPlayerSource.h4
-rw-r--r--media/libstagefright/ACodec.cpp12
-rw-r--r--media/libstagefright/Android.mk4
-rw-r--r--media/libstagefright/AudioSource.cpp3
-rw-r--r--media/libstagefright/MediaCodec.cpp30
-rw-r--r--media/libstagefright/MediaCodecList.cpp20
-rw-r--r--media/libstagefright/MediaCodecListOverrides.cpp88
-rw-r--r--media/libstagefright/MediaCodecListOverrides.h3
-rw-r--r--media/libstagefright/NuCachedSource2.cpp7
-rw-r--r--media/libstagefright/OMXClient.cpp12
-rw-r--r--media/libstagefright/OMXCodec.cpp6
-rw-r--r--media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp10
-rwxr-xr-xmedia/libstagefright/codecs/avcenc/SoftAVCEnc.cpp5
-rw-r--r--media/libstagefright/include/MPEG2TSExtractor.h13
-rw-r--r--media/libstagefright/include/OMX.h4
-rw-r--r--media/libstagefright/include/OMXNodeInstance.h7
-rw-r--r--media/libstagefright/mpeg2ts/ATSParser.cpp96
-rw-r--r--media/libstagefright/mpeg2ts/ATSParser.h56
-rw-r--r--media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp316
-rw-r--r--media/libstagefright/omx/OMX.cpp8
-rw-r--r--media/libstagefright/omx/OMXNodeInstance.cpp69
-rw-r--r--media/libstagefright/omx/tests/OMXHarness.cpp2
-rw-r--r--media/libstagefright/rtsp/MyHandler.h2
-rw-r--r--media/libstagefright/tests/MediaCodecListOverrides_test.cpp6
-rw-r--r--services/audioflinger/AudioFlinger.cpp34
-rw-r--r--services/audioflinger/AudioFlinger.h13
-rw-r--r--services/audioflinger/AudioMixer.cpp111
-rw-r--r--services/audioflinger/Effects.cpp10
-rw-r--r--services/audioflinger/Threads.cpp336
-rw-r--r--services/audioflinger/Threads.h41
-rw-r--r--services/audiopolicy/managerdefault/AudioPolicyManager.cpp125
-rw-r--r--services/audiopolicy/managerdefault/AudioPolicyManager.h8
-rw-r--r--services/camera/libcameraservice/CameraService.cpp19
-rw-r--r--services/camera/libcameraservice/CameraService.h2
-rw-r--r--services/camera/libcameraservice/api1/Camera2Client.cpp10
-rw-r--r--services/camera/libcameraservice/api1/Camera2Client.h2
-rw-r--r--services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp54
-rw-r--r--services/camera/libcameraservice/api1/client2/CaptureSequencer.h9
-rw-r--r--services/camera/libcameraservice/common/CameraModule.cpp30
-rw-r--r--services/camera/libcameraservice/device3/Camera3Device.cpp90
-rw-r--r--services/camera/libcameraservice/device3/Camera3Device.h5
-rw-r--r--services/soundtrigger/SoundTriggerHwService.cpp1
74 files changed, 1636 insertions, 729 deletions
diff --git a/camera/Android.mk b/camera/Android.mk
index 4c4700b..471cb0d 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -28,6 +28,7 @@ LOCAL_SRC_FILES:= \
ICameraClient.cpp \
ICameraService.cpp \
ICameraServiceListener.cpp \
+ ICameraServiceProxy.cpp \
ICameraRecordingProxy.cpp \
ICameraRecordingProxyListener.cpp \
camera2/ICameraDeviceUser.cpp \
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index a02dbe2..0071e70 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -494,7 +494,8 @@ status_t BnCameraService::onTransact(
__FUNCTION__, len);
return FAILED_TRANSACTION;
}
- int32_t events[len] = {};
+ int32_t events[len];
+ memset(events, 0, sizeof(int32_t) * len);
status_t status = data.read(events, sizeof(int32_t) * len);
if (status != NO_ERROR) {
ALOGE("%s: Received poorly formatted binder request: notifySystemEvent.",
diff --git a/camera/ICameraServiceProxy.cpp b/camera/ICameraServiceProxy.cpp
new file mode 100644
index 0000000..06a5afb
--- /dev/null
+++ b/camera/ICameraServiceProxy.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BpCameraServiceProxy"
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+
+#include <camera/ICameraServiceProxy.h>
+
+namespace android {
+
+class BpCameraServiceProxy: public BpInterface<ICameraServiceProxy> {
+public:
+ BpCameraServiceProxy(const sp<IBinder>& impl) : BpInterface<ICameraServiceProxy>(impl) {}
+
+ virtual void pingForUserUpdate() {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
+ remote()->transact(BnCameraServiceProxy::PING_FOR_USER_UPDATE, data, &reply,
+ IBinder::FLAG_ONEWAY);
+ }
+};
+
+
+IMPLEMENT_META_INTERFACE(CameraServiceProxy, "android.hardware.ICameraServiceProxy");
+
+status_t BnCameraServiceProxy::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags) {
+ switch(code) {
+ case PING_FOR_USER_UPDATE: {
+ CHECK_INTERFACE(ICameraServiceProxy, data, reply);
+ pingForUserUpdate();
+ return NO_ERROR;
+ } break;
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+}; // namespace android
+
diff --git a/include/camera/ICameraServiceProxy.h b/include/camera/ICameraServiceProxy.h
new file mode 100644
index 0000000..12a555f
--- /dev/null
+++ b/include/camera/ICameraServiceProxy.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+#define ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class ICameraServiceProxy : public IInterface {
+public:
+ enum {
+ PING_FOR_USER_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
+ };
+
+ DECLARE_META_INTERFACE(CameraServiceProxy);
+
+ virtual void pingForUserUpdate() = 0;
+};
+
+class BnCameraServiceProxy: public BnInterface<ICameraServiceProxy>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+
+
+}; // namespace android
+
+#endif // ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+
+
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 3241e9c..26cffa6 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -158,6 +158,9 @@ public:
// or no HW sync source is used.
static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+ // Indicate JAVA services are ready (scheduling, power management ...)
+ static status_t systemReady();
+
// Events used to synchronize actions between audio sessions.
// For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
// playback is complete on another audio session.
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 458f4b4..54e86b9 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -59,9 +59,11 @@ public:
// voluntary invalidation by mediaserver, or mediaserver crash.
EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
// back (after stop is called)
+#if 0 // FIXME not yet implemented
EVENT_NEW_TIMESTAMP = 8, // Delivered periodically and when there's a significant change
// in the mapping from frame position to presentation time.
// See AudioTimestamp for the information included with event.
+#endif
};
/* Client should declare a Buffer and pass the address to obtainBuffer()
@@ -183,6 +185,10 @@ public:
* pid: Process ID of the app which initially requested this AudioTrack
* for power management tracking, or -1 for current process ID.
* pAttributes: If not NULL, supersedes streamType for use case selection.
+ * doNotReconnect: If set to true, AudioTrack won't automatically recreate the IAudioTrack
+ binder to AudioFlinger.
+ It will return an error instead. The application will recreate
+ the track based on offloading or different channel configuration, etc.
* threadCanCallJava: Not present in parameter list, and so is fixed at false.
*/
@@ -200,7 +206,8 @@ public:
const audio_offload_info_t *offloadInfo = NULL,
int uid = -1,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false);
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
@@ -228,7 +235,8 @@ public:
const audio_offload_info_t *offloadInfo = NULL,
int uid = -1,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false);
/* Terminates the AudioTrack and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioTrack.
@@ -272,7 +280,8 @@ public:
const audio_offload_info_t *offloadInfo = NULL,
int uid = -1,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false);
/* Result of constructing the AudioTrack. This must be checked for successful initialization
* before using any AudioTrack API (except for set()), because using
@@ -877,6 +886,8 @@ protected:
// const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
// mLock must be held to read or write those bits reliably.
+ bool mDoNotReconnect;
+
int mSessionId;
int mAuxEffectId;
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 3f7fd09..5051aff 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -243,6 +243,9 @@ public:
/* Get the HW synchronization source used for an audio session */
virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
+
+ /* Indicate JAVA services are ready (scheduling, power management ...) */
+ virtual status_t systemReady() = 0;
};
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index d33d142..46c363a 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -98,9 +98,10 @@ public:
virtual status_t getGraphicBufferUsage(
node_id node, OMX_U32 port_index, OMX_U32* usage) = 0;
+ // Use |params| as an OMX buffer, but limit the size of the OMX buffer to |allottedSize|.
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) = 0;
+ buffer_id *buffer, OMX_U32 allottedSize) = 0;
virtual status_t useGraphicBuffer(
node_id node, OMX_U32 port_index,
@@ -132,9 +133,11 @@ public:
node_id node, OMX_U32 port_index, size_t size,
buffer_id *buffer, void **buffer_data) = 0;
+ // Allocate an OMX buffer of size |allottedSize|. Use |params| as the backup buffer, which
+ // may be larger.
virtual status_t allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) = 0;
+ buffer_id *buffer, OMX_U32 allottedSize) = 0;
virtual status_t freeBuffer(
node_id node, OMX_U32 port_index, buffer_id buffer) = 0;
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index fa917f9..46f900e 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -113,7 +113,8 @@ public:
AudioCallback cb = NULL,
void *cookie = NULL,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL) = 0;
+ const audio_offload_info_t *offloadInfo = NULL,
+ bool doNotReconnect = false) = 0;
virtual status_t start() = 0;
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index f061d22..e02918f 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -58,24 +58,6 @@ enum camcorder_quality {
CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
};
-/**
- * Set CIF as default maximum import and export resolution of video editor.
- * The maximum import and export resolutions are platform specific,
- * which should be defined in media_profiles.xml.
- * Set default maximum prefetch YUV frames to 6, which means video editor can
- * queue up to 6 YUV frames in the video encoder source.
- * This value is used to limit the amount of memory used by video editor
- * engine when the encoder consumes YUV frames at a lower speed
- * than video editor engine produces.
- */
-enum videoeditor_capability {
- VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH = 352,
- VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT = 288,
- VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH = 352,
- VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT = 288,
- VIDEOEDITOR_DEFAULT_MAX_PREFETCH_YUV_FRAMES = 6
-};
-
enum video_decoder {
VIDEO_DECODER_WMV,
};
@@ -148,32 +130,6 @@ public:
int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
/**
- * Returns the value for the given param name for the video editor cap
- * param or -1 if error.
- * Supported param name are:
- * videoeditor.input.width.max - max input video frame width
- * videoeditor.input.height.max - max input video frame height
- * videoeditor.output.width.max - max output video frame width
- * videoeditor.output.height.max - max output video frame height
- * maxPrefetchYUVFrames - max prefetch YUV frames in video editor engine. This value is used
- * to limit the memory consumption.
- */
- int getVideoEditorCapParamByName(const char *name) const;
-
- /**
- * Returns the value for the given param name for the video editor export codec format
- * param or -1 if error.
- * Supported param name are:
- * videoeditor.export.profile - export video profile
- * videoeditor.export.level - export video level
- * Supported param codec are:
- * 1 for h263
- * 2 for h264
- * 3 for mpeg4
- */
- int getVideoEditorExportParamByName(const char *name, int codec) const;
-
- /**
* Returns the audio encoders supported.
*/
Vector<audio_encoder> getAudioEncoders() const;
@@ -221,7 +177,7 @@ private:
MediaProfiles& operator=(const MediaProfiles&); // Don't call me
MediaProfiles(const MediaProfiles&); // Don't call me
- MediaProfiles() { mVideoEditorCap = NULL; } // Dummy default constructor
+ MediaProfiles() {} // Dummy default constructor
~MediaProfiles(); // Don't delete me
struct VideoCodec {
@@ -366,31 +322,6 @@ private:
int mCameraId;
Vector<int> mLevels;
};
- struct ExportVideoProfile {
- ExportVideoProfile(int codec, int profile, int level)
- :mCodec(codec),mProfile(profile),mLevel(level) {}
- ~ExportVideoProfile() {}
- int mCodec;
- int mProfile;
- int mLevel;
- };
- struct VideoEditorCap {
- VideoEditorCap(int inFrameWidth, int inFrameHeight,
- int outFrameWidth, int outFrameHeight, int frames)
- : mMaxInputFrameWidth(inFrameWidth),
- mMaxInputFrameHeight(inFrameHeight),
- mMaxOutputFrameWidth(outFrameWidth),
- mMaxOutputFrameHeight(outFrameHeight),
- mMaxPrefetchYUVFrames(frames) {}
-
- ~VideoEditorCap() {}
-
- int mMaxInputFrameWidth;
- int mMaxInputFrameHeight;
- int mMaxOutputFrameWidth;
- int mMaxOutputFrameHeight;
- int mMaxPrefetchYUVFrames;
- };
int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const;
void initRequiredProfileRefs(const Vector<int>& cameraIds);
@@ -403,7 +334,6 @@ private:
static void logAudioEncoderCap(const AudioEncoderCap& cap);
static void logVideoDecoderCap(const VideoDecoderCap& cap);
static void logAudioDecoderCap(const AudioDecoderCap& cap);
- static void logVideoEditorCap(const VideoEditorCap& cap);
// If the xml configuration file does exist, use the settings
// from the xml
@@ -415,9 +345,6 @@ private:
static VideoDecoderCap* createVideoDecoderCap(const char **atts);
static VideoEncoderCap* createVideoEncoderCap(const char **atts);
static AudioEncoderCap* createAudioEncoderCap(const char **atts);
- static VideoEditorCap* createVideoEditorCap(
- const char **atts, MediaProfiles *profiles);
- static ExportVideoProfile* createExportVideoProfile(const char **atts);
static CamcorderProfile* createCamcorderProfile(
int cameraId, const char **atts, Vector<int>& cameraIds);
@@ -461,8 +388,6 @@ private:
static void createDefaultEncoderOutputFileFormats(MediaProfiles *profiles);
static void createDefaultImageEncodingQualityLevels(MediaProfiles *profiles);
static void createDefaultImageDecodingMaxMemory(MediaProfiles *profiles);
- static void createDefaultVideoEditorCap(MediaProfiles *profiles);
- static void createDefaultExportVideoProfiles(MediaProfiles *profiles);
static VideoEncoderCap* createDefaultH263VideoEncoderCap();
static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
@@ -520,8 +445,6 @@ private:
RequiredProfiles *mRequiredProfileRefs;
Vector<int> mCameraIds;
- VideoEditorCap* mVideoEditorCap;
- Vector<ExportVideoProfile*> mVideoEditorExportProfiles;
};
}; // namespace android
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index ce34338..df5e519 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -32,6 +32,8 @@
namespace android {
+extern const char *kMaxEncoderInputBuffers;
+
struct AMessage;
struct MediaCodecList : public BnMediaCodecList {
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 6aeb919..d904ab6 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -27,6 +27,7 @@
#include <cutils/log.h>
#include "EffectBundle.h"
+#include "math.h"
// effect_handle_t interface implementation for bass boost
@@ -830,32 +831,69 @@ void LvmEffect_limitLevel(EffectContext *pContext) {
int gainCorrection = 0;
//Count the energy contribution per band for EQ and BassBoost only if they are active.
float energyContribution = 0;
+ float energyCross = 0;
+ float energyBassBoost = 0;
+ float crossCorrection = 0;
//EQ contribution
if (pContext->pBundledContext->bEqualizerEnabled == LVM_TRUE) {
for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
- float bandEnergy = (pContext->pBundledContext->bandGaindB[i] *
- LimitLevel_bandEnergyContribution[i])/15.0;
+ float bandFactor = pContext->pBundledContext->bandGaindB[i]/15.0;
+ float bandCoefficient = LimitLevel_bandEnergyCoefficient[i];
+ float bandEnergy = bandFactor * bandCoefficient * bandCoefficient;
if (bandEnergy > 0)
energyContribution += bandEnergy;
}
+
+ //cross EQ coefficients
+ float bandFactorSum = 0;
+ for (int i = 0; i < FIVEBAND_NUMBANDS-1; i++) {
+ float bandFactor1 = pContext->pBundledContext->bandGaindB[i]/15.0;
+ float bandFactor2 = pContext->pBundledContext->bandGaindB[i+1]/15.0;
+
+ if (bandFactor1 > 0 && bandFactor2 > 0) {
+ float crossEnergy = bandFactor1 * bandFactor2 *
+ LimitLevel_bandEnergyCrossCoefficient[i];
+ bandFactorSum += bandFactor1 * bandFactor2;
+
+ if (crossEnergy > 0)
+ energyCross += crossEnergy;
+ }
+ }
+ bandFactorSum -= 1.0;
+ if (bandFactorSum > 0)
+ crossCorrection = bandFactorSum * 0.7;
}
//BassBoost contribution
if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) {
- float bandEnergy = (pContext->pBundledContext->BassStrengthSaved *
- LimitLevel_bassBoostEnergyContribution)/1000.0;
- if (bandEnergy > 0)
- energyContribution += bandEnergy;
+ float boostFactor = (pContext->pBundledContext->BassStrengthSaved)/1000.0;
+ float boostCoefficient = LimitLevel_bassBoostEnergyCoefficient;
+
+ energyContribution += boostFactor * boostCoefficient * boostCoefficient;
+
+ for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+ float bandFactor = pContext->pBundledContext->bandGaindB[i]/15.0;
+ float bandCrossCoefficient = LimitLevel_bassBoostEnergyCrossCoefficient[i];
+ float bandEnergy = boostFactor * bandFactor *
+ bandCrossCoefficient;
+ if (bandEnergy > 0)
+ energyBassBoost += bandEnergy;
+ }
}
//Virtualizer contribution
if (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE) {
- energyContribution += LimitLevel_virtualizerContribution;
- }
+ energyContribution += LimitLevel_virtualizerContribution *
+ LimitLevel_virtualizerContribution;
+ }
+
+ double totalEnergyEstimation = sqrt(energyContribution + energyCross + energyBassBoost) -
+ crossCorrection;
+ ALOGV(" TOTAL energy estimation: %0.2f", totalEnergyEstimation);
//roundoff
- int maxLevelRound = (int)(energyContribution + 0.99);
+ int maxLevelRound = (int)(totalEnergyEstimation + 0.99);
if (maxLevelRound + pContext->pBundledContext->volume > 0) {
gainCorrection = maxLevelRound + pContext->pBundledContext->volume;
}
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index b3071f4..9459b87 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -200,10 +200,16 @@ static const PresetConfig gEqualizerPresets[] = {
* updated.
*/
-static const float LimitLevel_bandEnergyContribution[FIVEBAND_NUMBANDS] = {
- 5.0, 6.5, 6.45, 4.8, 1.7 };
+static const float LimitLevel_bandEnergyCoefficient[FIVEBAND_NUMBANDS] = {
+ 7.56, 9.69, 9.59, 7.37, 2.88};
-static const float LimitLevel_bassBoostEnergyContribution = 6.7;
+static const float LimitLevel_bandEnergyCrossCoefficient[FIVEBAND_NUMBANDS-1] = {
+ 126.0, 115.0, 125.0, 104.0 };
+
+static const float LimitLevel_bassBoostEnergyCrossCoefficient[FIVEBAND_NUMBANDS] = {
+ 221.21, 208.10, 28.16, 0.0, 0.0 };
+
+static const float LimitLevel_bassBoostEnergyCoefficient = 7.12;
static const float LimitLevel_virtualizerContribution = 1.9;
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 0c18828..a3c3d3c 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -7,7 +7,7 @@ LOCAL_SRC_FILES:= \
LOCAL_MODULE:= libmedia_helper
LOCAL_MODULE_TAGS := optional
-LOCAL_C_FLAGS += -Werror -Wall
+LOCAL_C_FLAGS += -Werror -Wno-error=deprecated-declarations -Wall
LOCAL_CLANG := true
include $(BUILD_STATIC_LIBRARY)
@@ -87,7 +87,7 @@ LOCAL_C_INCLUDES := \
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
-LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
LOCAL_CLANG := true
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 4c2e77b..6c2c226 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -423,6 +423,13 @@ audio_hw_sync_t AudioSystem::getAudioHwSyncForSession(audio_session_t sessionId)
return af->getAudioHwSyncForSession(sessionId);
}
+status_t AudioSystem::systemReady()
+{
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return NO_INIT;
+ return af->systemReady();
+}
+
// ---------------------------------------------------------------------------
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index db316b0..faf5935 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -178,7 +178,8 @@ AudioTrack::AudioTrack(
const audio_offload_info_t *offloadInfo,
int uid,
pid_t pid,
- const audio_attributes_t* pAttributes)
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect)
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -189,7 +190,7 @@ AudioTrack::AudioTrack(
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
- offloadInfo, uid, pid, pAttributes);
+ offloadInfo, uid, pid, pAttributes, doNotReconnect);
}
AudioTrack::AudioTrack(
@@ -207,7 +208,8 @@ AudioTrack::AudioTrack(
const audio_offload_info_t *offloadInfo,
int uid,
pid_t pid,
- const audio_attributes_t* pAttributes)
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect)
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -218,7 +220,7 @@ AudioTrack::AudioTrack(
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
- uid, pid, pAttributes);
+ uid, pid, pAttributes, doNotReconnect);
}
AudioTrack::~AudioTrack()
@@ -266,7 +268,8 @@ status_t AudioTrack::set(
const audio_offload_info_t *offloadInfo,
int uid,
pid_t pid,
- const audio_attributes_t* pAttributes)
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect)
{
ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
@@ -308,6 +311,7 @@ status_t AudioTrack::set(
}
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
+ mDoNotReconnect = doNotReconnect;
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
sharedBuffer->size());
@@ -2006,7 +2010,7 @@ status_t AudioTrack::restoreTrack_l(const char *from)
// output parameters and new IAudioFlinger in createTrack_l()
AudioSystem::clearAudioConfigCache();
- if (isOffloadedOrDirect_l()) {
+ if (isOffloadedOrDirect_l() || mDoNotReconnect) {
// FIXME re-creation of offloaded tracks is not yet implemented
return DEAD_OBJECT;
}
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index d722fe9..a3f014b 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -80,7 +80,8 @@ enum {
RELEASE_AUDIO_PATCH,
LIST_AUDIO_PATCHES,
SET_AUDIO_PORT_CONFIG,
- GET_AUDIO_HW_SYNC
+ GET_AUDIO_HW_SYNC,
+ SYSTEM_READY
};
#define MAX_ITEMS_PER_LIST 1024
@@ -903,6 +904,12 @@ public:
}
return (audio_hw_sync_t)reply.readInt32();
}
+ virtual status_t systemReady()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ return remote()->transact(SYSTEM_READY, data, &reply, IBinder::FLAG_ONEWAY);
+ }
};
IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -1396,6 +1403,11 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32(getAudioHwSyncForSession((audio_session_t)data.readInt32()));
return NO_ERROR;
} break;
+ case SYSTEM_READY: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ systemReady();
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index c14debf..d556c33 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -245,12 +245,13 @@ public:
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) {
+ buffer_id *buffer, OMX_U32 allottedSize) {
Parcel data, reply;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
data.writeInt32(port_index);
data.writeStrongBinder(IInterface::asBinder(params));
+ data.writeInt32(allottedSize);
remote()->transact(USE_BUFFER, data, &reply);
status_t err = reply.readInt32();
@@ -459,12 +460,13 @@ public:
virtual status_t allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) {
+ buffer_id *buffer, OMX_U32 allottedSize) {
Parcel data, reply;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
data.writeInt32(port_index);
data.writeStrongBinder(IInterface::asBinder(params));
+ data.writeInt32(allottedSize);
remote()->transact(ALLOC_BUFFER_WITH_BACKUP, data, &reply);
status_t err = reply.readInt32();
@@ -757,9 +759,10 @@ status_t BnOMX::onTransact(
OMX_U32 port_index = data.readInt32();
sp<IMemory> params =
interface_cast<IMemory>(data.readStrongBinder());
+ OMX_U32 allottedSize = data.readInt32();
buffer_id buffer;
- status_t err = useBuffer(node, port_index, params, &buffer);
+ status_t err = useBuffer(node, port_index, params, &buffer, allottedSize);
reply->writeInt32(err);
if (err == OK) {
@@ -953,10 +956,11 @@ status_t BnOMX::onTransact(
OMX_U32 port_index = data.readInt32();
sp<IMemory> params =
interface_cast<IMemory>(data.readStrongBinder());
+ OMX_U32 allottedSize = data.readInt32();
buffer_id buffer;
status_t err = allocateBufferWithBackup(
- node, port_index, params, &buffer);
+ node, port_index, params, &buffer, allottedSize);
reply->writeInt32(err);
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
index 7ae946d..6902e99 100644
--- a/media/libmedia/IResourceManagerService.cpp
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -48,7 +48,7 @@ static void writeToParcel(Parcel *data, const Vector<T> &items) {
template <typename T>
static void readFromParcel(const Parcel &data, Vector<T> *items) {
size_t size = (size_t)data.readUint32();
- for (size_t i = 0; i < size; i++) {
+ for (size_t i = 0; i < size && data.dataAvail() > 0; i++) {
T item;
item.readFromParcel(data);
items->add(item);
@@ -119,8 +119,6 @@ status_t BnResourceManagerService::onTransact(
switch (code) {
case CONFIG: {
CHECK_INTERFACE(IResourceManagerService, data, reply);
- sp<IResourceManagerClient> client(
- interface_cast<IResourceManagerClient>(data.readStrongBinder()));
Vector<MediaResourcePolicy> policies;
readFromParcel(data, &policies);
config(policies);
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 271be0c..34deb59 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -85,12 +85,18 @@ int JetPlayer::init()
// create the output AudioTrack
mAudioTrack = new AudioTrack();
- mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parameterize this
+ status_t status = mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parameterize this
pLibConfig->sampleRate,
AUDIO_FORMAT_PCM_16_BIT,
audio_channel_out_mask_from_count(pLibConfig->numChannels),
(size_t) mTrackBufferSize,
AUDIO_OUTPUT_FLAG_NONE);
+ if (status != OK) {
+ ALOGE("JetPlayer::init(): Error initializing JET library; AudioTrack error %d", status);
+ mAudioTrack.clear();
+ mState = EAS_STATE_ERROR;
+ return EAS_FAILURE;
+ }
// create render and playback thread
{
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index ae0061f..c5790fb 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -152,16 +152,6 @@ MediaProfiles::logAudioDecoderCap(const MediaProfiles::AudioDecoderCap& cap UNUS
ALOGV("codec = %d", cap.mCodec);
}
-/*static*/ void
-MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap UNUSED)
-{
- ALOGV("videoeditor cap:");
- ALOGV("mMaxInputFrameWidth = %d", cap.mMaxInputFrameWidth);
- ALOGV("mMaxInputFrameHeight = %d", cap.mMaxInputFrameHeight);
- ALOGV("mMaxOutputFrameWidth = %d", cap.mMaxOutputFrameWidth);
- ALOGV("mMaxOutputFrameHeight = %d", cap.mMaxOutputFrameHeight);
-}
-
/*static*/ int
MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings,
const char *name)
@@ -398,42 +388,6 @@ void MediaProfiles::addStartTimeOffset(int cameraId, const char** atts)
ALOGV("%s: cameraId=%d, offset=%d ms", __func__, cameraId, offsetTimeMs);
mStartTimeOffsets.replaceValueFor(cameraId, offsetTimeMs);
}
-/*static*/ MediaProfiles::ExportVideoProfile*
-MediaProfiles::createExportVideoProfile(const char **atts)
-{
- CHECK(!strcmp("name", atts[0]) &&
- !strcmp("profile", atts[2]) &&
- !strcmp("level", atts[4]));
-
- const size_t nMappings =
- sizeof(sVideoEncoderNameMap)/sizeof(sVideoEncoderNameMap[0]);
- const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
-
- MediaProfiles::ExportVideoProfile *profile =
- new MediaProfiles::ExportVideoProfile(
- codec, atoi(atts[3]), atoi(atts[5]));
-
- return profile;
-}
-/*static*/ MediaProfiles::VideoEditorCap*
-MediaProfiles::createVideoEditorCap(const char **atts, MediaProfiles *profiles)
-{
- CHECK(!strcmp("maxInputFrameWidth", atts[0]) &&
- !strcmp("maxInputFrameHeight", atts[2]) &&
- !strcmp("maxOutputFrameWidth", atts[4]) &&
- !strcmp("maxOutputFrameHeight", atts[6]) &&
- !strcmp("maxPrefetchYUVFrames", atts[8]));
-
- MediaProfiles::VideoEditorCap *pVideoEditorCap =
- new MediaProfiles::VideoEditorCap(atoi(atts[1]), atoi(atts[3]),
- atoi(atts[5]), atoi(atts[7]), atoi(atts[9]));
-
- logVideoEditorCap(*pVideoEditorCap);
- profiles->mVideoEditorCap = pVideoEditorCap;
-
- return pVideoEditorCap;
-}
/*static*/ void
MediaProfiles::startElementHandler(void *userData, const char *name, const char **atts)
@@ -465,10 +419,6 @@ MediaProfiles::startElementHandler(void *userData, const char *name, const char
createCamcorderProfile(profiles->mCurrentCameraId, atts, profiles->mCameraIds));
} else if (strcmp("ImageEncoding", name) == 0) {
profiles->addImageEncodingQualityLevel(profiles->mCurrentCameraId, atts);
- } else if (strcmp("VideoEditorCap", name) == 0) {
- createVideoEditorCap(atts, profiles);
- } else if (strcmp("ExportVideoProfile", name) == 0) {
- profiles->mVideoEditorExportProfiles.add(createExportVideoProfile(atts));
}
}
@@ -873,32 +823,6 @@ MediaProfiles::createDefaultImageEncodingQualityLevels(MediaProfiles *profiles)
profiles->mImageEncodingQualityLevels.add(levels);
}
-/*static*/ void
-MediaProfiles::createDefaultVideoEditorCap(MediaProfiles *profiles)
-{
- profiles->mVideoEditorCap =
- new MediaProfiles::VideoEditorCap(
- VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH,
- VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT,
- VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH,
- VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT,
- VIDEOEDITOR_DEFAULT_MAX_PREFETCH_YUV_FRAMES);
-}
-/*static*/ void
-MediaProfiles::createDefaultExportVideoProfiles(MediaProfiles *profiles)
-{
- // Create default video export profiles
- profiles->mVideoEditorExportProfiles.add(
- new ExportVideoProfile(VIDEO_ENCODER_H263,
- OMX_VIDEO_H263ProfileBaseline, OMX_VIDEO_H263Level10));
- profiles->mVideoEditorExportProfiles.add(
- new ExportVideoProfile(VIDEO_ENCODER_MPEG_4_SP,
- OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level1));
- profiles->mVideoEditorExportProfiles.add(
- new ExportVideoProfile(VIDEO_ENCODER_H264,
- OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel13));
-}
-
/*static*/ MediaProfiles*
MediaProfiles::createDefaultInstance()
{
@@ -910,8 +834,6 @@ MediaProfiles::createDefaultInstance()
createDefaultAudioDecoders(profiles);
createDefaultEncoderOutputFileFormats(profiles);
createDefaultImageEncodingQualityLevels(profiles);
- createDefaultVideoEditorCap(profiles);
- createDefaultExportVideoProfiles(profiles);
return profiles;
}
@@ -1009,54 +931,6 @@ int MediaProfiles::getVideoEncoderParamByName(const char *name, video_encoder co
ALOGE("The given video encoder param name %s is not found", name);
return -1;
}
-int MediaProfiles::getVideoEditorExportParamByName(
- const char *name, int codec) const
-{
- ALOGV("getVideoEditorExportParamByName: name %s codec %d", name, codec);
- ExportVideoProfile *exportProfile = NULL;
- int index = -1;
- for (size_t i =0; i < mVideoEditorExportProfiles.size(); i++) {
- exportProfile = mVideoEditorExportProfiles[i];
- if (exportProfile->mCodec == codec) {
- index = i;
- break;
- }
- }
- if (index == -1) {
- ALOGE("The given video decoder %d is not found", codec);
- return -1;
- }
- if (!strcmp("videoeditor.export.profile", name))
- return exportProfile->mProfile;
- if (!strcmp("videoeditor.export.level", name))
- return exportProfile->mLevel;
-
- ALOGE("The given video editor export param name %s is not found", name);
- return -1;
-}
-int MediaProfiles::getVideoEditorCapParamByName(const char *name) const
-{
- ALOGV("getVideoEditorCapParamByName: %s", name);
-
- if (mVideoEditorCap == NULL) {
- ALOGE("The mVideoEditorCap is not created, then create default cap.");
- createDefaultVideoEditorCap(sInstance);
- }
-
- if (!strcmp("videoeditor.input.width.max", name))
- return mVideoEditorCap->mMaxInputFrameWidth;
- if (!strcmp("videoeditor.input.height.max", name))
- return mVideoEditorCap->mMaxInputFrameHeight;
- if (!strcmp("videoeditor.output.width.max", name))
- return mVideoEditorCap->mMaxOutputFrameWidth;
- if (!strcmp("videoeditor.output.height.max", name))
- return mVideoEditorCap->mMaxOutputFrameHeight;
- if (!strcmp("maxPrefetchYUVFrames", name))
- return mVideoEditorCap->mMaxPrefetchYUVFrames;
-
- ALOGE("The given video editor param name %s is not found", name);
- return -1;
-}
Vector<audio_encoder> MediaProfiles::getAudioEncoders() const
{
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index ce04505..7f0cca2 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -55,7 +55,7 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/tremolo/Tremolo \
-LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
LOCAL_CLANG := true
LOCAL_MODULE:= libmediaplayerservice
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 3e2d967..7c40121 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1481,7 +1481,8 @@ status_t MediaPlayerService::AudioOutput::open(
audio_format_t format, int bufferCount,
AudioCallback cb, void *cookie,
audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+ const audio_offload_info_t *offloadInfo,
+ bool doNotReconnect)
{
mCallback = cb;
mCallbackCookie = cookie;
@@ -1597,7 +1598,8 @@ status_t MediaPlayerService::AudioOutput::open(
offloadInfo,
mUid,
mPid,
- mAttributes);
+ mAttributes,
+ doNotReconnect);
} else {
t = new AudioTrack(
mStreamType,
@@ -1614,12 +1616,14 @@ status_t MediaPlayerService::AudioOutput::open(
NULL, // offload info
mUid,
mPid,
- mAttributes);
+ mAttributes,
+ doNotReconnect);
}
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
ALOGE("Unable to create audio track");
delete newcbd;
+ // t goes out of scope, so reference count drops to zero
return NO_INIT;
} else {
// successful AudioTrack initialization implies a legacy stream type was generated
@@ -1872,6 +1876,15 @@ void MediaPlayerService::AudioOutput::CallbackWrapper(
me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
break;
+ case AudioTrack::EVENT_UNDERRUN:
+ // This occurs when there is no data available, typically occurring
+ // when there is a failure to supply data to the AudioTrack. It can also
+ // occur in non-offloaded mode when the audio device comes out of standby.
+ //
+ // If you see this at the start of playback, there probably was a glitch.
+ ALOGI("callbackwrapper: EVENT_UNDERRUN (discarded)");
+ break;
+
default:
ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
}
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 5103841..e9f72b8 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -97,7 +97,8 @@ class MediaPlayerService : public BnMediaPlayerService
audio_format_t format, int bufferCount,
AudioCallback cb, void *cookie,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL);
+ const audio_offload_info_t *offloadInfo = NULL,
+ bool doNotReconnect = false);
virtual status_t start();
virtual ssize_t write(const void* buffer, size_t size, bool blocking = true);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 5e7b644..64d172e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -56,7 +56,7 @@ NuPlayer::GenericSource::GenericSource(
mVideoLastDequeueTimeUs(0),
mFetchSubtitleDataGeneration(0),
mFetchTimedTextDataGeneration(0),
- mDurationUs(0ll),
+ mDurationUs(-1ll),
mAudioIsVorbis(false),
mIsWidevine(false),
mIsSecure(false),
@@ -324,6 +324,10 @@ status_t NuPlayer::GenericSource::setBuffers(
return INVALID_OPERATION;
}
+bool NuPlayer::GenericSource::isStreaming() const {
+ return mIsStreaming;
+}
+
NuPlayer::GenericSource::~GenericSource() {
if (mLooper != NULL) {
mLooper->unregisterHandler(id());
@@ -1510,17 +1514,7 @@ void NuPlayer::GenericSource::readBuffer(
mVideoTimeUs = timeUs;
}
- // formatChange && seeking: track whose source is changed during selection
- // formatChange && !seeking: track whose source is not changed during selection
- // !formatChange: normal seek
- if ((seeking || formatChange)
- && (trackType == MEDIA_TRACK_TYPE_AUDIO
- || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
- ATSParser::DiscontinuityType type = (formatChange && seeking)
- ? ATSParser::DISCONTINUITY_FORMATCHANGE
- : ATSParser::DISCONTINUITY_NONE;
- track->mPackets->queueDiscontinuity( type, NULL, true /* discard */);
- }
+ queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
sp<ABuffer> buffer = mediaBufferToABuffer(
mbuf, trackType, seekTimeUs, actualTimeUs);
@@ -1538,10 +1532,26 @@ void NuPlayer::GenericSource::readBuffer(
false /* discard */);
#endif
} else {
+ queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
track->mPackets->signalEOS(err);
break;
}
}
}
+void NuPlayer::GenericSource::queueDiscontinuityIfNeeded(
+ bool seeking, bool formatChange, media_track_type trackType, Track *track) {
+ // formatChange && seeking: track whose source is changed during selection
+ // formatChange && !seeking: track whose source is not changed during selection
+ // !formatChange: normal seek
+ if ((seeking || formatChange)
+ && (trackType == MEDIA_TRACK_TYPE_AUDIO
+ || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
+ ATSParser::DiscontinuityType type = (formatChange && seeking)
+ ? ATSParser::DISCONTINUITY_FORMATCHANGE
+ : ATSParser::DISCONTINUITY_NONE;
+ track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */);
+ }
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 7fab051..dc85d2d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -75,6 +75,8 @@ struct NuPlayer::GenericSource : public NuPlayer::Source {
virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
+ virtual bool isStreaming() const;
+
protected:
virtual ~GenericSource();
@@ -200,6 +202,9 @@ private:
media_track_type trackType,
int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
+ void queueDiscontinuityIfNeeded(
+ bool seeking, bool formatChange, media_track_type trackType, Track *track);
+
void schedulePollBuffering();
void cancelPollBuffering();
void restartPollBuffering();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 96a7adb..8760cbb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -834,7 +834,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
const bool hasVideo = (videoFormat != NULL);
const bool canOffload = canOffloadStream(
- audioMeta, hasVideo, true /* is_streaming */, streamType);
+ audioMeta, hasVideo, mSource->isStreaming(), streamType);
if (canOffload) {
if (!mOffloadAudio) {
mRenderer->signalEnableOffloadAudio();
@@ -1083,12 +1083,12 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
} else if (what == Renderer::kWhatMediaRenderingStart) {
ALOGV("media rendering started");
notifyListener(MEDIA_STARTED, 0, 0);
- } else if (what == Renderer::kWhatAudioOffloadTearDown) {
- ALOGV("Tear down audio offload, fall back to s/w path if due to error.");
+ } else if (what == Renderer::kWhatAudioTearDown) {
int64_t positionUs;
CHECK(msg->findInt64("positionUs", &positionUs));
int32_t reason;
CHECK(msg->findInt32("reason", &reason));
+ ALOGV("Tear down audio with reason %d.", reason);
closeAudioSink();
mAudioDecoder.clear();
++mAudioDecoderGeneration;
@@ -1100,9 +1100,22 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
}
performSeek(positionUs);
+
if (reason == Renderer::kDueToError) {
- mRenderer->signalDisableOffloadAudio();
- mOffloadAudio = false;
+ sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+ audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
+ const bool hasVideo = (videoFormat != NULL);
+ const bool canOffload = canOffloadStream(
+ audioMeta, hasVideo, mSource->isStreaming(), streamType);
+ if (canOffload) {
+ mRenderer->signalEnableOffloadAudio();
+ sp<AMessage> format = mSource->getFormat(true /*audio*/);
+ tryOpenAudioSinkForOffload(format, hasVideo);
+ } else {
+ mRenderer->signalDisableOffloadAudio();
+ mOffloadAudio = false;
+ }
instantiateDecoder(true /* audio */, &mAudioDecoder);
}
}
@@ -1140,6 +1153,22 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d",
(long long)seekTimeUs, needNotify);
+ if (!mStarted) {
+ // Seek before the player is started. In order to preview video,
+ // need to start the player and pause it. This branch is called
+ // only once if needed. After the player is started, any seek
+ // operation will go through normal path.
+ // All cases, including audio-only, are handled in the same way
+ // for the sake of simplicity.
+ onStart(seekTimeUs);
+ onPause();
+ mPausedByClient = true;
+ if (needNotify) {
+ notifyDriverSeekComplete();
+ }
+ break;
+ }
+
mDeferredActions.push_back(
new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
FLUSH_CMD_FLUSH /* video */));
@@ -1233,13 +1262,16 @@ status_t NuPlayer::onInstantiateSecureDecoders() {
return OK;
}
-void NuPlayer::onStart() {
+void NuPlayer::onStart(int64_t startPositionUs) {
mOffloadAudio = false;
mAudioEOS = false;
mVideoEOS = false;
mStarted = true;
mSource->start();
+ if (startPositionUs > 0) {
+ performSeek(startPositionUs);
+ }
uint32_t flags = 0;
@@ -1256,8 +1288,7 @@ void NuPlayer::onStart() {
sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
mOffloadAudio =
- canOffloadStream(audioMeta, (videoFormat != NULL),
- true /* is_streaming */, streamType);
+ canOffloadStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType);
if (mOffloadAudio) {
flags |= Renderer::FLAG_OFFLOAD_AUDIO;
}
@@ -1895,11 +1926,15 @@ void NuPlayer::performResumeDecoders(bool needNotify) {
void NuPlayer::finishResume() {
if (mResumePending) {
mResumePending = false;
- if (mDriver != NULL) {
- sp<NuPlayerDriver> driver = mDriver.promote();
- if (driver != NULL) {
- driver->notifySeekComplete();
- }
+ notifyDriverSeekComplete();
+ }
+}
+
+void NuPlayer::notifyDriverSeekComplete() {
+ if (mDriver != NULL) {
+ sp<NuPlayerDriver> driver = mDriver.promote();
+ if (driver != NULL) {
+ driver->notifySeekComplete();
}
}
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index f417f48..d7aa830 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -233,7 +233,7 @@ private:
void handleFlushComplete(bool audio, bool isDecoder);
void finishFlushIfPossible();
- void onStart();
+ void onStart(int64_t startPositionUs = -1);
void onResume();
void onPause();
@@ -242,6 +242,7 @@ private:
void flushDecoder(bool audio, bool needShutdown);
void finishResume();
+ void notifyDriverSeekComplete();
void postScanSources();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 376c93a..6abc81c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -68,6 +68,7 @@ NuPlayer::Decoder::Decoder(
}
NuPlayer::Decoder::~Decoder() {
+ mCodec->release();
releaseAndResetMediaBuffers();
}
@@ -414,6 +415,11 @@ bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
sp<ABuffer> buffer;
mCodec->getInputBuffer(index, &buffer);
+ if (buffer == NULL) {
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
if (index >= mInputBuffers.size()) {
for (size_t i = mInputBuffers.size(); i <= index; ++i) {
mInputBuffers.add();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 231f2e1..84ae25e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -397,23 +397,13 @@ status_t NuPlayerDriver::seekTo(int msec) {
switch (mState) {
case STATE_PREPARED:
case STATE_STOPPED_AND_PREPARED:
- {
+ case STATE_PAUSED:
mStartupSeekTimeUs = seekTimeUs;
- // pretend that the seek completed. It will actually happen when starting playback.
- // TODO: actually perform the seek here, so the player is ready to go at the new
- // location
- notifySeekComplete_l();
- break;
- }
-
+ // fall through.
case STATE_RUNNING:
- case STATE_PAUSED:
{
mAtEOS = false;
mSeekInProgress = true;
- if (mState == STATE_PAUSED) {
- mStartupSeekTimeUs = seekTimeUs;
- }
// seeks can take a while, so we essentially paused
notifyListener_l(MEDIA_PAUSED);
mPlayer->seekToAsync(seekTimeUs, true /* needNotify */);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 007a335..409dedf 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -82,7 +82,7 @@ NuPlayer::Renderer::Renderer(
mVideoRenderingStartGeneration(0),
mAudioRenderingStartGeneration(0),
mAudioOffloadPauseTimeoutGeneration(0),
- mAudioOffloadTornDown(false),
+ mAudioTornDown(false),
mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
mTotalBuffersQueued(0),
@@ -566,9 +566,9 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case kWhatAudioOffloadTearDown:
+ case kWhatAudioTearDown:
{
- onAudioOffloadTearDown(kDueToError);
+ onAudioTearDown(kDueToError);
break;
}
@@ -580,7 +580,7 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
break;
}
ALOGV("Audio Offload tear down due to pause timeout.");
- onAudioOffloadTearDown(kDueToTimeout);
+ onAudioTearDown(kDueToTimeout);
mWakeLock->release();
break;
}
@@ -648,7 +648,7 @@ size_t NuPlayer::Renderer::AudioSinkCallback(
case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
{
- me->notifyAudioOffloadTearDown();
+ me->notifyAudioTearDown();
break;
}
}
@@ -792,6 +792,7 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
ALOGW("AudioSink write would block when writing %zu bytes", copy);
} else {
ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
+ notifyAudioTearDown();
}
break;
}
@@ -1060,8 +1061,8 @@ void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t del
notify->post(delayUs);
}
-void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
- (new AMessage(kWhatAudioOffloadTearDown, this))->post();
+void NuPlayer::Renderer::notifyAudioTearDown() {
+ (new AMessage(kWhatAudioTearDown, this))->post();
}
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
@@ -1480,11 +1481,11 @@ int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
return durationUs;
}
-void NuPlayer::Renderer::onAudioOffloadTearDown(AudioOffloadTearDownReason reason) {
- if (mAudioOffloadTornDown) {
+void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
+ if (mAudioTornDown) {
return;
}
- mAudioOffloadTornDown = true;
+ mAudioTornDown = true;
int64_t currentPositionUs;
if (getCurrentPosition(&currentPositionUs) != OK) {
@@ -1495,7 +1496,7 @@ void NuPlayer::Renderer::onAudioOffloadTearDown(AudioOffloadTearDownReason reaso
mAudioSink->flush();
sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatAudioOffloadTearDown);
+ notify->setInt32("what", kWhatAudioTearDown);
notify->setInt64("positionUs", currentPositionUs);
notify->setInt32("reason", reason);
notify->post();
@@ -1653,7 +1654,9 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
8 /* bufferCount */,
NULL,
NULL,
- (audio_output_flags_t)pcmFlags);
+ (audio_output_flags_t)pcmFlags,
+ NULL,
+ true /* doNotReconnect */);
if (err == OK) {
err = mAudioSink->setPlaybackRate(mPlaybackSettings);
}
@@ -1668,9 +1671,7 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
if (audioSinkChanged) {
onAudioSinkChanged();
}
- if (offloadingAudio()) {
- mAudioOffloadTornDown = false;
- }
+ mAudioTornDown = false;
return OK;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 928b71b..fbdf5bf 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -82,16 +82,16 @@ struct NuPlayer::Renderer : public AHandler {
void closeAudioSink();
enum {
- kWhatEOS = 'eos ',
- kWhatFlushComplete = 'fluC',
- kWhatPosition = 'posi',
- kWhatVideoRenderingStart = 'vdrd',
- kWhatMediaRenderingStart = 'mdrd',
- kWhatAudioOffloadTearDown = 'aOTD',
+ kWhatEOS = 'eos ',
+ kWhatFlushComplete = 'fluC',
+ kWhatPosition = 'posi',
+ kWhatVideoRenderingStart = 'vdrd',
+ kWhatMediaRenderingStart = 'mdrd',
+ kWhatAudioTearDown = 'adTD',
kWhatAudioOffloadPauseTimeout = 'aOPT',
};
- enum AudioOffloadTearDownReason {
+ enum AudioTearDownReason {
kDueToError = 0,
kDueToTimeout,
};
@@ -179,7 +179,7 @@ private:
int64_t mLastPositionUpdateUs;
int32_t mAudioOffloadPauseTimeoutGeneration;
- bool mAudioOffloadTornDown;
+ bool mAudioTornDown;
audio_offload_info_t mCurrentOffloadInfo;
struct PcmInfo {
@@ -242,7 +242,7 @@ private:
int32_t getQueueGeneration(bool audio);
int32_t getDrainGeneration(bool audio);
bool getSyncQueues();
- void onAudioOffloadTearDown(AudioOffloadTearDownReason reason);
+ void onAudioTearDown(AudioTearDownReason reason);
status_t onOpenAudioSink(
const sp<AMessage> &format,
bool offloadOnly,
@@ -255,7 +255,7 @@ private:
void notifyPosition();
void notifyVideoLateBy(int64_t lateByUs);
void notifyVideoRenderingStart();
- void notifyAudioOffloadTearDown();
+ void notifyAudioTearDown();
void flushQueue(List<QueueEntry> *queue);
bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index ef1ba13..11a6a9f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -118,6 +118,10 @@ struct NuPlayer::Source : public AHandler {
return false;
}
+ virtual bool isStreaming() const {
+ return true;
+ }
+
protected:
virtual ~Source() {}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 34bd4c7..cd20979 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -725,9 +725,9 @@ status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
info.mData = new ABuffer(ptr, bufSize);
} else if (mQuirks & requiresAllocateBufferBit) {
err = mOMX->allocateBufferWithBackup(
- mNode, portIndex, mem, &info.mBufferID);
+ mNode, portIndex, mem, &info.mBufferID, def.nBufferSize);
} else {
- err = mOMX->useBuffer(mNode, portIndex, mem, &info.mBufferID);
+ err = mOMX->useBuffer(mNode, portIndex, mem, &info.mBufferID, def.nBufferSize);
}
if (mem != NULL) {
@@ -997,7 +997,7 @@ status_t ACodec::allocateOutputMetaDataBuffers() {
// we use useBuffer for metadata regardless of quirks
err = mOMX->useBuffer(
- mNode, kPortIndexOutput, mem, &info.mBufferID);
+ mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
mBuffers[kPortIndexOutput].push(info);
@@ -2857,7 +2857,9 @@ status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
break;
}
- ALOGI("setupVideoEncoder succeeded");
+ if (err == OK) {
+ ALOGI("setupVideoEncoder succeeded");
+ }
return err;
}
@@ -5124,7 +5126,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
sp<CodecObserver> observer = new CodecObserver;
IOMX::node_id node = 0;
- status_t err = OMX_ErrorComponentNotFound;
+ status_t err = NAME_NOT_FOUND;
for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
++matchIndex) {
componentName = matchingCodecs.itemAt(matchIndex).mName.string();
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 99f9d94..0dfa300 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -125,7 +125,9 @@ LOCAL_SHARED_LIBRARIES += \
libdl \
libRScpp \
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall -DENABLE_STAGEFRIGHT_EXPERIMENTS
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wno-error=deprecated-declarations -Wall \
+ -DENABLE_STAGEFRIGHT_EXPERIMENTS
+
LOCAL_CLANG := true
LOCAL_MODULE:= libstagefright
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index e5a6a9b..34f0148 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -85,6 +85,9 @@ AudioSource::AudioSource(
this,
frameCount /*notificationFrames*/);
mInitCheck = mRecord->initCheck();
+ if (mInitCheck != OK) {
+ mRecord.clear();
+ }
} else {
mInitCheck = status;
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c8dcef1..f918d2d 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -25,6 +25,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
+#include <gui/BufferQueue.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
#include <media/IOMX.h>
@@ -192,6 +193,27 @@ sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
CHECK_EQ(client.connect(), (status_t)OK);
sp<IOMX> omx = client.interface();
+ const sp<IMediaCodecList> mediaCodecList = MediaCodecList::getInstance();
+ if (mediaCodecList == NULL) {
+ ALOGE("Failed to obtain MediaCodecList!");
+ return NULL; // if called from Java should raise IOException
+ }
+
+ AString tmp;
+ sp<AMessage> globalSettings = mediaCodecList->getGlobalSettings();
+ if (globalSettings == NULL || !globalSettings->findString(
+ kMaxEncoderInputBuffers, &tmp)) {
+ ALOGE("Failed to get encoder input buffer count!");
+ return NULL;
+ }
+
+ int32_t bufferCount = strtol(tmp.c_str(), NULL, 10);
+ if (bufferCount <= 0
+ || bufferCount > BufferQueue::MAX_MAX_ACQUIRED_BUFFERS) {
+ ALOGE("Encoder input buffer count is invalid!");
+ return NULL;
+ }
+
sp<IGraphicBufferProducer> bufferProducer;
sp<IGraphicBufferConsumer> bufferConsumer;
@@ -203,6 +225,14 @@ sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
return NULL;
}
+ err = bufferConsumer->setMaxAcquiredBufferCount(bufferCount);
+
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+ bufferCount, err);
+ return NULL;
+ }
+
return new PersistentSurface(bufferProducer, bufferConsumer);
}
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index d2352bc..f366b1f 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -42,6 +42,8 @@
namespace android {
+const char *kMaxEncoderInputBuffers = "max-video-encoder-input-buffers";
+
static Mutex sInitMutex;
static MediaCodecList *gCodecList = NULL;
@@ -71,10 +73,24 @@ sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
if (gCodecList->initCheck() == OK) {
sCodecList = gCodecList;
- struct stat s;
- if (stat(kProfilingResults, &s) == -1) {
+ FILE *resultsFile = fopen(kProfilingResults, "r");
+ if (resultsFile) {
+ AString currentVersion = getProfilingVersionString();
+ size_t currentVersionSize = currentVersion.size();
+ char *versionString = new char[currentVersionSize];
+ fgets(versionString, currentVersionSize, resultsFile);
+ if (strncmp(versionString, currentVersion.c_str(), currentVersionSize) != 0) {
+ // profiling result out of date
+ profilingNeeded = true;
+ }
+ fclose(resultsFile);
+ delete[] versionString;
+ } else {
// profiling results doesn't existed
profilingNeeded = true;
+ }
+
+ if (profilingNeeded) {
for (size_t i = 0; i < gCodecList->countCodecs(); ++i) {
infos.push_back(gCodecList->getCodecInfo(i));
}
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 0d95676..a928163 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -20,18 +20,30 @@
#include "MediaCodecListOverrides.h"
+#include <cutils/properties.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
#include <media/IMediaCodecList.h>
#include <media/MediaCodecInfo.h>
#include <media/MediaResourcePolicy.h>
+#include <media/openmax/OMX_IVCommon.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecList.h>
namespace android {
const char *kProfilingResults = "/data/misc/media/media_codecs_profiling_results.xml";
+AString getProfilingVersionString() {
+ char val[PROPERTY_VALUE_MAX];
+ if (property_get("ro.build.display.id", val, NULL) && (strlen(val) > 0)) {
+ return AStringPrintf("<!-- Profiled-with: %s -->", val);
+ }
+
+ return "<!-- Profiled-with: UNKNOWN_BUILD_ID -->";
+}
+
// a limit to avoid allocating unreasonable number of codec instances in the measurement.
// this should be in sync with the MAX_SUPPORTED_INSTANCES defined in MediaCodecInfo.java.
static const int kMaxInstances = 32;
@@ -86,6 +98,7 @@ static sp<AMessage> getMeasureFormat(
int32_t bitrate = 0;
getMeasureBitrate(caps, &bitrate);
format->setInt32("bitrate", bitrate);
+ format->setInt32("encoder", 1);
}
if (mime.startsWith("video/")) {
@@ -114,15 +127,67 @@ static sp<AMessage> getMeasureFormat(
return format;
}
+static size_t doProfileEncoderInputBuffers(
+ AString name, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
+ ALOGV("doProfileEncoderInputBuffers: name %s, mime %s", name.c_str(), mime.c_str());
+
+ sp<AMessage> format = getMeasureFormat(true /* isEncoder */, mime, caps);
+ if (format == NULL) {
+ return 0;
+ }
+
+ format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+ ALOGV("doProfileEncoderInputBuffers: format %s", format->debugString().c_str());
+
+ status_t err = OK;
+ sp<ALooper> looper = new ALooper;
+ looper->setName("MediaCodec_looper");
+ looper->start(
+ false /* runOnCallingThread */, false /* canCallJava */, ANDROID_PRIORITY_AUDIO);
+
+ sp<MediaCodec> codec = MediaCodec::CreateByComponentName(looper, name.c_str(), &err);
+ if (err != OK) {
+ ALOGE("Failed to create codec: %s", name.c_str());
+ return 0;
+ }
+
+ err = codec->configure(format, NULL, NULL, MediaCodec::CONFIGURE_FLAG_ENCODE);
+ if (err != OK) {
+ ALOGE("Failed to configure codec: %s with mime: %s", name.c_str(), mime.c_str());
+ codec->release();
+ return 0;
+ }
+
+ sp<IGraphicBufferProducer> bufferProducer;
+ err = codec->createInputSurface(&bufferProducer);
+ if (err != OK) {
+ ALOGE("Failed to create surface: %s with mime: %s", name.c_str(), mime.c_str());
+ codec->release();
+ return 0;
+ }
+
+ int minUndequeued = 0;
+ err = bufferProducer->query(
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeued);
+ if (err != OK) {
+ ALOGE("Failed to query NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS");
+ minUndequeued = 0;
+ }
+
+ err = codec->release();
+ if (err != OK) {
+ ALOGW("Failed to release codec: %s with mime: %s", name.c_str(), mime.c_str());
+ }
+
+ return minUndequeued;
+}
+
static size_t doProfileCodecs(
bool isEncoder, AString name, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
sp<AMessage> format = getMeasureFormat(isEncoder, mime, caps);
if (format == NULL) {
return 0;
}
- if (isEncoder) {
- format->setInt32("encoder", 1);
- }
ALOGV("doProfileCodecs %s %s %s %s",
name.c_str(), mime.c_str(), isEncoder ? "encoder" : "decoder",
format->debugString().c_str());
@@ -144,7 +209,7 @@ static size_t doProfileCodecs(
}
const sp<Surface> nativeWindow;
const sp<ICrypto> crypto;
- uint32_t flags = 0;
+ uint32_t flags = isEncoder ? MediaCodec::CONFIGURE_FLAG_ENCODE : 0;
ALOGV("doProfileCodecs configure");
err = codec->configure(format, nativeWindow, crypto, flags);
if (err != OK) {
@@ -211,6 +276,7 @@ void profileCodecs(
bool forceToMeasure) {
KeyedVector<AString, sp<MediaCodecInfo::Capabilities>> codecsNeedMeasure;
AString supportMultipleSecureCodecs = "true";
+ size_t maxEncoderInputBuffers = 0;
for (size_t i = 0; i < infos.size(); ++i) {
const sp<MediaCodecInfo> info = infos[i];
AString name = info->getCodecName();
@@ -251,9 +317,21 @@ void profileCodecs(
supportMultipleSecureCodecs = "false";
}
}
+ if (info->isEncoder() && mimes[i].startsWith("video/")) {
+ size_t encoderInputBuffers =
+ doProfileEncoderInputBuffers(name, mimes[i], caps);
+ if (encoderInputBuffers > maxEncoderInputBuffers) {
+ maxEncoderInputBuffers = encoderInputBuffers;
+ }
+ }
}
}
}
+ if (maxEncoderInputBuffers > 0) {
+ char tmp[32];
+ sprintf(tmp, "%zu", maxEncoderInputBuffers);
+ global_results->add(kMaxEncoderInputBuffers, tmp);
+ }
global_results->add(kPolicySupportsMultipleSecureCodecs, supportMultipleSecureCodecs);
}
@@ -307,6 +385,8 @@ void exportResultsToXML(
}
AString overrides;
+ overrides.append(getProfilingVersionString());
+ overrides.append("\n");
overrides.append("<MediaCodecs>\n");
if (global_results.size() > 0) {
overrides.append(" <Settings>\n");
diff --git a/media/libstagefright/MediaCodecListOverrides.h b/media/libstagefright/MediaCodecListOverrides.h
index e350d2a..d4bb225 100644
--- a/media/libstagefright/MediaCodecListOverrides.h
+++ b/media/libstagefright/MediaCodecListOverrides.h
@@ -26,10 +26,13 @@
namespace android {
+extern const char *kProfilingVersionString;
extern const char *kProfilingResults;
struct MediaCodecInfo;
+AString getProfilingVersionString();
+
bool splitString(const AString &s, const AString &delimiter, AString *s1, AString *s2);
// profile codecs and save the result to xml file named kProfilingResults.
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 1c53b40..f82636b 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -583,6 +583,13 @@ ssize_t NuCachedSource2::readInternal(off64_t offset, void *data, size_t size) {
Mutex::Autolock autoLock(mLock);
+ // If we're disconnecting, return EOS and don't access *data pointer.
+ // data could be on the stack of the caller to NuCachedSource2::readAt(),
+ // which may have exited already.
+ if (mDisconnecting) {
+ return ERROR_END_OF_STREAM;
+ }
+
if (!mFetching) {
mLastAccessPos = offset;
restartPrefetcherIfNecessary_l(
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index f1ebea2..c91a37f 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -90,7 +90,7 @@ struct MuxOMX : public IOMX {
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer);
+ buffer_id *buffer, OMX_U32 allottedSize);
virtual status_t useGraphicBuffer(
node_id node, OMX_U32 port_index,
@@ -120,7 +120,7 @@ struct MuxOMX : public IOMX {
virtual status_t allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer);
+ buffer_id *buffer, OMX_U32 allottedSize);
virtual status_t freeBuffer(
node_id node, OMX_U32 port_index, buffer_id buffer);
@@ -322,8 +322,8 @@ status_t MuxOMX::getGraphicBufferUsage(
status_t MuxOMX::useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) {
- return getOMX(node)->useBuffer(node, port_index, params, buffer);
+ buffer_id *buffer, OMX_U32 allottedSize) {
+ return getOMX(node)->useBuffer(node, port_index, params, buffer, allottedSize);
}
status_t MuxOMX::useGraphicBuffer(
@@ -375,9 +375,9 @@ status_t MuxOMX::allocateBuffer(
status_t MuxOMX::allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) {
+ buffer_id *buffer, OMX_U32 allottedSize) {
return getOMX(node)->allocateBufferWithBackup(
- node, port_index, params, buffer);
+ node, port_index, params, buffer, allottedSize);
}
status_t MuxOMX::freeBuffer(
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index aa6a7c0..927cc6c 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1678,7 +1678,7 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
&info.mData);
} else {
err = mOMX->allocateBufferWithBackup(
- mNode, portIndex, mem, &buffer);
+ mNode, portIndex, mem, &buffer, mem->size());
}
} else if (portIndex == kPortIndexOutput
&& (mQuirks & kRequiresAllocateBufferOnOutputPorts)) {
@@ -1690,10 +1690,10 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
&info.mData);
} else {
err = mOMX->allocateBufferWithBackup(
- mNode, portIndex, mem, &buffer);
+ mNode, portIndex, mem, &buffer, mem->size());
}
} else {
- err = mOMX->useBuffer(mNode, portIndex, mem, &buffer);
+ err = mOMX->useBuffer(mNode, portIndex, mem, &buffer, mem->size());
}
if (err != OK) {
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index d1b0f76..a9723ea 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -428,7 +428,15 @@ void SoftAMR::onQueueFilled(OMX_U32 /* portIndex */) {
}
}
-void SoftAMR::onPortFlushCompleted(OMX_U32 /* portIndex */) {
+void SoftAMR::onPortFlushCompleted(OMX_U32 portIndex) {
+ ALOGV("onPortFlushCompleted portindex %d, resetting frame ", portIndex);
+ if (portIndex == 0) {
+ if (mMode == MODE_NARROW) {
+ Speech_Decode_Frame_reset(mState);
+ } else {
+ pvDecoder_AmrWb_Reset(mState, 0 /* reset_all */);
+ }
+ }
}
void SoftAMR::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 6afac74..449d195 100755
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -1188,7 +1188,10 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
BufferInfo *outputBufferInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outputBufferHeader = outputBufferInfo->mHeader;
- if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS &&
+ inputBufferHeader->nFilledLen == 0) {
+ mSawInputEOS = true;
+
inQueue.erase(inQueue.begin());
inputBufferInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inputBufferHeader);
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index 4dd340c..8eb8f6c 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -20,7 +20,9 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
#include <utils/threads.h>
+#include <utils/KeyedVector.h>
#include <utils/Vector.h>
namespace android {
@@ -54,10 +56,21 @@ private:
Vector<sp<AnotherPacketSource> > mSourceImpls;
+ Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
+ // Sync points used for seeking --- normally one for video track is used.
+ // If no video track is present, audio track will be used instead.
+ KeyedVector<int64_t, off64_t> *mSeekSyncPoints;
+
off64_t mOffset;
void init();
status_t feedMore();
+ status_t seek(int64_t seekTimeUs,
+ const MediaSource::ReadOptions::SeekMode& seekMode);
+ status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
+ status_t seekBeyond(int64_t seekTimeUs);
+
+ status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
};
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index c183208..fa1e03f 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -81,7 +81,7 @@ public:
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer);
+ buffer_id *buffer, OMX_U32 allottedSize);
virtual status_t useGraphicBuffer(
node_id node, OMX_U32 port_index,
@@ -111,7 +111,7 @@ public:
virtual status_t allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer);
+ buffer_id *buffer, OMX_U32 allottedSize);
virtual status_t freeBuffer(
node_id node, OMX_U32 port_index, buffer_id buffer);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index ad1e181..e067598 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -70,7 +70,7 @@ struct OMXNodeInstance {
status_t useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer);
+ OMX::buffer_id *buffer, OMX_U32 allottedSize);
status_t useGraphicBuffer(
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
@@ -98,7 +98,7 @@ struct OMXNodeInstance {
status_t allocateBufferWithBackup(
OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer);
+ OMX::buffer_id *buffer, OMX_U32 allottedSize);
status_t freeBuffer(OMX_U32 portIndex, OMX::buffer_id buffer);
@@ -151,12 +151,11 @@ private:
OMX::buffer_id mID;
};
Vector<ActiveBuffer> mActiveBuffers;
-#ifdef __LP64__
+ // for buffer ptr to buffer id translation
Mutex mBufferIDLock;
uint32_t mBufferIDCount;
KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
-#endif
// For debug support
char *mName;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index e8b2219..53423ec 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -54,10 +54,13 @@ struct ATSParser::Program : public RefBase {
bool parsePSISection(
unsigned pid, ABitReader *br, status_t *err);
+ // Pass to appropriate stream according to pid, and set event if it's a PES
+ // with a sync frame.
+ // Note that the method itself does not touch event.
bool parsePID(
unsigned pid, unsigned continuity_counter,
unsigned payload_unit_start_indicator,
- ABitReader *br, status_t *err);
+ ABitReader *br, status_t *err, SyncEvent *event);
void signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra);
@@ -118,10 +121,14 @@ struct ATSParser::Stream : public RefBase {
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
+ // Parse the payload and set event when PES with a sync frame is detected.
+ // This method knows when a PES starts; so record mPesStartOffset in that
+ // case.
status_t parse(
unsigned continuity_counter,
unsigned payload_unit_start_indicator,
- ABitReader *br);
+ ABitReader *br,
+ SyncEvent *event);
void signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra);
@@ -150,17 +157,24 @@ private:
bool mEOSReached;
uint64_t mPrevPTS;
+ off64_t mPesStartOffset;
ElementaryStreamQueue *mQueue;
- status_t flush();
- status_t parsePES(ABitReader *br);
-
+ // Flush accumulated payload if necessary --- i.e. at EOS or at the start of
+ // another payload. event is set if the flushed payload is PES with a sync
+ // frame.
+ status_t flush(SyncEvent *event);
+ // Strip and parse PES headers and pass remaining payload into onPayload
+ // with parsed metadata. event is set if the PES contains a sync frame.
+ status_t parsePES(ABitReader *br, SyncEvent *event);
+
+ // Feed the payload into mQueue and if a packet is identified, queue it
+ // into mSource. If the packet is a sync frame. set event with start offset
+ // and timestamp of the packet.
void onPayloadData(
unsigned PTS_DTS_flags, uint64_t PTS, uint64_t DTS,
- const uint8_t *data, size_t size);
-
- void extractAACFrames(const sp<ABuffer> &buffer);
+ const uint8_t *data, size_t size, SyncEvent *event);
DISALLOW_EVIL_CONSTRUCTORS(Stream);
};
@@ -190,6 +204,17 @@ private:
DISALLOW_EVIL_CONSTRUCTORS(PSISection);
};
+ATSParser::SyncEvent::SyncEvent(off64_t offset)
+ : mInit(false), mOffset(offset), mTimeUs(0) {}
+
+void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
+ int64_t timeUs) {
+ mInit = true;
+ mOffset = offset;
+ mMediaSource = source;
+ mTimeUs = timeUs;
+}
+
////////////////////////////////////////////////////////////////////////////////
ATSParser::Program::Program(
@@ -220,7 +245,7 @@ bool ATSParser::Program::parsePSISection(
bool ATSParser::Program::parsePID(
unsigned pid, unsigned continuity_counter,
unsigned payload_unit_start_indicator,
- ABitReader *br, status_t *err) {
+ ABitReader *br, status_t *err, SyncEvent *event) {
*err = OK;
ssize_t index = mStreams.indexOfKey(pid);
@@ -229,7 +254,7 @@ bool ATSParser::Program::parsePID(
}
*err = mStreams.editValueAt(index)->parse(
- continuity_counter, payload_unit_start_indicator, br);
+ continuity_counter, payload_unit_start_indicator, br, event);
return true;
}
@@ -628,7 +653,8 @@ ATSParser::Stream::~Stream() {
status_t ATSParser::Stream::parse(
unsigned continuity_counter,
- unsigned payload_unit_start_indicator, ABitReader *br) {
+ unsigned payload_unit_start_indicator, ABitReader *br,
+ SyncEvent *event) {
if (mQueue == NULL) {
return OK;
}
@@ -659,12 +685,13 @@ status_t ATSParser::Stream::parse(
mExpectedContinuityCounter = (continuity_counter + 1) & 0x0f;
if (payload_unit_start_indicator) {
+ off64_t offset = (event != NULL) ? event->getOffset() : 0;
if (mPayloadStarted) {
// Otherwise we run the danger of receiving the trailing bytes
// of a PES packet that we never saw the start of and assuming
// we have a a complete PES packet.
- status_t err = flush();
+ status_t err = flush(event);
if (err != OK) {
return err;
@@ -672,6 +699,7 @@ status_t ATSParser::Stream::parse(
}
mPayloadStarted = true;
+ mPesStartOffset = offset;
}
if (!mPayloadStarted) {
@@ -785,10 +813,10 @@ void ATSParser::Stream::signalEOS(status_t finalResult) {
mSource->signalEOS(finalResult);
}
mEOSReached = true;
- flush();
+ flush(NULL);
}
-status_t ATSParser::Stream::parsePES(ABitReader *br) {
+status_t ATSParser::Stream::parsePES(ABitReader *br, SyncEvent *event) {
unsigned packet_startcode_prefix = br->getBits(24);
ALOGV("packet_startcode_prefix = 0x%08x", packet_startcode_prefix);
@@ -973,13 +1001,13 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
}
onPayloadData(
- PTS_DTS_flags, PTS, DTS, br->data(), dataLength);
+ PTS_DTS_flags, PTS, DTS, br->data(), dataLength, event);
br->skipBits(dataLength * 8);
} else {
onPayloadData(
PTS_DTS_flags, PTS, DTS,
- br->data(), br->numBitsLeft() / 8);
+ br->data(), br->numBitsLeft() / 8, event);
size_t payloadSizeBits = br->numBitsLeft();
if (payloadSizeBits % 8 != 0u) {
@@ -1003,8 +1031,8 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
return OK;
}
-status_t ATSParser::Stream::flush() {
- if (mBuffer->size() == 0) {
+status_t ATSParser::Stream::flush(SyncEvent *event) {
+ if (mBuffer == NULL || mBuffer->size() == 0) {
return OK;
}
@@ -1012,7 +1040,7 @@ status_t ATSParser::Stream::flush() {
ABitReader br(mBuffer->data(), mBuffer->size());
- status_t err = parsePES(&br);
+ status_t err = parsePES(&br, event);
mBuffer->setRange(0, 0);
@@ -1021,7 +1049,7 @@ status_t ATSParser::Stream::flush() {
void ATSParser::Stream::onPayloadData(
unsigned PTS_DTS_flags, uint64_t PTS, uint64_t /* DTS */,
- const uint8_t *data, size_t size) {
+ const uint8_t *data, size_t size, SyncEvent *event) {
#if 0
ALOGI("payload streamType 0x%02x, PTS = 0x%016llx, dPTS = %lld",
mStreamType,
@@ -1048,6 +1076,7 @@ void ATSParser::Stream::onPayloadData(
}
sp<ABuffer> accessUnit;
+ bool found = false;
while ((accessUnit = mQueue->dequeueAccessUnit()) != NULL) {
if (mSource == NULL) {
sp<MetaData> meta = mQueue->getFormat();
@@ -1075,6 +1104,17 @@ void ATSParser::Stream::onPayloadData(
}
mSource->queueAccessUnit(accessUnit);
}
+
+ if ((event != NULL) && !found && mQueue->getFormat() != NULL) {
+ int32_t sync = 0;
+ if (accessUnit->meta()->findInt32("isSync", &sync) && sync) {
+ int64_t timeUs;
+ if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
+ found = true;
+ event->init(mPesStartOffset, mSource, timeUs);
+ }
+ }
+ }
}
}
@@ -1127,14 +1167,15 @@ ATSParser::ATSParser(uint32_t flags)
ATSParser::~ATSParser() {
}
-status_t ATSParser::feedTSPacket(const void *data, size_t size) {
+status_t ATSParser::feedTSPacket(const void *data, size_t size,
+ SyncEvent *event) {
if (size != kTSPacketSize) {
ALOGE("Wrong TS packet size");
return BAD_VALUE;
}
ABitReader br((const uint8_t *)data, kTSPacketSize);
- return parseTS(&br);
+ return parseTS(&br, event);
}
void ATSParser::signalDiscontinuity(
@@ -1262,7 +1303,8 @@ void ATSParser::parseProgramAssociationTable(ABitReader *br) {
status_t ATSParser::parsePID(
ABitReader *br, unsigned PID,
unsigned continuity_counter,
- unsigned payload_unit_start_indicator) {
+ unsigned payload_unit_start_indicator,
+ SyncEvent *event) {
ssize_t sectionIndex = mPSISections.indexOfKey(PID);
if (sectionIndex >= 0) {
@@ -1334,7 +1376,7 @@ status_t ATSParser::parsePID(
status_t err;
if (mPrograms.editItemAt(i)->parsePID(
PID, continuity_counter, payload_unit_start_indicator,
- br, &err)) {
+ br, &err, event)) {
if (err != OK) {
return err;
}
@@ -1405,7 +1447,7 @@ status_t ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
return OK;
}
-status_t ATSParser::parseTS(ABitReader *br) {
+status_t ATSParser::parseTS(ABitReader *br, SyncEvent *event) {
ALOGV("---");
unsigned sync_byte = br->getBits(8);
@@ -1444,8 +1486,8 @@ status_t ATSParser::parseTS(ABitReader *br) {
}
if (err == OK) {
if (adaptation_field_control == 1 || adaptation_field_control == 3) {
- err = parsePID(
- br, PID, continuity_counter, payload_unit_start_indicator);
+ err = parsePID(br, PID, continuity_counter,
+ payload_unit_start_indicator, event);
}
}
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 4def333..430a8d5 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -22,6 +22,7 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaSource.h>
#include <utils/KeyedVector.h>
#include <utils/Vector.h>
#include <utils/RefBase.h>
@@ -30,7 +31,6 @@ namespace android {
class ABitReader;
struct ABuffer;
-struct MediaSource;
struct ATSParser : public RefBase {
enum DiscontinuityType {
@@ -62,9 +62,43 @@ struct ATSParser : public RefBase {
ALIGNED_VIDEO_DATA = 2,
};
+ // Event is used to signal sync point event at feedTSPacket().
+ struct SyncEvent {
+ SyncEvent(off64_t offset);
+
+ void init(off64_t offset, const sp<MediaSource> &source,
+ int64_t timeUs);
+
+ bool isInit() { return mInit; }
+ off64_t getOffset() { return mOffset; }
+ const sp<MediaSource> &getMediaSource() { return mMediaSource; }
+ int64_t getTimeUs() { return mTimeUs; }
+
+ private:
+ bool mInit;
+ /*
+ * mInit == false: the current offset
+ * mInit == true: the start offset of sync payload
+ */
+ off64_t mOffset;
+ /* The media source object for this event. */
+ sp<MediaSource> mMediaSource;
+ /* The timestamp of the sync frame. */
+ int64_t mTimeUs;
+ };
+
ATSParser(uint32_t flags = 0);
- status_t feedTSPacket(const void *data, size_t size);
+ // Feed a TS packet into the parser. uninitialized event with the start
+ // offset of this TS packet goes in, and if the parser detects PES with
+ // a sync frame, the event will be initiailzed with the start offset of the
+ // PES. Note that the offset of the event can be different from what we fed,
+ // as a PES may consist of multiple TS packets.
+ //
+ // Even in the case feedTSPacket() returns non-OK value, event still may be
+ // initialized if the parsing failed after the detection.
+ status_t feedTSPacket(
+ const void *data, size_t size, SyncEvent *event = NULL);
void signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra);
@@ -126,15 +160,25 @@ private:
void parseProgramAssociationTable(ABitReader *br);
void parseProgramMap(ABitReader *br);
- void parsePES(ABitReader *br);
-
+ // Parse PES packet where br is pointing to. If the PES contains a sync
+ // frame, set event with the time and the start offset of this PES.
+ // Note that the method itself does not touch event.
+ void parsePES(ABitReader *br, SyncEvent *event);
+
+ // Strip remaining packet headers and pass to appropriate program/stream
+ // to parse the payload. If the payload turns out to be PES and contains
+ // a sync frame, event shall be set with the time and start offset of the
+ // PES.
+ // Note that the method itself does not touch event.
status_t parsePID(
ABitReader *br, unsigned PID,
unsigned continuity_counter,
- unsigned payload_unit_start_indicator);
+ unsigned payload_unit_start_indicator,
+ SyncEvent *event);
status_t parseAdaptationField(ABitReader *br, unsigned PID);
- status_t parseTS(ABitReader *br);
+ // see feedTSPacket().
+ status_t parseTS(ABitReader *br, SyncEvent *event);
void updatePCR(unsigned PID, uint64_t PCR, size_t byteOffsetFromStart);
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index f5c33cf..aae3e9f 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -16,17 +16,22 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG2TSExtractor"
+
+#include <inttypes.h>
#include <utils/Log.h>
#include "include/MPEG2TSExtractor.h"
#include "include/NuCachedSource2.h"
+#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <media/IStreamSource.h>
#include <utils/String8.h>
#include "AnotherPacketSource.h"
@@ -40,7 +45,7 @@ struct MPEG2TSSource : public MediaSource {
MPEG2TSSource(
const sp<MPEG2TSExtractor> &extractor,
const sp<AnotherPacketSource> &impl,
- bool seekable);
+ bool doesSeek);
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
@@ -54,8 +59,8 @@ private:
sp<AnotherPacketSource> mImpl;
// If there are both audio and video streams, only the video stream
- // will be seekable, otherwise the single stream will be seekable.
- bool mSeekable;
+ // will signal seek on the extractor; otherwise the single stream will seek.
+ bool mDoesSeek;
DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSSource);
};
@@ -63,10 +68,10 @@ private:
MPEG2TSSource::MPEG2TSSource(
const sp<MPEG2TSExtractor> &extractor,
const sp<AnotherPacketSource> &impl,
- bool seekable)
+ bool doesSeek)
: mExtractor(extractor),
mImpl(impl),
- mSeekable(seekable) {
+ mDoesSeek(doesSeek) {
}
status_t MPEG2TSSource::start(MetaData *params) {
@@ -85,27 +90,18 @@ status_t MPEG2TSSource::read(
MediaBuffer **out, const ReadOptions *options) {
*out = NULL;
- status_t finalResult;
- while (!mImpl->hasBufferAvailable(&finalResult)) {
- if (finalResult != OK) {
- return ERROR_END_OF_STREAM;
- }
-
- status_t err = mExtractor->feedMore();
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode seekMode;
+ if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
+ // seek is needed
+ status_t err = mExtractor->seek(seekTimeUs, seekMode);
if (err != OK) {
- mImpl->signalEOS(err);
+ return err;
}
}
- int64_t seekTimeUs;
- ReadOptions::SeekMode seekMode;
- if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
- // A seek was requested, but we don't actually support seeking and so can only "seek" to
- // the current position
- int64_t nextBufTimeUs;
- if (mImpl->nextBufferTime(&nextBufTimeUs) != OK || seekTimeUs != nextBufTimeUs) {
- return ERROR_UNSUPPORTED;
- }
+ if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
+ return ERROR_END_OF_STREAM;
}
return mImpl->read(out, options);
@@ -129,23 +125,10 @@ sp<MediaSource> MPEG2TSExtractor::getTrack(size_t index) {
return NULL;
}
- bool seekable = true;
- if (mSourceImpls.size() > 1) {
- if (mSourceImpls.size() != 2u) {
- ALOGE("Wrong size");
- return NULL;
- }
-
- sp<MetaData> meta = mSourceImpls.editItemAt(index)->getFormat();
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
-
- if (!strncasecmp("audio/", mime, 6)) {
- seekable = false;
- }
- }
-
- return new MPEG2TSSource(this, mSourceImpls.editItemAt(index), seekable);
+ // The seek reference track (video if present; audio otherwise) performs
+ // seek requests, while other tracks ignore requests.
+ return new MPEG2TSSource(this, mSourceImpls.editItemAt(index),
+ (mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
}
sp<MetaData> MPEG2TSExtractor::getTrackMetaData(
@@ -164,7 +147,7 @@ sp<MetaData> MPEG2TSExtractor::getMetaData() {
void MPEG2TSExtractor::init() {
bool haveAudio = false;
bool haveVideo = false;
- int numPacketsParsed = 0;
+ int64_t startTime = ALooper::GetNowUs();
while (feedMore() == OK) {
if (haveAudio && haveVideo) {
@@ -178,6 +161,8 @@ void MPEG2TSExtractor::init() {
if (impl != NULL) {
haveVideo = true;
mSourceImpls.push(impl);
+ mSyncPoints.push();
+ mSeekSyncPoints = &mSyncPoints.editTop();
}
}
@@ -189,15 +174,75 @@ void MPEG2TSExtractor::init() {
if (impl != NULL) {
haveAudio = true;
mSourceImpls.push(impl);
+ mSyncPoints.push();
+ if (!haveVideo) {
+ mSeekSyncPoints = &mSyncPoints.editTop();
+ }
}
}
- if (++numPacketsParsed > 10000) {
+ // Wait only for 2 seconds to detect audio/video streams.
+ if (ALooper::GetNowUs() - startTime > 2000000ll) {
break;
}
}
- ALOGI("haveAudio=%d, haveVideo=%d", haveAudio, haveVideo);
+ off64_t size;
+ if (mDataSource->getSize(&size) == OK && (haveAudio || haveVideo)) {
+ sp<AnotherPacketSource> impl = haveVideo
+ ? (AnotherPacketSource *)mParser->getSource(
+ ATSParser::VIDEO).get()
+ : (AnotherPacketSource *)mParser->getSource(
+ ATSParser::AUDIO).get();
+ size_t prevSyncSize = 1;
+ int64_t durationUs = -1;
+ List<int64_t> durations;
+ // Estimate duration --- stabilize until you get <500ms deviation.
+ while (feedMore() == OK
+ && ALooper::GetNowUs() - startTime <= 2000000ll) {
+ if (mSeekSyncPoints->size() > prevSyncSize) {
+ prevSyncSize = mSeekSyncPoints->size();
+ int64_t diffUs = mSeekSyncPoints->keyAt(prevSyncSize - 1)
+ - mSeekSyncPoints->keyAt(0);
+ off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
+ - mSeekSyncPoints->valueAt(0);
+ durationUs = size * diffUs / diffOffset;
+ durations.push_back(durationUs);
+ if (durations.size() > 5) {
+ durations.erase(durations.begin());
+ int64_t min = *durations.begin();
+ int64_t max = *durations.begin();
+ for (List<int64_t>::iterator i = durations.begin();
+ i != durations.end();
+ ++i) {
+ if (min > *i) {
+ min = *i;
+ }
+ if (max < *i) {
+ max = *i;
+ }
+ }
+ if (max - min < 500 * 1000) {
+ break;
+ }
+ }
+ }
+ }
+ status_t err;
+ int64_t bufferedDurationUs;
+ bufferedDurationUs = impl->getBufferedDurationUs(&err);
+ if (err == ERROR_END_OF_STREAM) {
+ durationUs = bufferedDurationUs;
+ }
+ if (durationUs > 0) {
+ const sp<MetaData> meta = impl->getFormat();
+ meta->setInt64(kKeyDuration, durationUs);
+ impl->setFormat(meta);
+ }
+ }
+
+ ALOGI("haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64,
+ haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
}
status_t MPEG2TSExtractor::feedMore() {
@@ -213,12 +258,195 @@ status_t MPEG2TSExtractor::feedMore() {
return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
}
+ ATSParser::SyncEvent event(mOffset);
mOffset += n;
- return mParser->feedTSPacket(packet, kTSPacketSize);
+ status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
+ if (event.isInit()) {
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ if (mSourceImpls[i].get() == event.getMediaSource().get()) {
+ mSyncPoints.editItemAt(i).add(
+ event.getTimeUs(), event.getOffset());
+ break;
+ }
+ }
+ }
+ return err;
}
uint32_t MPEG2TSExtractor::flags() const {
- return CAN_PAUSE;
+ return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
+}
+
+status_t MPEG2TSExtractor::seek(int64_t seekTimeUs,
+ const MediaSource::ReadOptions::SeekMode &seekMode) {
+ if (mSeekSyncPoints == NULL || mSeekSyncPoints->isEmpty()) {
+ ALOGW("No sync point to seek to.");
+ // ... and therefore we have nothing useful to do here.
+ return OK;
+ }
+
+ // Determine whether we're seeking beyond the known area.
+ bool shouldSeekBeyond =
+ (seekTimeUs > mSeekSyncPoints->keyAt(mSeekSyncPoints->size() - 1));
+
+ // Determine the sync point to seek.
+ size_t index = 0;
+ for (; index < mSeekSyncPoints->size(); ++index) {
+ int64_t timeUs = mSeekSyncPoints->keyAt(index);
+ if (timeUs > seekTimeUs) {
+ break;
+ }
+ }
+
+ switch (seekMode) {
+ case MediaSource::ReadOptions::SEEK_NEXT_SYNC:
+ if (index == mSeekSyncPoints->size()) {
+ ALOGW("Next sync not found; starting from the latest sync.");
+ --index;
+ }
+ break;
+ case MediaSource::ReadOptions::SEEK_CLOSEST_SYNC:
+ case MediaSource::ReadOptions::SEEK_CLOSEST:
+ ALOGW("seekMode not supported: %d; falling back to PREVIOUS_SYNC",
+ seekMode);
+ // fall-through
+ case MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC:
+ if (index == 0) {
+ ALOGW("Previous sync not found; starting from the earliest "
+ "sync.");
+ } else {
+ --index;
+ }
+ break;
+ }
+ if (!shouldSeekBeyond || mOffset <= mSeekSyncPoints->valueAt(index)) {
+ int64_t actualSeekTimeUs = mSeekSyncPoints->keyAt(index);
+ mOffset = mSeekSyncPoints->valueAt(index);
+ status_t err = queueDiscontinuityForSeek(actualSeekTimeUs);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (shouldSeekBeyond) {
+ status_t err = seekBeyond(seekTimeUs);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ // Fast-forward to sync frame.
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls[i];
+ status_t err;
+ feedUntilBufferAvailable(impl);
+ while (impl->hasBufferAvailable(&err)) {
+ sp<AMessage> meta = impl->getMetaAfterLastDequeued(0);
+ sp<ABuffer> buffer;
+ if (meta == NULL) {
+ return UNKNOWN_ERROR;
+ }
+ int32_t sync;
+ if (meta->findInt32("isSync", &sync) && sync) {
+ break;
+ }
+ err = impl->dequeueAccessUnit(&buffer);
+ if (err != OK) {
+ return err;
+ }
+ feedUntilBufferAvailable(impl);
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG2TSExtractor::queueDiscontinuityForSeek(int64_t actualSeekTimeUs) {
+ // Signal discontinuity
+ sp<AMessage> extra(new AMessage);
+ extra->setInt64(IStreamListener::kKeyMediaTimeUs, actualSeekTimeUs);
+ mParser->signalDiscontinuity(ATSParser::DISCONTINUITY_TIME, extra);
+
+ // After discontinuity, impl should only have discontinuities
+ // with the last being what we queued. Dequeue them all here.
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
+ sp<ABuffer> buffer;
+ status_t err;
+ while (impl->hasBufferAvailable(&err)) {
+ if (err != OK) {
+ return err;
+ }
+ err = impl->dequeueAccessUnit(&buffer);
+ // If the source contains anything but discontinuity, that's
+ // a programming mistake.
+ CHECK(err == INFO_DISCONTINUITY);
+ }
+ }
+
+ // Feed until we have a buffer for each source.
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
+ sp<ABuffer> buffer;
+ status_t err = feedUntilBufferAvailable(impl);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG2TSExtractor::seekBeyond(int64_t seekTimeUs) {
+ // If we're seeking beyond where we know --- read until we reach there.
+ size_t syncPointsSize = mSeekSyncPoints->size();
+
+ while (seekTimeUs > mSeekSyncPoints->keyAt(
+ mSeekSyncPoints->size() - 1)) {
+ status_t err;
+ if (syncPointsSize < mSeekSyncPoints->size()) {
+ syncPointsSize = mSeekSyncPoints->size();
+ int64_t syncTimeUs = mSeekSyncPoints->keyAt(syncPointsSize - 1);
+ // Dequeue buffers before the sync point so we don't build up
+ // too much cached data.
+ sp<ABuffer> buffer;
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls[i];
+ int64_t timeUs;
+ while ((err = impl->nextBufferTime(&timeUs)) == OK) {
+ if (timeUs < syncTimeUs) {
+ impl->dequeueAccessUnit(&buffer);
+ } else {
+ break;
+ }
+ }
+ if (err != OK && err != -EWOULDBLOCK) {
+ return err;
+ }
+ }
+ }
+ if (feedMore() != OK) {
+ return ERROR_END_OF_STREAM;
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG2TSExtractor::feedUntilBufferAvailable(
+ const sp<AnotherPacketSource> &impl) {
+ status_t finalResult;
+ while (!impl->hasBufferAvailable(&finalResult)) {
+ if (finalResult != OK) {
+ return finalResult;
+ }
+
+ status_t err = feedMore();
+ if (err != OK) {
+ impl->signalEOS(err);
+ }
+ }
+ return OK;
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index a1ceb2e..d46bf9d 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -352,9 +352,9 @@ status_t OMX::configureVideoTunnelMode(
status_t OMX::useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) {
+ buffer_id *buffer, OMX_U32 allottedSize) {
return findInstance(node)->useBuffer(
- port_index, params, buffer);
+ port_index, params, buffer, allottedSize);
}
status_t OMX::useGraphicBuffer(
@@ -405,9 +405,9 @@ status_t OMX::allocateBuffer(
status_t OMX::allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
- buffer_id *buffer) {
+ buffer_id *buffer, OMX_U32 allottedSize) {
return findInstance(node)->allocateBufferWithBackup(
- port_index, params, buffer);
+ port_index, params, buffer, allottedSize);
}
status_t OMX::freeBuffer(node_id node, OMX_U32 port_index, buffer_id buffer) {
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 91cee73..f5f0f4f 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -169,10 +169,8 @@ OMXNodeInstance::OMXNodeInstance(
mNodeID(0),
mHandle(NULL),
mObserver(observer),
- mDying(false)
-#ifdef __LP64__
- , mBufferIDCount(0)
-#endif
+ mDying(false),
+ mBufferIDCount(0)
{
mName = ADebug::GetDebugName(name);
DEBUG = ADebug::GetDebugLevelFromProperty(name, "debug.stagefright.omx-debug");
@@ -622,8 +620,11 @@ status_t OMXNodeInstance::configureVideoTunnelMode(
status_t OMXNodeInstance::useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer) {
+ OMX::buffer_id *buffer, OMX_U32 allottedSize) {
Mutex::Autolock autoLock(mLock);
+ if (allottedSize > params->size()) {
+ return BAD_VALUE;
+ }
BufferMeta *buffer_meta = new BufferMeta(params);
@@ -631,10 +632,11 @@ status_t OMXNodeInstance::useBuffer(
OMX_ERRORTYPE err = OMX_UseBuffer(
mHandle, &header, portIndex, buffer_meta,
- params->size(), static_cast<OMX_U8 *>(params->pointer()));
+ allottedSize, static_cast<OMX_U8 *>(params->pointer()));
if (err != OMX_ErrorNone) {
- CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(portIndex, params->size(), params->pointer()));
+ CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(
+ portIndex, (size_t)allottedSize, params->pointer()));
delete buffer_meta;
buffer_meta = NULL;
@@ -656,7 +658,7 @@ status_t OMXNodeInstance::useBuffer(
}
CLOG_BUFFER(useBuffer, NEW_BUFFER_FMT(
- *buffer, portIndex, "%zu@%p", params->size(), params->pointer()));
+ *buffer, portIndex, "%u@%p", allottedSize, params->pointer()));
return OK;
}
@@ -868,17 +870,9 @@ status_t OMXNodeInstance::createPersistentInputSurface(
consumer->setConsumerName(name);
consumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
- status_t err = consumer->setMaxAcquiredBufferCount(
- BufferQueue::MAX_MAX_ACQUIRED_BUFFERS);
- if (err != NO_ERROR) {
- ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
- BufferQueue::MAX_MAX_ACQUIRED_BUFFERS, err);
- return err;
- }
-
sp<BufferQueue::ProxyConsumerListener> proxy =
new BufferQueue::ProxyConsumerListener(NULL);
- err = consumer->consumerConnect(proxy, false);
+ status_t err = consumer->consumerConnect(proxy, false);
if (err != NO_ERROR) {
ALOGE("Error connecting to BufferQueue: %s (%d)",
strerror(-err), err);
@@ -949,19 +943,21 @@ status_t OMXNodeInstance::allocateBuffer(
status_t OMXNodeInstance::allocateBufferWithBackup(
OMX_U32 portIndex, const sp<IMemory> &params,
- OMX::buffer_id *buffer) {
+ OMX::buffer_id *buffer, OMX_U32 allottedSize) {
Mutex::Autolock autoLock(mLock);
+ if (allottedSize > params->size()) {
+ return BAD_VALUE;
+ }
BufferMeta *buffer_meta = new BufferMeta(params, true);
OMX_BUFFERHEADERTYPE *header;
OMX_ERRORTYPE err = OMX_AllocateBuffer(
- mHandle, &header, portIndex, buffer_meta, params->size());
-
+ mHandle, &header, portIndex, buffer_meta, allottedSize);
if (err != OMX_ErrorNone) {
CLOG_ERROR(allocateBufferWithBackup, err,
- SIMPLE_BUFFER(portIndex, params->size(), params->pointer()));
+ SIMPLE_BUFFER(portIndex, (size_t)allottedSize, params->pointer()));
delete buffer_meta;
buffer_meta = NULL;
@@ -981,8 +977,8 @@ status_t OMXNodeInstance::allocateBufferWithBackup(
bufferSource->addCodecBuffer(header);
}
- CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %p",
- params->size(), params->pointer(), header->pBuffer));
+ CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%u@%p :> %p",
+ allottedSize, params->pointer(), header->pBuffer));
return OK;
}
@@ -1037,6 +1033,12 @@ status_t OMXNodeInstance::emptyBuffer(
Mutex::Autolock autoLock(mLock);
OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ // rangeLength and rangeOffset must be a subset of the allocated data in the buffer.
+ // corner case: we permit rangeOffset == end-of-buffer with rangeLength == 0.
+ if (rangeOffset > header->nAllocLen
+ || rangeLength > header->nAllocLen - rangeOffset) {
+ return BAD_VALUE;
+ }
header->nFilledLen = rangeLength;
header->nOffset = rangeOffset;
@@ -1422,8 +1424,6 @@ void OMXNodeInstance::freeActiveBuffers() {
}
}
-#ifdef __LP64__
-
OMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
if (bufferHeader == NULL) {
return 0;
@@ -1466,23 +1466,4 @@ void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer) {
mBufferIDToBufferHeader.removeItem(buffer);
}
-#else
-
-OMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
- return (OMX::buffer_id)bufferHeader;
-}
-
-OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(OMX::buffer_id buffer) {
- return (OMX_BUFFERHEADERTYPE *)buffer;
-}
-
-OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
- return (OMX::buffer_id)bufferHeader;
-}
-
-void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer __unused) {
-}
-
-#endif
-
} // namespace android
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 67ff145..294b2ed 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -193,7 +193,7 @@ status_t Harness::allocatePortBuffers(
CHECK(buffer.mMemory != NULL);
err = mOMX->allocateBufferWithBackup(
- node, portIndex, buffer.mMemory, &buffer.mID);
+ node, portIndex, buffer.mMemory, &buffer.mID, buffer.mMemory->size());
EXPECT_SUCCESS(err, "allocateBuffer");
buffers->push(buffer);
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index ba17e90..e64a7a1 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1737,7 +1737,7 @@ private:
}
if (!mAllTracksHaveTime) {
- bool allTracksHaveTime = true;
+ bool allTracksHaveTime = (mTracks.size() > 0);
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *track = &mTracks.editItemAt(i);
if (track->mNTPAnchorUs < 0) {
diff --git a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
index cee62a3..ab547be 100644
--- a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
+++ b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
@@ -150,7 +150,11 @@ TEST_F(MediaCodecListOverridesTest, exportTestResultsToXML) {
fclose(f);
free(buf);
- EXPECT_TRUE(overrides == kTestOverridesStr);
+ AString expected;
+ expected.append(getProfilingVersionString());
+ expected.append("\n");
+ expected.append(kTestOverridesStr);
+ EXPECT_TRUE(overrides == expected);
remove(fileName);
}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 93b1642..52fce34 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -181,7 +181,8 @@ AudioFlinger::AudioFlinger()
mIsLowRamDevice(true),
mIsDeviceTypeKnown(false),
mGlobalEffectEnableTime(0),
- mPrimaryOutputSampleRate(0)
+ mPrimaryOutputSampleRate(0),
+ mSystemReady(false)
{
getpid_cached = getpid();
char value[PROPERTY_VALUE_MAX];
@@ -1722,6 +1723,26 @@ audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId
return (audio_hw_sync_t)value;
}
+status_t AudioFlinger::systemReady()
+{
+ Mutex::Autolock _l(mLock);
+ ALOGI("%s", __FUNCTION__);
+ if (mSystemReady) {
+ ALOGW("%s called twice", __FUNCTION__);
+ return NO_ERROR;
+ }
+ mSystemReady = true;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ ThreadBase *thread = (ThreadBase *)mPlaybackThreads.valueAt(i).get();
+ thread->systemReady();
+ }
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ ThreadBase *thread = (ThreadBase *)mRecordThreads.valueAt(i).get();
+ thread->systemReady();
+ }
+ return NO_ERROR;
+}
+
// setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
void AudioFlinger::setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId)
{
@@ -1794,15 +1815,15 @@ sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_
PlaybackThread *thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- thread = new OffloadThread(this, outputStream, *output, devices);
+ thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(config->format)
|| !isValidPcmSinkChannelMask(config->channel_mask)) {
- thread = new DirectOutputThread(this, outputStream, *output, devices);
+ thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
} else {
- thread = new MixerThread(this, outputStream, *output, devices);
+ thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
}
mPlaybackThreads.add(*output, thread);
@@ -1873,7 +1894,7 @@ audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1,
}
audio_io_handle_t id = nextUniqueId();
- DuplicatingThread *thread = new DuplicatingThread(this, thread1, id);
+ DuplicatingThread *thread = new DuplicatingThread(this, thread1, id, mSystemReady);
thread->addOutputTrack(thread2);
mPlaybackThreads.add(id, thread);
// notify client processes of the new output creation
@@ -2120,7 +2141,8 @@ sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t m
inputStream,
*input,
primaryOutputDevice_l(),
- devices
+ devices,
+ mSystemReady
#ifdef TEE_SINK
, teeSink
#endif
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 51b2610..d087ced 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -257,6 +257,9 @@ public:
/* Get the HW synchronization source used for an audio session */
virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+ /* Indicate JAVA services are ready (scheduling, power management ...) */
+ virtual status_t systemReady();
+
virtual status_t onTransact(
uint32_t code,
const Parcel& data,
@@ -356,6 +359,15 @@ private:
// check that channelMask is the "canonical" one we expect for the channelCount.
return channelMask == audio_channel_out_mask_from_count(channelCount);
}
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ if (kEnableExtendedChannels) {
+ const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ if (channelCount >= FCC_2 // mono is not supported at this time
+ && channelCount <= AudioMixer::MAX_NUM_CHANNELS) {
+ return true;
+ }
+ }
+ return false;
default:
return false;
}
@@ -752,6 +764,7 @@ private:
uint32_t mPrimaryOutputSampleRate; // sample rate of the primary output, or zero if none
// protected by mHardwareLock
+ bool mSystemReady;
};
#undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 586c737..01efc53 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -66,6 +66,13 @@
#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
#endif
+// TODO: Move these macro/inlines to a header file.
+template <typename T>
+static inline
+T max(const T& x, const T& y) {
+ return x > y ? x : y;
+}
+
// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
// original code will be used for stereo sinks, the new mixer for multichannel.
static const bool kUseNewMixer = true;
@@ -499,41 +506,99 @@ void AudioMixer::disable(int name)
static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
+ // check floating point volume to see if it is identical to the previously
+ // set volume.
+ // We do not use a tolerance here (and reject changes too small)
+ // as it may be confusing to use a different value than the one set.
+ // If the resulting volume is too small to ramp, it is a direct set of the volume.
if (newVolume == *pSetVolume) {
return false;
}
- /* set the floating point volume variables */
- if (ramp != 0) {
- *pVolumeInc = (newVolume - *pSetVolume) / ramp;
- *pPrevVolume = *pSetVolume;
+ if (newVolume < 0) {
+ newVolume = 0; // should not have negative volumes
} else {
- *pVolumeInc = 0;
- *pPrevVolume = newVolume;
+ switch (fpclassify(newVolume)) {
+ case FP_SUBNORMAL:
+ case FP_NAN:
+ newVolume = 0;
+ break;
+ case FP_ZERO:
+ break; // zero volume is fine
+ case FP_INFINITE:
+ // Infinite volume could be handled consistently since
+ // floating point math saturates at infinities,
+ // but we limit volume to unity gain float.
+ // ramp = 0; break;
+ //
+ newVolume = AudioMixer::UNITY_GAIN_FLOAT;
+ break;
+ case FP_NORMAL:
+ default:
+ // Floating point does not have problems with overflow wrap
+ // that integer has. However, we limit the volume to
+ // unity gain here.
+ // TODO: Revisit the volume limitation and perhaps parameterize.
+ if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
+ newVolume = AudioMixer::UNITY_GAIN_FLOAT;
+ }
+ break;
+ }
+ }
+
+ // set floating point volume ramp
+ if (ramp != 0) {
+ // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
+ // is no computational mismatch; hence equality is checked here.
+ ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
+ " prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
+ const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
+ const float maxv = max(newVolume, *pPrevVolume); // could be inf, cannot be nan, subnormal
+
+ if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
+ && maxv + inc != maxv) { // inc must make forward progress
+ *pVolumeInc = inc;
+ // ramp is set now.
+ // Note: if newVolume is 0, then near the end of the ramp,
+ // it may be possible that the ramped volume may be subnormal or
+ // temporarily negative by a small amount or subnormal due to floating
+ // point inaccuracies.
+ } else {
+ ramp = 0; // ramp not allowed
+ }
}
- *pSetVolume = newVolume;
- /* set the legacy integer volume variables */
- int32_t intVolume = newVolume * AudioMixer::UNITY_GAIN_INT;
- if (intVolume > AudioMixer::UNITY_GAIN_INT) {
- intVolume = AudioMixer::UNITY_GAIN_INT;
- } else if (intVolume < 0) {
- ALOGE("negative volume %.7g", newVolume);
- intVolume = 0; // should never happen, but for safety check.
+ // compute and check integer volume, no need to check negative values
+ // The integer volume is limited to "unity_gain" to avoid wrapping and other
+ // audio artifacts, so it never reaches the range limit of U4.28.
+ // We safely use signed 16 and 32 bit integers here.
+ const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
+ const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
+ AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
+
+ // set integer volume ramp
+ if (ramp != 0) {
+ // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
+ // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
+ // is no computational mismatch; hence equality is checked here.
+ ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
+ " prev:%d set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
+ const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
+
+ if (inc != 0) { // inc must make forward progress
+ *pIntVolumeInc = inc;
+ } else {
+ ramp = 0; // ramp not allowed
+ }
}
- if (intVolume == *pIntSetVolume) {
- *pIntVolumeInc = 0;
- /* TODO: integer/float workaround: ignore floating volume ramp */
+
+ // if no ramp, or ramp not allowed, then clear float and integer increments
+ if (ramp == 0) {
*pVolumeInc = 0;
*pPrevVolume = newVolume;
- return true;
- }
- if (ramp != 0) {
- *pIntVolumeInc = ((intVolume - *pIntSetVolume) << 16) / ramp;
- *pIntPrevVolume = (*pIntVolumeInc == 0 ? intVolume : *pIntSetVolume) << 16;
- } else {
*pIntVolumeInc = 0;
*pIntPrevVolume = intVolume << 16;
}
+ *pSetVolume = newVolume;
*pIntSetVolume = intVolume;
return true;
}
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 8bccb47..949c91d 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -335,13 +335,21 @@ status_t AudioFlinger::EffectModule::configure()
// TODO: handle configuration of effects replacing track process
channelMask = thread->channelMask();
+ mConfig.outputCfg.channels = channelMask;
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
} else {
mConfig.inputCfg.channels = channelMask;
+ // TODO: Update this logic when multichannel effects are implemented.
+ // For offloaded tracks consider mono output as stereo for proper effect initialization
+ if (channelMask == AUDIO_CHANNEL_OUT_MONO) {
+ mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ ALOGV("Overriding effect input and output as STEREO");
+ }
}
- mConfig.outputCfg.channels = channelMask;
+
mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
mConfig.inputCfg.samplingRate = thread->sampleRate();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 594ed05..7809eff 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -356,13 +356,47 @@ String8 devicesToString(audio_devices_t devices)
AUDIO_DEVICE_OUT_SPEAKER, "SPEAKER",
AUDIO_DEVICE_OUT_WIRED_HEADSET, "WIRED_HEADSET",
AUDIO_DEVICE_OUT_WIRED_HEADPHONE, "WIRED_HEADPHONE",
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "BLUETOOTH_SCO",
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET",
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, "BLUETOOTH_SCO_CARKIT",
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "BLUETOOTH_A2DP",
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, "BLUETOOTH_A2DP_HEADPHONES",
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, "BLUETOOTH_A2DP_SPEAKER",
+ AUDIO_DEVICE_OUT_AUX_DIGITAL, "AUX_DIGITAL",
+ AUDIO_DEVICE_OUT_HDMI, "HDMI",
+ AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET",
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET",
+ AUDIO_DEVICE_OUT_USB_ACCESSORY, "USB_ACCESSORY",
+ AUDIO_DEVICE_OUT_USB_DEVICE, "USB_DEVICE",
AUDIO_DEVICE_OUT_TELEPHONY_TX, "TELEPHONY_TX",
+ AUDIO_DEVICE_OUT_LINE, "LINE",
+ AUDIO_DEVICE_OUT_HDMI_ARC, "HDMI_ARC",
+ AUDIO_DEVICE_OUT_SPDIF, "SPDIF",
+ AUDIO_DEVICE_OUT_FM, "FM",
+ AUDIO_DEVICE_OUT_AUX_LINE, "AUX_LINE",
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE, "SPEAKER_SAFE",
AUDIO_DEVICE_NONE, "NONE", // must be last
}, mappingsIn[] = {
+ AUDIO_DEVICE_IN_COMMUNICATION, "COMMUNICATION",
+ AUDIO_DEVICE_IN_AMBIENT, "AMBIENT",
AUDIO_DEVICE_IN_BUILTIN_MIC, "BUILTIN_MIC",
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET",
AUDIO_DEVICE_IN_WIRED_HEADSET, "WIRED_HEADSET",
+ AUDIO_DEVICE_IN_AUX_DIGITAL, "AUX_DIGITAL",
AUDIO_DEVICE_IN_VOICE_CALL, "VOICE_CALL",
+ AUDIO_DEVICE_IN_TELEPHONY_RX, "TELEPHONY_RX",
+ AUDIO_DEVICE_IN_BACK_MIC, "BACK_MIC",
AUDIO_DEVICE_IN_REMOTE_SUBMIX, "REMOTE_SUBMIX",
+ AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET",
+ AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET",
+ AUDIO_DEVICE_IN_USB_ACCESSORY, "USB_ACCESSORY",
+ AUDIO_DEVICE_IN_USB_DEVICE, "USB_DEVICE",
+ AUDIO_DEVICE_IN_FM_TUNER, "FM_TUNER",
+ AUDIO_DEVICE_IN_TV_TUNER, "TV_TUNER",
+ AUDIO_DEVICE_IN_LINE, "LINE",
+ AUDIO_DEVICE_IN_SPDIF, "SPDIF",
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP, "BLUETOOTH_A2DP",
+ AUDIO_DEVICE_IN_LOOPBACK, "LOOPBACK",
AUDIO_DEVICE_NONE, "NONE", // must be last
};
String8 result;
@@ -487,7 +521,7 @@ const char *sourceToString(audio_source_t source)
}
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
- audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
+ audio_devices_t outDevice, audio_devices_t inDevice, type_t type, bool systemReady)
: Thread(false /*canCallJava*/),
mType(type),
mAudioFlinger(audioFlinger),
@@ -498,7 +532,8 @@ AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio
mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
// mName will be set by concrete (non-virtual) subclass
- mDeathRecipient(new PMDeathRecipient(this))
+ mDeathRecipient(new PMDeathRecipient(this)),
+ mSystemReady(systemReady)
{
memset(&mPatch, 0, sizeof(struct audio_patch));
}
@@ -567,6 +602,11 @@ status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
{
status_t status = NO_ERROR;
+ if (event->mRequiresSystemReady && !mSystemReady) {
+ event->mWaitStatus = false;
+ mPendingConfigEvents.add(event);
+ return status;
+ }
mConfigEvents.add(event);
ALOGV("sendConfigEvent_l() num events %d event %d", mConfigEvents.size(), event->mType);
mWaitWorkCV.signal();
@@ -598,6 +638,12 @@ void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event)
sendConfigEvent_l(configEvent);
}
+void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio)
+{
+ Mutex::Autolock _l(mLock);
+ sendPrioConfigEvent_l(pid, tid, prio);
+}
+
// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
{
@@ -697,49 +743,62 @@ void AudioFlinger::ThreadBase::processConfigEvents_l()
String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
String8 s;
- if (output) {
- if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
- if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
- if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
- if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
- if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
- if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
- if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
- if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
- if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
- if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
- } else {
- if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
- if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
- if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
- if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
- if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
- if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
- if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
- if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
- if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
- if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
- if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
- if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
- if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
- if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
- if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
- }
- int len = s.length();
- if (s.length() > 2) {
- char *str = s.lockBuffer(len);
- s.unlockBuffer(len - 2);
- }
- return s;
+ const audio_channel_representation_t representation = audio_channel_mask_get_representation(mask);
+
+ switch (representation) {
+ case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
+ if (output) {
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+ if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
+ } else {
+ if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
+ if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
+ if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
+ if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
+ if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
+ if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
+ if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
+ if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+ if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
+ if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
+ if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
+ }
+ const int len = s.length();
+ if (len > 2) {
+ char *str = s.lockBuffer(len); // needed?
+ s.unlockBuffer(len - 2); // remove trailing ", "
+ }
+ return s;
+ }
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
+ return s;
+ default:
+ s.appendFormat("unknown mask, representation:%d bits:%#x",
+ representation, audio_channel_mask_get_bits(mask));
+ return s;
+ }
}
void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
@@ -880,8 +939,7 @@ void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids)
}
void AudioFlinger::ThreadBase::getPowerManager_l() {
-
- if (mPowerManager == 0) {
+ if (mSystemReady && mPowerManager == 0) {
// use checkService() to avoid blocking if power service is not up yet
sp<IBinder> binder =
defaultServiceManager()->checkService(String16("power"));
@@ -895,7 +953,6 @@ void AudioFlinger::ThreadBase::getPowerManager_l() {
}
void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
-
getPowerManager_l();
if (mWakeLockToken == NULL) {
ALOGE("no wake lock to update!");
@@ -1337,6 +1394,20 @@ void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *conf
AUDIO_PORT_CONFIG_FORMAT;
}
+void AudioFlinger::ThreadBase::systemReady()
+{
+ Mutex::Autolock _l(mLock);
+ if (mSystemReady) {
+ return;
+ }
+ mSystemReady = true;
+
+ for (size_t i = 0; i < mPendingConfigEvents.size(); i++) {
+ sendConfigEvent_l(mPendingConfigEvents.editItemAt(i));
+ }
+ mPendingConfigEvents.clear();
+}
+
// ----------------------------------------------------------------------------
// Playback
@@ -1346,8 +1417,9 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge
AudioStreamOut* output,
audio_io_handle_t id,
audio_devices_t device,
- type_t type)
- : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
+ type_t type,
+ bool systemReady)
+ : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type, systemReady),
mNormalFrameCount(0), mSinkBuffer(NULL),
mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
mMixerBuffer(NULL),
@@ -1366,7 +1438,7 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge
mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
mMixerStatusIgnoringFastTracks(MIXER_IDLE),
- standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+ mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
mBytesRemaining(0),
mCurrentWriteLength(0),
mUseAsyncWrite(false),
@@ -1572,10 +1644,12 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
) &&
// PCM data
audio_is_linear_pcm(format) &&
- // identical channel mask to sink, or mono in and stereo sink
+ // TODO: extract as a data library function that checks that a computationally
+ // expensive downmixer is not required: isFastOutputChannelConversion()
(channelMask == mChannelMask ||
- (channelMask == AUDIO_CHANNEL_OUT_MONO &&
- mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) &&
+ mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
+ (channelMask == AUDIO_CHANNEL_OUT_MONO
+ /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
// hardware sample rate
(sampleRate == mSampleRate) &&
// normal mixer has an associated fast mixer
@@ -2418,9 +2492,9 @@ void AudioFlinger::PlaybackThread::threadLoop_exit()
/*
The derived values that are cached:
- mSinkBufferSize from frame count * frame size
- - activeSleepTime from activeSleepTimeUs()
- - idleSleepTime from idleSleepTimeUs()
- - standbyDelay from mActiveSleepTimeUs (DIRECT only)
+ - mActiveSleepTimeUs from activeSleepTimeUs()
+ - mIdleSleepTimeUs from idleSleepTimeUs()
+ - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only)
- maxPeriod from frame count and sample rate (MIXER only)
The parameters that affect these derived values are:
@@ -2437,8 +2511,8 @@ The parameters that affect these derived values are:
void AudioFlinger::PlaybackThread::cacheParameters_l()
{
mSinkBufferSize = mNormalFrameCount * mFrameSize;
- activeSleepTime = activeSleepTimeUs();
- idleSleepTime = idleSleepTimeUs();
+ mActiveSleepTimeUs = activeSleepTimeUs();
+ mIdleSleepTimeUs = idleSleepTimeUs();
}
void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
@@ -2605,7 +2679,7 @@ bool AudioFlinger::PlaybackThread::threadLoop()
{
Vector< sp<Track> > tracksToRemove;
- standbyTime = systemTime();
+ mStandbyTimeNs = systemTime();
// MIXER
nsecs_t lastWarning = 0;
@@ -2617,7 +2691,7 @@ bool AudioFlinger::PlaybackThread::threadLoop()
int lastGeneration = 0;
cacheParameters_l();
- sleepTime = idleSleepTime;
+ mSleepTimeUs = mIdleSleepTimeUs;
if (mType == MIXER) {
sleepTimeShift = 0;
@@ -2696,12 +2770,12 @@ bool AudioFlinger::PlaybackThread::threadLoop()
if (released) {
acquireWakeLock_l();
}
- standbyTime = systemTime() + standbyDelay;
- sleepTime = 0;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
+ mSleepTimeUs = 0;
continue;
}
- if ((!mActiveTracks.size() && systemTime() > standbyTime) ||
+ if ((!mActiveTracks.size() && systemTime() > mStandbyTimeNs) ||
isSuspended()) {
// put audio hardware into standby after short delay
if (shouldStandby_l()) {
@@ -2736,8 +2810,8 @@ bool AudioFlinger::PlaybackThread::threadLoop()
mBytesRemaining = 0;
checkSilentMode_l();
- standbyTime = systemTime() + standbyDelay;
- sleepTime = idleSleepTime;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
+ mSleepTimeUs = mIdleSleepTimeUs;
if (mType == MIXER) {
sleepTimeShift = 0;
}
@@ -2768,15 +2842,15 @@ bool AudioFlinger::PlaybackThread::threadLoop()
threadLoop_mix();
} else if ((mMixerStatus != MIXER_DRAIN_TRACK)
&& (mMixerStatus != MIXER_DRAIN_ALL)) {
- // threadLoop_sleepTime sets sleepTime to 0 if data
+ // threadLoop_sleepTime sets mSleepTimeUs to 0 if data
// must be written to HAL
threadLoop_sleepTime();
- if (sleepTime == 0) {
+ if (mSleepTimeUs == 0) {
mCurrentWriteLength = mSinkBufferSize;
}
}
// Either threadLoop_mix() or threadLoop_sleepTime() should have set
- // mMixerBuffer with data if mMixerBufferValid is true and sleepTime == 0.
+ // mMixerBuffer with data if mMixerBufferValid is true and mSleepTimeUs == 0.
// Merge mMixerBuffer data into mEffectBuffer (if any effects are valid)
// or mSinkBuffer (if there are no effects).
//
@@ -2784,7 +2858,7 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// support higher precision, this needs to move.
//
// mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
- // TODO use sleepTime == 0 as an additional condition.
+ // TODO use mSleepTimeUs == 0 as an additional condition.
if (mMixerBufferValid) {
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
@@ -2795,14 +2869,14 @@ bool AudioFlinger::PlaybackThread::threadLoop()
mBytesRemaining = mCurrentWriteLength;
if (isSuspended()) {
- sleepTime = suspendSleepTimeUs();
+ mSleepTimeUs = suspendSleepTimeUs();
// simulate write to HAL when suspended
mBytesWritten += mSinkBufferSize;
mBytesRemaining = 0;
}
// only process effects if we're going to write
- if (sleepTime == 0 && mType != OFFLOAD) {
+ if (mSleepTimeUs == 0 && mType != OFFLOAD) {
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
}
@@ -2821,7 +2895,7 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// Only if the Effects buffer is enabled and there is data in the
// Effects buffer (buffer valid), we need to
// copy into the sink buffer.
- // TODO use sleepTime == 0 as an additional condition.
+ // TODO use mSleepTimeUs == 0 as an additional condition.
if (mEffectBufferValid) {
//ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
@@ -2832,8 +2906,8 @@ bool AudioFlinger::PlaybackThread::threadLoop()
unlockEffectChains(effectChains);
if (!waitingAsyncCallback()) {
- // sleepTime == 0 means we must write to audio hardware
- if (sleepTime == 0) {
+ // mSleepTimeUs == 0 means we must write to audio hardware
+ if (mSleepTimeUs == 0) {
if (mBytesRemaining) {
ssize_t ret = threadLoop_write();
if (ret < 0) {
@@ -2863,7 +2937,7 @@ bool AudioFlinger::PlaybackThread::threadLoop()
} else {
ATRACE_BEGIN("sleep");
- usleep(sleepTime);
+ usleep(mSleepTimeUs);
ATRACE_END();
}
}
@@ -3118,8 +3192,8 @@ void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *
// ----------------------------------------------------------------------------
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, type_t type)
- : PlaybackThread(audioFlinger, output, id, device, type),
+ audio_io_handle_t id, audio_devices_t device, bool systemReady, type_t type)
+ : PlaybackThread(audioFlinger, output, id, device, type, systemReady),
// mAudioMixer below
// mFastMixer below
mFastMixerFutex(0)
@@ -3252,11 +3326,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
// start the fast mixer
mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
pid_t tid = mFastMixer->getTid();
- int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
- if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
- kPriorityFastMixer, getpid_cached, tid, err);
- }
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
#ifdef AUDIO_WATCHDOG
// create and start the watchdog
@@ -3264,11 +3334,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
mAudioWatchdog->setDump(&mAudioWatchdogDump);
mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
tid = mAudioWatchdog->getTid();
- err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
- if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
- kPriorityFastMixer, getpid_cached, tid, err);
- }
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
#endif
}
@@ -3467,11 +3533,11 @@ void AudioFlinger::MixerThread::threadLoop_mix()
// Only increase sleep time if the mixer is ready for two consecutive times to avoid
// that a steady state of alternating ready/not ready conditions keeps the sleep time
// such that we would underrun the audio HAL.
- if ((sleepTime == 0) && (sleepTimeShift > 0)) {
+ if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
sleepTimeShift--;
}
- sleepTime = 0;
- standbyTime = systemTime() + standbyDelay;
+ mSleepTimeUs = 0;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
//TODO: delay standby when effects have a tail
}
@@ -3480,11 +3546,11 @@ void AudioFlinger::MixerThread::threadLoop_sleepTime()
{
// If no tracks are ready, sleep once for the duration of an output
// buffer size, then write 0s to the output
- if (sleepTime == 0) {
+ if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime >> sleepTimeShift;
- if (sleepTime < kMinThreadSleepTimeUs) {
- sleepTime = kMinThreadSleepTimeUs;
+ mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
+ if (mSleepTimeUs < kMinThreadSleepTimeUs) {
+ mSleepTimeUs = kMinThreadSleepTimeUs;
}
// reduce sleep time in case of consecutive application underruns to avoid
// starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
@@ -3494,7 +3560,7 @@ void AudioFlinger::MixerThread::threadLoop_sleepTime()
sleepTimeShift++;
}
} else {
- sleepTime = idleSleepTime;
+ mSleepTimeUs = mIdleSleepTimeUs;
}
} else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
// clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared
@@ -3504,7 +3570,7 @@ void AudioFlinger::MixerThread::threadLoop_sleepTime()
} else {
memset(mSinkBuffer, 0, mSinkBufferSize);
}
- sleepTime = 0;
+ mSleepTimeUs = 0;
ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
"anticipated start");
}
@@ -4294,16 +4360,16 @@ void AudioFlinger::MixerThread::cacheParameters_l()
// ----------------------------------------------------------------------------
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device)
- : PlaybackThread(audioFlinger, output, id, device, DIRECT)
+ AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady)
+ : PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady)
// mLeftVolFloat, mRightVolFloat
{
}
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
- ThreadBase::type_t type)
- : PlaybackThread(audioFlinger, output, id, device, type)
+ ThreadBase::type_t type, bool systemReady)
+ : PlaybackThread(audioFlinger, output, id, device, type, systemReady)
// mLeftVolFloat, mRightVolFloat
{
}
@@ -4443,7 +4509,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep
track->mRetryCount = kMaxTrackRetriesDirect;
mActiveTrack = t;
mixerStatus = MIXER_TRACKS_READY;
- if (usesHwAvSync() && mHwPaused) {
+ if (mHwPaused) {
doHwResume = true;
mHwPaused = false;
}
@@ -4495,7 +4561,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep
android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
} else if (last) {
mixerStatus = MIXER_TRACKS_ENABLED;
- if (usesHwAvSync() && !mHwPaused && !mStandby) {
+ if (mHwSupportsPause && !mHwPaused && !mStandby) {
doHwPause = true;
mHwPaused = true;
}
@@ -4553,8 +4619,8 @@ void AudioFlinger::DirectOutputThread::threadLoop_mix()
mActiveTrack->releaseBuffer(&buffer);
}
mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer;
- sleepTime = 0;
- standbyTime = systemTime() + standbyDelay;
+ mSleepTimeUs = 0;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
mActiveTrack.clear();
}
@@ -4562,18 +4628,18 @@ void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
{
// do not write to HAL when paused
if (mHwPaused || (usesHwAvSync() && mStandby)) {
- sleepTime = idleSleepTime;
+ mSleepTimeUs = mIdleSleepTimeUs;
return;
}
- if (sleepTime == 0) {
+ if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime;
+ mSleepTimeUs = mActiveSleepTimeUs;
} else {
- sleepTime = idleSleepTime;
+ mSleepTimeUs = mIdleSleepTimeUs;
}
} else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
memset(mSinkBuffer, 0, mFrameCount * mFrameSize);
- sleepTime = 0;
+ mSleepTimeUs = 0;
}
}
@@ -4609,7 +4675,7 @@ bool AudioFlinger::DirectOutputThread::shouldStandby_l()
mTracks[mTracks.size() - 1]->mState == TrackBase::IDLE;
}
- return !mStandby && !(trackPaused || (usesHwAvSync() && mHwPaused && !trackStopped));
+ return !mStandby && !(trackPaused || (mHwPaused && !trackStopped));
}
// getTrackName_l() must be called with ThreadBase::mLock held
@@ -4714,11 +4780,11 @@ void AudioFlinger::DirectOutputThread::cacheParameters_l()
// hardware resources as soon as possible
// no delay on outputs with HW A/V sync
if (usesHwAvSync()) {
- standbyDelay = 0;
- } else if (audio_is_linear_pcm(mFormat)) {
- standbyDelay = microseconds(activeSleepTime*2);
+ mStandbyDelayNs = 0;
+ } else if ((mType == OFFLOAD) && !audio_is_linear_pcm(mFormat)) {
+ mStandbyDelayNs = kOffloadStandbyDelayNs;
} else {
- standbyDelay = kOffloadStandbyDelayNs;
+ mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
}
}
@@ -4832,8 +4898,8 @@ void AudioFlinger::AsyncCallbackThread::resetDraining()
// ----------------------------------------------------------------------------
AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, uint32_t device)
- : DirectOutputThread(audioFlinger, output, id, device, OFFLOAD),
+ AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady)
+ : DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady),
mPausedBytesRemaining(0)
{
//FIXME: mStandby should be set to true by ThreadBase constructor
@@ -4898,7 +4964,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
if (track->isPausing()) {
track->setPaused();
if (last) {
- if (!mHwPaused) {
+ if (mHwSupportsPause && !mHwPaused) {
doHwPause = true;
mHwPaused = true;
}
@@ -4934,7 +5000,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
// resume an interrupted write
}
// enable write to audio HAL
- sleepTime = 0;
+ mSleepTimeUs = 0;
// Do not handle new data in this iteration even if track->framesReady()
mixerStatus = MIXER_TRACKS_ENABLED;
@@ -4994,8 +5060,8 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
// do not modify drain sequence if we are already draining. This happens
// when resuming from pause after drain.
if ((mDrainSequence & 1) == 0) {
- sleepTime = 0;
- standbyTime = systemTime() + standbyDelay;
+ mSleepTimeUs = 0;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
mixerStatus = MIXER_DRAIN_TRACK;
mDrainSequence += 2;
}
@@ -5110,9 +5176,9 @@ void AudioFlinger::OffloadThread::onAddNewTrack_l()
// ----------------------------------------------------------------------------
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
- AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
+ AudioFlinger::MixerThread* mainThread, audio_io_handle_t id, bool systemReady)
: MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
- DUPLICATING),
+ systemReady, DUPLICATING),
mWaitTimeMs(UINT_MAX)
{
addOutputTrack(mainThread);
@@ -5137,19 +5203,19 @@ void AudioFlinger::DuplicatingThread::threadLoop_mix()
memset(mSinkBuffer, 0, mSinkBufferSize);
}
}
- sleepTime = 0;
+ mSleepTimeUs = 0;
writeFrames = mNormalFrameCount;
mCurrentWriteLength = mSinkBufferSize;
- standbyTime = systemTime() + standbyDelay;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
}
void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
{
- if (sleepTime == 0) {
+ if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime;
+ mSleepTimeUs = mActiveSleepTimeUs;
} else {
- sleepTime = idleSleepTime;
+ mSleepTimeUs = mIdleSleepTimeUs;
}
} else if (mBytesWritten != 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
@@ -5159,7 +5225,7 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
// flush remaining overflow buffers in output tracks
writeFrames = 0;
}
- sleepTime = 0;
+ mSleepTimeUs = 0;
}
}
@@ -5292,12 +5358,13 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamIn *input,
audio_io_handle_t id,
audio_devices_t outDevice,
- audio_devices_t inDevice
+ audio_devices_t inDevice,
+ bool systemReady
#ifdef TEE_SINK
, const sp<NBAIO_Sink>& teeSink
#endif
) :
- ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
+ ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
// mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
mRsmpInRear(0)
@@ -5416,12 +5483,7 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
- if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
- kPriorityFastCapture, getpid_cached, tid, err);
- }
-
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
#ifdef AUDIO_WATCHDOG
// FIXME
#endif
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 37bacae..07c226e 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -35,7 +35,8 @@ public:
static const char *threadTypeToString(type_t type);
ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
- audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
+ audio_devices_t outDevice, audio_devices_t inDevice, type_t type,
+ bool systemReady);
virtual ~ThreadBase();
virtual status_t readyToRun();
@@ -92,10 +93,13 @@ public:
Condition mCond; // condition for status return
status_t mStatus; // status communicated to sender
bool mWaitStatus; // true if sender is waiting for status
+ bool mRequiresSystemReady; // true if must wait for system ready to enter event queue
sp<ConfigEventData> mData; // event specific parameter data
protected:
- ConfigEvent(int type) : mType(type), mStatus(NO_ERROR), mWaitStatus(false), mData(NULL) {}
+ ConfigEvent(int type, bool requiresSystemReady = false) :
+ mType(type), mStatus(NO_ERROR), mWaitStatus(false),
+ mRequiresSystemReady(requiresSystemReady), mData(NULL) {}
};
class IoConfigEventData : public ConfigEventData {
@@ -136,7 +140,7 @@ public:
class PrioConfigEvent : public ConfigEvent {
public:
PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
- ConfigEvent(CFG_EVENT_PRIO) {
+ ConfigEvent(CFG_EVENT_PRIO, true) {
mData = new PrioConfigEventData(pid, tid, prio);
}
virtual ~PrioConfigEvent() {}
@@ -258,6 +262,7 @@ public:
status_t sendConfigEvent_l(sp<ConfigEvent>& event);
void sendIoConfigEvent(audio_io_config_event event);
void sendIoConfigEvent_l(audio_io_config_event event);
+ void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio);
void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
status_t sendSetParameterConfigEvent_l(const String8& keyValuePair);
status_t sendCreateAudioPatchConfigEvent(const struct audio_patch *patch,
@@ -359,6 +364,8 @@ public:
virtual sp<IMemory> pipeMemory() const { return 0; }
+ void systemReady();
+
mutable Mutex mLock;
protected:
@@ -418,6 +425,7 @@ protected:
size_t mBufferSize; // HAL buffer size for read() or write()
Vector< sp<ConfigEvent> > mConfigEvents;
+ Vector< sp<ConfigEvent> > mPendingConfigEvents; // events awaiting system ready
// These fields are written and read by thread itself without lock or barrier,
// and read by other threads without lock or barrier via standby(), outDevice()
@@ -445,6 +453,7 @@ protected:
mSuspendedSessions;
static const size_t kLogSize = 4 * 1024;
sp<NBLog::Writer> mNBLogWriter;
+ bool mSystemReady;
};
// --- PlaybackThread ---
@@ -470,7 +479,7 @@ public:
static const int8_t kMaxTrackRetriesOffload = 20;
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, type_t type);
+ audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
virtual ~PlaybackThread();
void dump(int fd, const Vector<String16>& args);
@@ -746,14 +755,14 @@ private:
bool mInWrite;
// FIXME rename these former local variables of threadLoop to standard "m" names
- nsecs_t standbyTime;
+ nsecs_t mStandbyTimeNs;
size_t mSinkBufferSize;
// cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l()
- uint32_t activeSleepTime;
- uint32_t idleSleepTime;
+ uint32_t mActiveSleepTimeUs;
+ uint32_t mIdleSleepTimeUs;
- uint32_t sleepTime;
+ uint32_t mSleepTimeUs;
// mixer status returned by prepareTracks_l()
mixer_state mMixerStatus; // current cycle
@@ -766,7 +775,7 @@ private:
uint32_t sleepTimeShift;
// same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value
- nsecs_t standbyDelay;
+ nsecs_t mStandbyDelayNs;
// MIXER only
nsecs_t maxPeriod;
@@ -842,6 +851,7 @@ public:
AudioStreamOut* output,
audio_io_handle_t id,
audio_devices_t device,
+ bool systemReady,
type_t type = MIXER);
virtual ~MixerThread();
@@ -903,7 +913,7 @@ class DirectOutputThread : public PlaybackThread {
public:
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device);
+ audio_io_handle_t id, audio_devices_t device, bool systemReady);
virtual ~DirectOutputThread();
// Thread virtuals
@@ -933,7 +943,8 @@ protected:
float mRightVolFloat;
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, uint32_t device, ThreadBase::type_t type);
+ audio_io_handle_t id, uint32_t device, ThreadBase::type_t type,
+ bool systemReady);
void processVolume_l(Track *track, bool lastTrack);
// prepareTracks_l() tells threadLoop_mix() the name of the single active track
@@ -946,7 +957,7 @@ class OffloadThread : public DirectOutputThread {
public:
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, uint32_t device);
+ audio_io_handle_t id, uint32_t device, bool systemReady);
virtual ~OffloadThread() {};
virtual void flushHw_l();
@@ -1001,7 +1012,7 @@ private:
class DuplicatingThread : public MixerThread {
public:
DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
- audio_io_handle_t id);
+ audio_io_handle_t id, bool systemReady);
virtual ~DuplicatingThread();
// Thread virtuals
@@ -1177,7 +1188,8 @@ public:
AudioStreamIn *input,
audio_io_handle_t id,
audio_devices_t outDevice,
- audio_devices_t inDevice
+ audio_devices_t inDevice,
+ bool systemReady
#ifdef TEE_SINK
, const sp<NBAIO_Sink>& teeSink
#endif
@@ -1294,6 +1306,7 @@ private:
// one-time initialization, no locks required
sp<FastCapture> mFastCapture; // non-0 if there is also
// a fast capture
+
// FIXME audio watchdog thread
// contents are not guaranteed to be consistent, no locks required
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index d1ee400..665cce9 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -171,7 +171,7 @@ status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
}
updateDevicesAndOutputs();
- if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) {
+ if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
@@ -261,7 +261,7 @@ status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
closeAllInputs();
- if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) {
+ if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
@@ -302,6 +302,9 @@ void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs
audio_patch_handle_t afPatchHandle;
DeviceVector deviceList;
+ if(!hasPrimaryOutput()) {
+ return;
+ }
audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
@@ -449,8 +452,6 @@ void AudioPolicyManager::setPhoneState(audio_mode_t state)
checkOutputForAllStrategies();
updateDevicesAndOutputs();
- sp<SwAudioOutputDescriptor> hwOutputDesc = mPrimaryOutput;
-
int delayMs = 0;
if (isStateInCall(state)) {
nsecs_t sysTime = systemTime();
@@ -477,29 +478,31 @@ void AudioPolicyManager::setPhoneState(audio_mode_t state)
}
}
- // Note that despite the fact that getNewOutputDevice() is called on the primary output,
- // the device returned is not necessarily reachable via this output
- audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
- // force routing command to audio hardware when ending call
- // even if no device change is needed
- if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
- rxDevice = hwOutputDesc->device();
- }
-
- if (state == AUDIO_MODE_IN_CALL) {
- updateCallRouting(rxDevice, delayMs);
- } else if (oldState == AUDIO_MODE_IN_CALL) {
- if (mCallRxPatch != 0) {
- mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
- mCallRxPatch.clear();
+ if (hasPrimaryOutput()) {
+ // Note that despite the fact that getNewOutputDevice() is called on the primary output,
+ // the device returned is not necessarily reachable via this output
+ audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+ // force routing command to audio hardware when ending call
+ // even if no device change is needed
+ if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
+ rxDevice = mPrimaryOutput->device();
}
- if (mCallTxPatch != 0) {
- mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
- mCallTxPatch.clear();
+
+ if (state == AUDIO_MODE_IN_CALL) {
+ updateCallRouting(rxDevice, delayMs);
+ } else if (oldState == AUDIO_MODE_IN_CALL) {
+ if (mCallRxPatch != 0) {
+ mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+ mCallRxPatch.clear();
+ }
+ if (mCallTxPatch != 0) {
+ mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+ mCallTxPatch.clear();
+ }
+ setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+ } else {
+ setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
}
- setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
- } else {
- setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
}
// if entering in call state, handle special case of active streams
// pertaining to sonification strategy see handleIncallSonification()
@@ -543,7 +546,7 @@ void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
checkA2dpSuspend();
checkOutputForAllStrategies();
updateDevicesAndOutputs();
- if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) {
+ if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
updateCallRouting(newDevice);
}
@@ -579,24 +582,43 @@ sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
audio_channel_mask_t channelMask,
audio_output_flags_t flags)
{
+ // only retain flags that will drive the direct output profile selection
+ // if explicitly requested
+ static const uint32_t kRelevantFlags =
+ (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ flags =
+ (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+
+ sp<IOProfile> profile;
+
for (size_t i = 0; i < mHwModules.size(); i++) {
if (mHwModules[i]->mHandle == 0) {
continue;
}
for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
- bool found = profile->isCompatibleProfile(device, String8(""),
+ sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
+ if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
channelMask, NULL /*updatedChannelMask*/,
- flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ?
- AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT);
- if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
- return profile;
+ flags)) {
+ continue;
+ }
+ // reject profiles not corresponding to a device currently available
+ if ((mAvailableOutputDevices.types() & curProfile->mSupportedDevices.types()) == 0) {
+ continue;
+ }
+ // if several profiles are compatible, give priority to one with offload capability
+ if (profile != 0 && ((curProfile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
+ continue;
+ }
+ profile = curProfile;
+ if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ break;
}
}
}
- return 0;
+ return profile;
}
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
@@ -819,10 +841,27 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice(
if (outputDesc != NULL) {
closeOutput(outputDesc->mIoHandle);
}
+
+ // if the selected profile is offloaded and no offload info was specified,
+ // create a default one
+ audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
+ if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ defaultOffloadInfo.sample_rate = samplingRate;
+ defaultOffloadInfo.channel_mask = channelMask;
+ defaultOffloadInfo.format = format;
+ defaultOffloadInfo.stream_type = stream;
+ defaultOffloadInfo.bit_rate = 0;
+ defaultOffloadInfo.duration_us = -1;
+ defaultOffloadInfo.has_video = true; // conservative
+ defaultOffloadInfo.is_streaming = true; // likely
+ offloadInfo = &defaultOffloadInfo;
+ }
+
outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
outputDesc->mDevice = device;
outputDesc->mLatency = 0;
- outputDesc->mFlags =(audio_output_flags_t) (outputDesc->mFlags | flags);
+ outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = samplingRate;
config.channel_mask = channelMask;
@@ -854,10 +893,6 @@ audio_io_handle_t AudioPolicyManager::getOutputForDevice(
if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
goto non_direct_output;
}
- // fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
- goto non_direct_output;
- }
return AUDIO_IO_HANDLE_NONE;
}
outputDesc->mSamplingRate = config.sample_rate;
@@ -1226,7 +1261,7 @@ void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
// If effects where present on the output, audioflinger moved them to the primary
// output by default: move them back to the appropriate output.
audio_io_handle_t dstOutput = getOutputForEffect();
- if (dstOutput != mPrimaryOutput->mIoHandle) {
+ if (hasPrimaryOutput() && dstOutput != mPrimaryOutput->mIoHandle) {
mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX,
mPrimaryOutput->mIoHandle, dstOutput);
}
@@ -1934,7 +1969,8 @@ status_t AudioPolicyManager::dump(int fd)
snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
result.append(buffer);
- snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput->mIoHandle);
+ snprintf(buffer, SIZE, " Primary Output: %d\n",
+ hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
result.append(buffer);
snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
result.append(buffer);
@@ -2896,7 +2932,7 @@ AudioPolicyManager::~AudioPolicyManager()
status_t AudioPolicyManager::initCheck()
{
- return (mPrimaryOutput == 0) ? NO_INIT : NO_ERROR;
+ return hasPrimaryOutput() ? NO_ERROR : NO_INIT;
}
#ifdef AUDIO_POLICY_TEST
@@ -3271,7 +3307,8 @@ status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> de
policyMix->setOutput(desc);
desc->mPolicyMix = policyMix->getMix();
- } else if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) {
+ } else if (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
+ hasPrimaryOutput()) {
// no duplicated output for direct outputs and
// outputs used by dynamic policy mixes
audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE;
@@ -4600,6 +4637,10 @@ void AudioPolicyManager::setStreamMute(audio_stream_type_t stream,
void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
bool starting, bool stateChange)
{
+ if (!hasPrimaryOutput()) {
+ return;
+ }
+
// if the stream pertains to sonification strategy and we are in call we must
// mute the stream if it is low visibility. If it is high visibility, we must play a tone
// in the device used for phone strategy and play the tone if the selected device does not
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index ea16864..f9d1198 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -553,10 +553,16 @@ protected:
audio_devices_t availablePrimaryOutputDevices() const
{
+ if (!hasPrimaryOutput()) {
+ return AUDIO_DEVICE_NONE;
+ }
return mPrimaryOutput->supportedDevices() & mAvailableOutputDevices.types();
}
audio_devices_t availablePrimaryInputDevices() const
{
+ if (!hasPrimaryOutput()) {
+ return AUDIO_DEVICE_NONE;
+ }
return mAvailableInputDevices.getDevicesFromHwModule(mPrimaryOutput->getModuleHandle());
}
@@ -576,6 +582,8 @@ protected:
void clearSessionRoutes(uid_t uid);
void checkStrategyRoute(routing_strategy strategy, audio_io_handle_t ouptutToSkip);
+ bool hasPrimaryOutput() const { return mPrimaryOutput != 0; }
+
uid_t mUidCached;
AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 527e80b..6f073ed 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -33,6 +33,7 @@
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <binder/ProcessInfoService.h>
+#include <camera/ICameraServiceProxy.h>
#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <gui/Surface.h>
@@ -230,6 +231,18 @@ void CameraService::onFirstRef()
}
CameraDeviceFactory::registerService(this);
+
+ CameraService::pingCameraServiceProxy();
+}
+
+void CameraService::pingCameraServiceProxy() {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.camera.proxy"));
+ if (binder == nullptr) {
+ return;
+ }
+ sp<ICameraServiceProxy> proxyBinder = interface_cast<ICameraServiceProxy>(binder);
+ proxyBinder->pingForUserUpdate();
}
CameraService::~CameraService() {
@@ -992,6 +1005,10 @@ status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clien
"(PID %" PRId32 ", priority %" PRId32 ")", i->getKey().string(),
String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
i->getPriority());
+ ALOGE(" Conflicts with: Device %s, client package %s (PID %"
+ PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+ String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
+ i->getPriority());
}
// Log the client's attempt
@@ -2051,7 +2068,7 @@ String8 CameraService::CameraClientManager::toString() const {
auto conflicting = i->getConflicting();
auto clientSp = i->getValue();
String8 packageName;
- userid_t clientUserId;
+ userid_t clientUserId = 0;
if (clientSp.get() != nullptr) {
packageName = String8{clientSp->getPackageName()};
uid_t clientUid = clientSp->getClientUid();
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 2e4743f..3298772 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -720,6 +720,8 @@ private:
static String8 toString(std::set<userid_t> intSet);
+ static void pingCameraServiceProxy();
+
};
template<class Func>
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 05ede92..f2d6ab2 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1881,6 +1881,16 @@ void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
mCaptureSequencer->notifyAutoExposure(newState, triggerId);
}
+void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp) {
+ (void)resultExtras;
+ (void)timestamp;
+
+ ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
+ __FUNCTION__, resultExtras.requestId, timestamp);
+ mCaptureSequencer->notifyShutter(resultExtras, timestamp);
+}
+
camera2::SharedParameters& Camera2Client::getParameters() {
return mParameters;
}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index a988037..3784aab 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -106,6 +106,8 @@ public:
virtual void notifyAutoFocus(uint8_t newState, int triggerId);
virtual void notifyAutoExposure(uint8_t newState, int triggerId);
+ virtual void notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp);
/**
* Interface used by independent components of Camera2Client.
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 9849f4d..d847e0f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -43,6 +43,8 @@ CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
mNewFrameReceived(false),
mNewCaptureReceived(false),
mShutterNotified(false),
+ mHalNotifiedShutter(false),
+ mShutterCaptureId(-1),
mClient(client),
mCaptureState(IDLE),
mStateTransitionCount(0),
@@ -106,6 +108,16 @@ void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
}
}
+void CaptureSequencer::notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ if (!mHalNotifiedShutter && resultExtras.requestId == mShutterCaptureId) {
+ mHalNotifiedShutter = true;
+ mShutterNotifySignal.signal();
+ }
+}
+
void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
ATRACE_CALL();
ALOGV("%s: New result available.", __FUNCTION__);
@@ -335,6 +347,11 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStart(
} else {
nextState = STANDARD_START;
}
+ {
+ Mutex::Autolock l(mInputMutex);
+ mShutterCaptureId = mCaptureId;
+ mHalNotifiedShutter = false;
+ }
mShutterNotified = false;
return nextState;
@@ -541,6 +558,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
return DONE;
}
}
+
// TODO: Capture should be atomic with setStreamingRequest here
res = client->getCameraDevice()->capture(captureCopy);
if (res != OK) {
@@ -560,6 +578,31 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
+
+ // Wait for shutter callback
+ while (!mHalNotifiedShutter) {
+ if (mTimeoutCount <= 0) {
+ break;
+ }
+ res = mShutterNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+ if (res == TIMED_OUT) {
+ mTimeoutCount--;
+ return STANDARD_CAPTURE_WAIT;
+ }
+ }
+
+ if (mHalNotifiedShutter) {
+ if (!mShutterNotified) {
+ SharedParameters::Lock l(client->getParameters());
+ /* warning: this also locks a SharedCameraCallbacks */
+ shutterNotifyLocked(l.mParameters, client, mMsgType);
+ mShutterNotified = true;
+ }
+ } else if (mTimeoutCount <= 0) {
+ ALOGW("Timed out waiting for shutter notification");
+ return DONE;
+ }
+
// Wait for new metadata result (mNewFrame)
while (!mNewFrameReceived) {
res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -569,15 +612,6 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
}
}
- // Approximation of the shutter being closed
- // - TODO: use the hal3 exposure callback in Camera3Device instead
- if (mNewFrameReceived && !mShutterNotified) {
- SharedParameters::Lock l(client->getParameters());
- /* warning: this also locks a SharedCameraCallbacks */
- shutterNotifyLocked(l.mParameters, client, mMsgType);
- mShutterNotified = true;
- }
-
// Wait until jpeg was captured by JpegProcessor
while (mNewFrameReceived && !mNewCaptureReceived) {
res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -591,6 +625,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
return DONE;
}
if (mNewFrameReceived && mNewCaptureReceived) {
+
if (mNewFrameId != mCaptureId) {
ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
mCaptureId, mNewFrameId);
@@ -667,7 +702,6 @@ CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
sp<Camera2Client> &/*client*/) {
status_t res;
ATRACE_CALL();
-
while (!mNewCaptureReceived) {
res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
if (res == TIMED_OUT) {
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index d42ab13..10252fb 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -62,6 +62,10 @@ class CaptureSequencer:
// Notifications about AE state changes
void notifyAutoExposure(uint8_t newState, int triggerId);
+ // Notifications about shutter (capture start)
+ void notifyShutter(const CaptureResultExtras& resultExtras,
+ nsecs_t timestamp);
+
// Notification from the frame processor
virtual void onResultAvailable(const CaptureResult &result);
@@ -95,7 +99,10 @@ class CaptureSequencer:
sp<MemoryBase> mCaptureBuffer;
Condition mNewCaptureSignal;
- bool mShutterNotified;
+ bool mShutterNotified; // Has CaptureSequencer sent shutter to Client
+ bool mHalNotifiedShutter; // Has HAL sent shutter to CaptureSequencer
+ int32_t mShutterCaptureId; // The captureId which is waiting for shutter notification
+ Condition mShutterNotifySignal;
/**
* Internal to CaptureSequencer
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index c662853..2f0117d 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -31,7 +31,7 @@ void CameraModule::deriveCameraCharacteristicsKeys(
// Keys added in HAL3.3
if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
- const size_t NUM_DERIVED_KEYS_HAL3_3 = 3;
+ const size_t NUM_DERIVED_KEYS_HAL3_3 = 5;
Vector<uint8_t> controlModes;
uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
@@ -80,6 +80,29 @@ void CameraModule::deriveCameraCharacteristicsKeys(
chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);
+ entry = chars.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
+ // HAL3.2 devices passing existing CTS test should all support all LSC modes and LSC map
+ bool lensShadingModeSupported = false;
+ if (entry.count > 0) {
+ for (size_t i = 0; i < entry.count; i++) {
+ if (entry.data.i32[i] == ANDROID_SHADING_MODE) {
+ lensShadingModeSupported = true;
+ break;
+ }
+ }
+ }
+ Vector<uint8_t> lscModes;
+ Vector<uint8_t> lscMapModes;
+ lscModes.push(ANDROID_SHADING_MODE_FAST);
+ lscModes.push(ANDROID_SHADING_MODE_HIGH_QUALITY);
+ lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
+ if (lensShadingModeSupported) {
+ lscModes.push(ANDROID_SHADING_MODE_OFF);
+ lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON);
+ }
+ chars.update(ANDROID_SHADING_AVAILABLE_MODES, lscModes);
+ chars.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, lscMapModes);
+
entry = chars.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
Vector<int32_t> availableCharsKeys;
availableCharsKeys.setCapacity(entry.count + NUM_DERIVED_KEYS_HAL3_3);
@@ -87,9 +110,12 @@ void CameraModule::deriveCameraCharacteristicsKeys(
availableCharsKeys.push(entry.data.i32[i]);
}
availableCharsKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
- availableCharsKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE);
+ availableCharsKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
availableCharsKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
+ availableCharsKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
+ availableCharsKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
chars.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, availableCharsKeys);
+
}
return;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 445c9c2..45d9421 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2613,6 +2613,21 @@ status_t Camera3Device::RequestThread::clear(
if (listener != NULL) {
for (RequestList::iterator it = mRequestQueue.begin();
it != mRequestQueue.end(); ++it) {
+ // Abort the input buffers for reprocess requests.
+ if ((*it)->mInputStream != NULL) {
+ camera3_stream_buffer_t inputBuffer;
+ status_t res = (*it)->mInputStream->getInputBuffer(&inputBuffer);
+ if (res != OK) {
+ ALOGW("%s: %d: couldn't get input buffer while clearing the request "
+ "list: %s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
+ } else {
+ res = (*it)->mInputStream->returnInputBuffer(inputBuffer);
+ if (res != OK) {
+ ALOGE("%s: %d: couldn't return input buffer while clearing the request "
+ "list: %s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
+ }
+ }
+ }
// Set the frame number this request would have had, if it
// had been submitted; this frame number will not be reused.
// The requestId and burstId fields were set when the request was
@@ -2752,29 +2767,11 @@ bool Camera3Device::RequestThread::threadLoop() {
__FUNCTION__);
}
- camera3_stream_buffer_t inputBuffer;
uint32_t totalNumBuffers = 0;
// Fill in buffers
-
if (nextRequest->mInputStream != NULL) {
- res = nextRequest->mInputStream->getInputBuffer(&inputBuffer);
- if (res != OK) {
- // Can't get input buffer from gralloc queue - this could be due to
- // disconnected queue or other producer misbehavior, so not a fatal
- // error
- ALOGE("RequestThread: Can't get input buffer, skipping request:"
- " %s (%d)", strerror(-res), res);
- Mutex::Autolock l(mRequestLock);
- if (mListener != NULL) {
- mListener->notifyError(
- ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
- nextRequest->mResultExtras);
- }
- cleanUpFailedRequest(request, nextRequest, outputBuffers);
- return true;
- }
- request.input_buffer = &inputBuffer;
+ request.input_buffer = &nextRequest->mInputBuffer;
totalNumBuffers += 1;
} else {
request.input_buffer = NULL;
@@ -2792,11 +2789,13 @@ bool Camera3Device::RequestThread::threadLoop() {
// error
ALOGE("RequestThread: Can't get output buffer, skipping request:"
" %s (%d)", strerror(-res), res);
- Mutex::Autolock l(mRequestLock);
- if (mListener != NULL) {
- mListener->notifyError(
- ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
- nextRequest->mResultExtras);
+ {
+ Mutex::Autolock l(mRequestLock);
+ if (mListener != NULL) {
+ mListener->notifyError(
+ ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ nextRequest->mResultExtras);
+ }
}
cleanUpFailedRequest(request, nextRequest, outputBuffers);
return true;
@@ -2865,6 +2864,12 @@ bool Camera3Device::RequestThread::threadLoop() {
nextRequest->mSettings.unlock(request.settings);
}
+ // Unset as current request
+ {
+ Mutex::Autolock l(mRequestLock);
+ mNextRequest.clear();
+ }
+
// Remove any previously queued triggers (after unlock)
res = removeTriggers(mPrevRequest);
if (res != OK) {
@@ -2890,6 +2895,13 @@ bool Camera3Device::RequestThread::isStreamPending(
sp<Camera3StreamInterface>& stream) {
Mutex::Autolock l(mRequestLock);
+ if (mNextRequest != nullptr) {
+ for (const auto& s : mNextRequest->mOutputStreams) {
+ if (stream == s) return true;
+ }
+ if (stream == mNextRequest->mInputStream) return true;
+ }
+
for (const auto& request : mRequestQueue) {
for (const auto& s : request->mOutputStreams) {
if (stream == s) return true;
@@ -2915,15 +2927,18 @@ void Camera3Device::RequestThread::cleanUpFailedRequest(
if (request.settings != NULL) {
nextRequest->mSettings.unlock(request.settings);
}
- if (request.input_buffer != NULL) {
- request.input_buffer->status = CAMERA3_BUFFER_STATUS_ERROR;
- nextRequest->mInputStream->returnInputBuffer(*(request.input_buffer));
+ if (nextRequest->mInputStream != NULL) {
+ nextRequest->mInputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
+ nextRequest->mInputStream->returnInputBuffer(nextRequest->mInputBuffer);
}
for (size_t i = 0; i < request.num_output_buffers; i++) {
outputBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
nextRequest->mOutputStreams.editItemAt(i)->returnBuffer(
outputBuffers[i], 0);
}
+
+ Mutex::Autolock l(mRequestLock);
+ mNextRequest.clear();
}
sp<Camera3Device::CaptureRequest>
@@ -3006,7 +3021,28 @@ sp<Camera3Device::CaptureRequest>
nextRequest->mResultExtras.frameNumber = mFrameNumber++;
nextRequest->mResultExtras.afTriggerId = mCurrentAfTriggerId;
nextRequest->mResultExtras.precaptureTriggerId = mCurrentPreCaptureTriggerId;
+
+ // Since RequestThread::clear() removes buffers from the input stream,
+ // get the right buffer here before unlocking mRequestLock
+ if (nextRequest->mInputStream != NULL) {
+ res = nextRequest->mInputStream->getInputBuffer(&nextRequest->mInputBuffer);
+ if (res != OK) {
+ // Can't get input buffer from gralloc queue - this could be due to
+ // disconnected queue or other producer misbehavior, so not a fatal
+ // error
+ ALOGE("%s: Can't get input buffer, skipping request:"
+ " %s (%d)", __FUNCTION__, strerror(-res), res);
+ if (mListener != NULL) {
+ mListener->notifyError(
+ ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ nextRequest->mResultExtras);
+ }
+ return NULL;
+ }
+ }
}
+ mNextRequest = nextRequest;
+
return nextRequest;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 4fbcb2e..b9313fc 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -234,6 +234,7 @@ class Camera3Device :
public:
CameraMetadata mSettings;
sp<camera3::Camera3Stream> mInputStream;
+ camera3_stream_buffer_t mInputBuffer;
Vector<sp<camera3::Camera3OutputStreamInterface> >
mOutputStreams;
CaptureResultExtras mResultExtras;
@@ -501,6 +502,10 @@ class Camera3Device :
Condition mRequestSignal;
RequestList mRequestQueue;
RequestList mRepeatingRequests;
+ // The next request being prepped for submission to the HAL, no longer
+ // on the request queue. Read-only even with mRequestLock held, outside
+ // of threadLoop
+ sp<const CaptureRequest> mNextRequest;
bool mReconfigured;
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 081aff7..9de6fe2 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -787,6 +787,7 @@ void SoundTriggerHwService::Module::setCaptureState_l(bool active)
mHwDevice->stop_recognition(mHwDevice, model->mHandle);
// keep model in ACTIVE state so that event is processed by onCallbackEvent()
struct sound_trigger_phrase_recognition_event phraseEvent;
+ memset(&phraseEvent, 0, sizeof(struct sound_trigger_phrase_recognition_event));
switch (model->mType) {
case SOUND_MODEL_TYPE_KEYPHRASE:
phraseEvent.num_phrases = model->mConfig.num_phrases;