author     mauimauer <sebastian@n-unity.de>   2011-12-08 21:05:12 +0100
committer  mauimauer <sebastian@n-unity.de>   2011-12-08 21:05:12 +0100
commit     e32b53c0441d5188b048176d8f11c5dea7383681 (patch)
tree       6ca32ddda100db73e3408c3e81c9afb10ac5a7ea
parent     1496d891eb5cf119cd4324dfdb3a96de33c5d5ed (diff)
download   device_samsung_n7000-e32b53c0441d5188b048176d8f11c5dea7383681 (.zip, .tar.gz, .tar.bz2)
Removed setEffectEnabled as my soundpolicy.so doesn't know that func
-rwxr-xr-x  audio/Android.mk                       62
-rwxr-xr-x  audio/AudioPolicyCompatClient.cpp     142
-rwxr-xr-x  audio/AudioPolicyCompatClient.h        79
-rwxr-xr-x  audio/AudioPolicyInterface.h          238
-rwxr-xr-x  audio/AudioPolicyManagerBase.cpp     2434
-rwxr-xr-x  audio/AudioPolicyManagerBase.h        393
-rwxr-xr-x  audio/AudioPolicyManagerDefault.cpp    34
-rwxr-xr-x  audio/AudioPolicyManagerDefault.h      43
-rwxr-xr-x  audio/AudioSystem.h                   562
-rwxr-xr-x  audio/audio_policy_hal.cpp            425
10 files changed, 4411 insertions(+), 1 deletion(-)
diff --git a/audio/Android.mk b/audio/Android.mk
index c72f44a..5026c54 100755
--- a/audio/Android.mk
+++ b/audio/Android.mk
@@ -32,6 +32,11 @@ LOCAL_SHARED_LIBRARIES += libdl
LOCAL_SHARED_LIBRARIES += libaudio
+ifeq ($(BOARD_FORCE_STATIC_A2DP),true)
+ LOCAL_SHARED_LIBRARIES += liba2dp
+endif
+
+
LOCAL_STATIC_LIBRARIES := \
libmedia_helper
@@ -40,4 +45,59 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \
include $(BUILD_SHARED_LIBRARY)
-endif
\ No newline at end of file
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ AudioPolicyManagerBase.cpp \
+ AudioPolicyCompatClient.cpp \
+ audio_policy_hal.cpp
+
+ifeq ($(AUDIO_POLICY_TEST),true)
+ LOCAL_CFLAGS += -DAUDIO_POLICY_TEST
+endif
+
+ifeq ($(BOARD_HAVE_BLUETOOTH),true)
+ LOCAL_CFLAGS += -DWITH_A2DP
+endif
+
+LOCAL_STATIC_LIBRARIES := libmedia_helper
+LOCAL_MODULE := libaudiopolicy_legacy2
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
+
+
+# The default audio policy, for now still implemented on top of legacy
+# policy code
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ AudioPolicyManagerDefault.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libutils \
+ libmedia
+
+LOCAL_STATIC_LIBRARIES := \
+ libmedia_helper
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libaudiopolicy_legacy2
+
+ifeq ($(BOARD_USES_AUDIO_LEGACY),true)
+LOCAL_SHARED_LIBRARIES += libaudiopolicy
+endif
+
+LOCAL_C_INCLUDES := $(LOCAL_PATH)
+LOCAL_MODULE := audio_policy.$(TARGET_BOARD_PLATFORM)
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
+LOCAL_MODULE_TAGS := optional
+
+ifeq ($(BOARD_HAVE_BLUETOOTH),true)
+ LOCAL_CFLAGS += -DWITH_A2DP
+endif
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif ## AUDIOPOLICY
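
The only effect of the BOARD_HAVE_BLUETOOTH / -DWITH_A2DP logic above is conditional compilation: the define gates the #ifdef WITH_A2DP blocks in the C++ sources added below. A minimal, self-contained sketch of that gating (hypothetical names, not code from this commit); building it with and without -DWITH_A2DP shows the two branches:

    #include <cstdio>

    // Illustrative only: the makefile's -DWITH_A2DP flag selects which branch of
    // guards like this one ends up in the compiled policy library.
    static void routeToDevice(unsigned device) {
    #ifdef WITH_A2DP
        std::printf("A2DP-aware routing for device %#x\n", device);
    #else
        std::printf("routing for device %#x (A2DP support compiled out)\n", device);
    #endif
    }

    int main() {
        routeToDevice(0x80u);   // hypothetical device bitmask
        return 0;
    }
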
diff --git a/audio/AudioPolicyCompatClient.cpp b/audio/AudioPolicyCompatClient.cpp
new file mode 100755
index 0000000..08c31c5
--- /dev/null
+++ b/audio/AudioPolicyCompatClient.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyCompatClient"
+//#define LOG_NDEBUG 0
+
+#include <stdint.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+
+#include <hardware_legacy/AudioSystemLegacy.h>
+
+#include "AudioPolicyCompatClient.h"
+
+namespace android_audio_legacy {
+
+audio_io_handle_t AudioPolicyCompatClient::openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ AudioSystem::output_flags flags)
+{
+ return mServiceOps->open_output(mService, pDevices, pSamplingRate, pFormat,
+ pChannels, pLatencyMs,
+ (audio_policy_output_flags_t)flags);
+}
+
+audio_io_handle_t AudioPolicyCompatClient::openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2)
+{
+ return mServiceOps->open_duplicate_output(mService, output1, output2);
+}
+
+status_t AudioPolicyCompatClient::closeOutput(audio_io_handle_t output)
+{
+ return mServiceOps->close_output(mService, output);
+}
+
+status_t AudioPolicyCompatClient::suspendOutput(audio_io_handle_t output)
+{
+ return mServiceOps->suspend_output(mService, output);
+}
+
+status_t AudioPolicyCompatClient::restoreOutput(audio_io_handle_t output)
+{
+ return mServiceOps->restore_output(mService, output);
+}
+
+audio_io_handle_t AudioPolicyCompatClient::openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics)
+{
+ return mServiceOps->open_input(mService, pDevices, pSamplingRate, pFormat,
+ pChannels, acoustics);
+}
+
+status_t AudioPolicyCompatClient::closeInput(audio_io_handle_t input)
+{
+ return mServiceOps->close_input(mService, input);
+}
+
+status_t AudioPolicyCompatClient::setStreamOutput(AudioSystem::stream_type stream,
+ audio_io_handle_t output)
+{
+ return mServiceOps->set_stream_output(mService, (audio_stream_type_t)stream,
+ output);
+}
+
+status_t AudioPolicyCompatClient::moveEffects(int session, audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput)
+{
+ return mServiceOps->move_effects(mService, session, srcOutput, dstOutput);
+}
+
+String8 AudioPolicyCompatClient::getParameters(audio_io_handle_t ioHandle, const String8& keys)
+{
+ char *str;
+ String8 out_str8;
+
+ str = mServiceOps->get_parameters(mService, ioHandle, keys.string());
+ out_str8 = String8(str);
+ free(str);
+
+ return out_str8;
+}
+
+void AudioPolicyCompatClient::setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs,
+ int delayMs)
+{
+ mServiceOps->set_parameters(mService, ioHandle, keyValuePairs.string(),
+ delayMs);
+}
+
+status_t AudioPolicyCompatClient::setStreamVolume(
+ AudioSystem::stream_type stream,
+ float volume,
+ audio_io_handle_t output,
+ int delayMs)
+{
+ return mServiceOps->set_stream_volume(mService, (audio_stream_type_t)stream,
+ volume, output, delayMs);
+}
+
+status_t AudioPolicyCompatClient::startTone(ToneGenerator::tone_type tone,
+ AudioSystem::stream_type stream)
+{
+ return mServiceOps->start_tone(mService,
+ AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
+ (audio_stream_type_t)stream);
+}
+
+status_t AudioPolicyCompatClient::stopTone()
+{
+ return mServiceOps->stop_tone(mService);
+}
+
+status_t AudioPolicyCompatClient::setVoiceVolume(float volume, int delayMs)
+{
+ return mServiceOps->set_voice_volume(mService, volume, delayMs);
+}
+
+}; // namespace android_audio_legacy
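
A detail worth noting in getParameters() above: the C-side get_parameters op returns a heap-allocated char* that the wrapper copies into a String8 and then frees. A small self-contained sketch of that ownership convention, with hypothetical names standing in for the real service ops:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <string>

    // Stand-in for the C-side service op: the result is heap-allocated and the
    // caller owns it, mirroring the get_parameters contract used above.
    static char *demo_get_parameters(const char *keys) {
        (void)keys;
        return strdup("routing=2;sampling_rate=44100");
    }

    // Mirrors the wrapper: copy into a managed string, then free the raw buffer
    // handed back by the C layer.
    static std::string getParametersWrapper(const char *keys) {
        char *raw = demo_get_parameters(keys);
        std::string out(raw ? raw : "");
        free(raw);
        return out;
    }

    int main() {
        std::printf("%s\n", getParametersWrapper("routing").c_str());
        return 0;
    }
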
diff --git a/audio/AudioPolicyCompatClient.h b/audio/AudioPolicyCompatClient.h
new file mode 100755
index 0000000..4feee37
--- /dev/null
+++ b/audio/AudioPolicyCompatClient.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPOLICYCLIENTLEGACY_H
+#define ANDROID_AUDIOPOLICYCLIENTLEGACY_H
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+
+#include <hardware_legacy/AudioSystemLegacy.h>
+#include <hardware_legacy/AudioPolicyInterface.h>
+
+/************************************/
+/* FOR BACKWARDS COMPATIBILITY ONLY */
+/************************************/
+namespace android_audio_legacy {
+
+class AudioPolicyCompatClient : public AudioPolicyClientInterface {
+public:
+ AudioPolicyCompatClient(struct audio_policy_service_ops *serviceOps,
+ void *service) :
+ mServiceOps(serviceOps) , mService(service) {}
+
+ virtual audio_io_handle_t openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ AudioSystem::output_flags flags);
+ virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2);
+ virtual status_t closeOutput(audio_io_handle_t output);
+ virtual status_t suspendOutput(audio_io_handle_t output);
+ virtual status_t restoreOutput(audio_io_handle_t output);
+ virtual audio_io_handle_t openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics);
+ virtual status_t closeInput(audio_io_handle_t input);
+ virtual status_t setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output);
+ virtual status_t moveEffects(int session,
+ audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput);
+
+ virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+ virtual void setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs,
+ int delayMs = 0);
+ virtual status_t setStreamVolume(AudioSystem::stream_type stream,
+ float volume,
+ audio_io_handle_t output,
+ int delayMs = 0);
+ virtual status_t startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream);
+ virtual status_t stopTone();
+ virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+
+private:
+ struct audio_policy_service_ops* mServiceOps;
+ void* mService;
+};
+
+}; // namespace android_audio_legacy
+
+#endif // ANDROID_AUDIOPOLICYCLIENTLEGACY_H
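
As the declaration shows, AudioPolicyCompatClient is a thin adapter: each method of the legacy C++ AudioPolicyClientInterface forwards to the matching entry of the C audio_policy_service_ops table together with the opaque service pointer. A self-contained miniature of that pattern, using hypothetical demo types rather than the real ones:

    #include <cstdio>

    // Hypothetical stand-in for audio_policy_service_ops: a plain C function table.
    struct demo_service_ops {
        int (*close_output)(void *service, int output);
    };

    // Thin wrapper in the spirit of AudioPolicyCompatClient: every method simply
    // forwards to the corresponding table entry, passing the opaque service.
    class DemoCompatClient {
    public:
        DemoCompatClient(demo_service_ops *ops, void *service)
            : mOps(ops), mService(service) {}
        int closeOutput(int output) { return mOps->close_output(mService, output); }
    private:
        demo_service_ops *mOps;
        void *mService;
    };

    static int demo_close_output(void * /*service*/, int output) {
        std::printf("close_output(%d) reached the C layer\n", output);
        return 0;
    }

    int main() {
        demo_service_ops ops = { demo_close_output };
        DemoCompatClient client(&ops, 0 /* opaque service pointer */);
        return client.closeOutput(1);
    }

Keeping the wrapper stateless apart from those two pointers leaves the C-side service as the single owner of all real state.
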
diff --git a/audio/AudioPolicyInterface.h b/audio/AudioPolicyInterface.h
new file mode 100755
index 0000000..78f87da
--- /dev/null
+++ b/audio/AudioPolicyInterface.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPOLICYINTERFACE_H
+#define ANDROID_AUDIOPOLICYINTERFACE_H
+
+#include <media/AudioSystem.h>
+#include <media/ToneGenerator.h>
+#include <utils/String8.h>
+
+#include <hardware_legacy/AudioSystemLegacy.h>
+
+namespace android_audio_legacy {
+ using android::Vector;
+ using android::String8;
+ using android::ToneGenerator;
+
+// ----------------------------------------------------------------------------
+
+// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
+// between the platform specific audio policy manager and Android generic audio policy manager.
+// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
+// This implementation makes use of the AudioPolicyClientInterface to control the activity and
+// configuration of audio input and output streams.
+//
+// The platform specific audio policy manager is in charge of the audio routing and volume control
+// policies for a given platform.
+// The main roles of this module are:
+// - keep track of current system state (removable device connections, phone state, user requests...).
+// System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
+// - process getOutput() queries received when AudioTrack objects are created: Those queries
+// return a handle to an output that has been selected, configured and opened by the audio policy manager and that
+// must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
+// When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
+// to close or reconfigure the output depending on other streams using this output and current system state.
+// - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
+// - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
+// applicable to each output as a function of platform specific settings and current output route (destination device). It
+// also makes sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
+//
+// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so)
+// and is linked with libaudioflinger.so
+
+
+// Audio Policy Manager Interface
+class AudioPolicyInterface
+{
+
+public:
+ virtual ~AudioPolicyInterface() {}
+ //
+ // configuration functions
+ //
+
+ // indicate a change in device connection status
+ virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address) = 0;
+ // retrieve a device connection status
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address) = 0;
+ // indicate a change in phone state. Valid phone states are defined by AudioSystem::audio_mode
+ virtual void setPhoneState(int state) = 0;
+ // indicate a change in ringer mode
+ virtual void setRingerMode(uint32_t mode, uint32_t mask) = 0;
+ // force using a specific device category for the specified usage
+ virtual void setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config) = 0;
+ // retrieve current device category forced for a given usage
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage) = 0;
+ // set a system property (e.g. camera sound always audible)
+ virtual void setSystemProperty(const char* property, const char* value) = 0;
+ // check proper initialization
+ virtual status_t initCheck() = 0;
+
+ //
+ // Audio routing query functions
+ //
+
+ // request an output appropriate for playback of the supplied stream type and parameters
+ virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT) = 0;
+ // indicates to the audio policy manager that the output starts being used by corresponding stream.
+ virtual status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0) = 0;
+ // indicates to the audio policy manager that the output stops being used by corresponding stream.
+ virtual status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0) = 0;
+ // releases the output.
+ virtual void releaseOutput(audio_io_handle_t output) = 0;
+
+ // request an input appropriate for record from the supplied device with supplied parameters.
+ virtual audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0) = 0;
+ // indicates to the audio policy manager that the input starts being used.
+ virtual status_t startInput(audio_io_handle_t input) = 0;
+ // indicates to the audio policy manager that the input stops being used.
+ virtual status_t stopInput(audio_io_handle_t input) = 0;
+ // releases the input.
+ virtual void releaseInput(audio_io_handle_t input) = 0;
+
+ //
+ // volume control functions
+ //
+
+ // initialises stream volume conversion parameters by specifying volume index range.
+ virtual void initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax) = 0;
+
+ // sets the new stream volume at a level corresponding to the supplied index
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index) = 0;
+ // retrieve current volume index for the specified stream
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index) = 0;
+
+ // return the strategy corresponding to a given stream type
+ virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream) = 0;
+
+ // return the enabled output devices for the given stream type
+ virtual uint32_t getDevicesForStream(AudioSystem::stream_type stream) = 0;
+
+ // Audio effect management
+ virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0;
+ virtual status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t io,
+ uint32_t strategy,
+ int session,
+ int id) = 0;
+ virtual status_t unregisterEffect(int id) = 0;
+
+ virtual bool isStreamActive(int stream, uint32_t inPastMs = 0) const = 0;
+
+ //dump state
+ virtual status_t dump(int fd) = 0;
+};
+
+
+// Audio Policy client Interface
+class AudioPolicyClientInterface
+{
+public:
+ virtual ~AudioPolicyClientInterface() {}
+
+ //
+ // Audio output Control functions
+ //
+
+ // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
+ // in case the audio policy manager has no specific requirements for the output being opened.
+ // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
+ // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+ virtual audio_io_handle_t openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ AudioSystem::output_flags flags) = 0;
+ // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
+ // a special mixer thread in the AudioFlinger.
+ virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
+ // closes the output stream
+ virtual status_t closeOutput(audio_io_handle_t output) = 0;
+ // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
+ // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+ virtual status_t suspendOutput(audio_io_handle_t output) = 0;
+ // restores a suspended output.
+ virtual status_t restoreOutput(audio_io_handle_t output) = 0;
+
+ //
+ // Audio input Control functions
+ //
+
+ // opens an audio input
+ virtual audio_io_handle_t openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics) = 0;
+ // closes an audio input
+ virtual status_t closeInput(audio_io_handle_t input) = 0;
+ //
+ // misc control functions
+ //
+
+ // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+ // for each output (destination device) it is attached to.
+ virtual status_t setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0;
+
+ // reroute a given stream type to the specified output
+ virtual status_t setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output) = 0;
+
+ // allows the audio policy manager to send proprietary information directly to the audio hardware interface.
+ virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0;
+ // allows the audio policy manager to receive proprietary information directly from the audio hardware interface.
+ virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
+
+ // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
+ // over a telephony device during a phone call.
+ virtual status_t startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream) = 0;
+ virtual status_t stopTone() = 0;
+
+ // set downlink audio volume.
+ virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
+
+ // move effect to the specified output
+ virtual status_t moveEffects(int session,
+ audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput) = 0;
+
+};
+
+extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
+extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface);
+
+
+}; // namespace android_audio_legacy
+
+#endif // ANDROID_AUDIOPOLICYINTERFACE_H
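
AudioPolicyInterface.h ends by declaring the extern "C" createAudioPolicyManager()/destroyAudioPolicyManager() factory pair that a platform policy library must export; in this commit that is presumably done by AudioPolicyManagerDefault.cpp (listed in the diffstat but not shown in this excerpt). A self-contained sketch of the factory-pair pattern with hypothetical stand-in types:

    #include <cstdio>

    class DemoClientInterface {};           // stands in for AudioPolicyClientInterface

    class DemoPolicyInterface {             // stands in for AudioPolicyInterface
    public:
        virtual ~DemoPolicyInterface() {}
        virtual int initCheck() = 0;        // 0 == NO_ERROR
    };

    class DemoPolicyManager : public DemoPolicyInterface {
    public:
        explicit DemoPolicyManager(DemoClientInterface *client) : mClient(client) {}
        virtual int initCheck() { return mClient != 0 ? 0 : -1; }
    private:
        DemoClientInterface *mClient;
    };

    // extern "C" keeps the symbol names unmangled so the loading code can look
    // them up by name, mirroring the factory pair declared in the header above.
    extern "C" DemoPolicyInterface *createDemoPolicyManager(DemoClientInterface *client) {
        return new DemoPolicyManager(client);
    }

    extern "C" void destroyDemoPolicyManager(DemoPolicyInterface *manager) {
        delete manager;
    }

    int main() {
        DemoClientInterface client;
        DemoPolicyInterface *manager = createDemoPolicyManager(&client);
        std::printf("initCheck() = %d\n", manager->initCheck());
        destroyDemoPolicyManager(manager);
        return 0;
    }
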
diff --git a/audio/AudioPolicyManagerBase.cpp b/audio/AudioPolicyManagerBase.cpp
new file mode 100755
index 0000000..ac31ad4
--- /dev/null
+++ b/audio/AudioPolicyManagerBase.cpp
@@ -0,0 +1,2434 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyManagerBase"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include <hardware_legacy/AudioPolicyManagerBase.h>
+#include <hardware/audio_effect.h>
+#include <math.h>
+
+namespace android_audio_legacy {
+
+// ----------------------------------------------------------------------------
+// AudioPolicyInterface implementation
+// ----------------------------------------------------------------------------
+
+
+status_t AudioPolicyManagerBase::setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address)
+{
+
+ LOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address);
+
+ // connect/disconnect only 1 device at a time
+ if (AudioSystem::popCount(device) != 1) return BAD_VALUE;
+
+ if (strlen(device_address) >= MAX_DEVICE_ADDRESS_LEN) {
+ LOGE("setDeviceConnectionState() invalid address: %s", device_address);
+ return BAD_VALUE;
+ }
+
+ // handle output devices
+ if (AudioSystem::isOutputDevice(device)) {
+
+#ifndef WITH_A2DP
+ if (AudioSystem::isA2dpDevice(device)) {
+ LOGE("setDeviceConnectionState() invalid device: %x", device);
+ return BAD_VALUE;
+ }
+#endif
+
+ switch (state)
+ {
+ // handle output device connection
+ case AudioSystem::DEVICE_STATE_AVAILABLE:
+ if (mAvailableOutputDevices & device) {
+ LOGW("setDeviceConnectionState() device already connected: %x", device);
+ return INVALID_OPERATION;
+ }
+ LOGV("setDeviceConnectionState() connecting device %x", device);
+
+ // register new device as available
+ mAvailableOutputDevices |= device;
+
+#ifdef WITH_A2DP
+ // handle A2DP device connection
+ if (AudioSystem::isA2dpDevice(device)) {
+ status_t status = handleA2dpConnection(device, device_address);
+ if (status != NO_ERROR) {
+ mAvailableOutputDevices &= ~device;
+ return status;
+ }
+ } else
+#endif
+ {
+ if (AudioSystem::isBluetoothScoDevice(device)) {
+ LOGV("setDeviceConnectionState() BT SCO device, address %s", device_address);
+ // keep track of SCO device address
+ mScoDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
+ }
+ }
+ break;
+ // handle output device disconnection
+ case AudioSystem::DEVICE_STATE_UNAVAILABLE: {
+ if (!(mAvailableOutputDevices & device)) {
+ LOGW("setDeviceConnectionState() device not connected: %x", device);
+ return INVALID_OPERATION;
+ }
+
+
+ LOGV("setDeviceConnectionState() disconnecting device %x", device);
+ // remove device from available output devices
+ mAvailableOutputDevices &= ~device;
+
+#ifdef WITH_A2DP
+ // handle A2DP device disconnection
+ if (AudioSystem::isA2dpDevice(device)) {
+ status_t status = handleA2dpDisconnection(device, device_address);
+ if (status != NO_ERROR) {
+ mAvailableOutputDevices |= device;
+ return status;
+ }
+ } else
+#endif
+ {
+ if (AudioSystem::isBluetoothScoDevice(device)) {
+ mScoDeviceAddress = "";
+ }
+ }
+ } break;
+
+ default:
+ LOGE("setDeviceConnectionState() invalid state: %x", state);
+ return BAD_VALUE;
+ }
+
+ // request routing change if necessary
+ uint32_t newDevice = getNewDevice(mHardwareOutput, false);
+#ifdef WITH_A2DP
+ checkA2dpSuspend();
+ checkOutputForAllStrategies();
+ // A2DP outputs must be closed after checkOutputForAllStrategies() is executed
+ if (state == AudioSystem::DEVICE_STATE_UNAVAILABLE && AudioSystem::isA2dpDevice(device)) {
+ closeA2dpOutputs();
+ }
+#endif
+ updateDeviceForStrategy();
+ setOutputDevice(mHardwareOutput, newDevice);
+
+ if (device == AudioSystem::DEVICE_OUT_WIRED_HEADSET) {
+ device = AudioSystem::DEVICE_IN_WIRED_HEADSET;
+ } else if (device == AudioSystem::DEVICE_OUT_BLUETOOTH_SCO ||
+ device == AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET ||
+ device == AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT) {
+ device = AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+ } else {
+ return NO_ERROR;
+ }
+ }
+ // handle input devices
+ if (AudioSystem::isInputDevice(device)) {
+
+ switch (state)
+ {
+ // handle input device connection
+ case AudioSystem::DEVICE_STATE_AVAILABLE: {
+ if (mAvailableInputDevices & device) {
+ LOGW("setDeviceConnectionState() device already connected: %d", device);
+ return INVALID_OPERATION;
+ }
+ mAvailableInputDevices |= device;
+ }
+ break;
+
+ // handle input device disconnection
+ case AudioSystem::DEVICE_STATE_UNAVAILABLE: {
+ if (!(mAvailableInputDevices & device)) {
+ LOGW("setDeviceConnectionState() device not connected: %d", device);
+ return INVALID_OPERATION;
+ }
+ mAvailableInputDevices &= ~device;
+ } break;
+
+ default:
+ LOGE("setDeviceConnectionState() invalid state: %x", state);
+ return BAD_VALUE;
+ }
+
+ audio_io_handle_t activeInput = getActiveInput();
+ if (activeInput != 0) {
+ AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput);
+ uint32_t newDevice = getDeviceForInputSource(inputDesc->mInputSource);
+ if (newDevice != inputDesc->mDevice) {
+ LOGV("setDeviceConnectionState() changing device from %x to %x for input %d",
+ inputDesc->mDevice, newDevice, activeInput);
+ inputDesc->mDevice = newDevice;
+ AudioParameter param = AudioParameter();
+ param.addInt(String8(AudioParameter::keyRouting), (int)newDevice);
+ mpClientInterface->setParameters(activeInput, param.toString());
+ }
+ }
+
+ return NO_ERROR;
+ }
+
+ LOGW("setDeviceConnectionState() invalid device: %x", device);
+ return BAD_VALUE;
+}
+
+AudioSystem::device_connection_state AudioPolicyManagerBase::getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address)
+{
+ AudioSystem::device_connection_state state = AudioSystem::DEVICE_STATE_UNAVAILABLE;
+ String8 address = String8(device_address);
+ if (AudioSystem::isOutputDevice(device)) {
+ if (device & mAvailableOutputDevices) {
+#ifdef WITH_A2DP
+ if (AudioSystem::isA2dpDevice(device) &&
+ address != "" && mA2dpDeviceAddress != address) {
+ return state;
+ }
+#endif
+ if (AudioSystem::isBluetoothScoDevice(device) &&
+ address != "" && mScoDeviceAddress != address) {
+ return state;
+ }
+ state = AudioSystem::DEVICE_STATE_AVAILABLE;
+ }
+ } else if (AudioSystem::isInputDevice(device)) {
+ if (device & mAvailableInputDevices) {
+ state = AudioSystem::DEVICE_STATE_AVAILABLE;
+ }
+ }
+
+ return state;
+}
+
+void AudioPolicyManagerBase::setPhoneState(int state)
+{
+ LOGV("setPhoneState() state %d", state);
+ uint32_t newDevice = 0;
+ if (state < 0 || state >= AudioSystem::NUM_MODES) {
+ LOGW("setPhoneState() invalid state %d", state);
+ return;
+ }
+
+ if (state == mPhoneState ) {
+ LOGW("setPhoneState() setting same state %d", state);
+ return;
+ }
+
+ // if leaving call state, handle special case of active streams
+ // pertaining to sonification strategy see handleIncallSonification()
+ if (isInCall()) {
+ LOGV("setPhoneState() in call state management: new state is %d", state);
+ for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ handleIncallSonification(stream, false, true);
+ }
+ }
+
+ // store previous phone state for management of sonification strategy below
+ int oldState = mPhoneState;
+ mPhoneState = state;
+ bool force = false;
+
+ // are we entering or starting a call
+ if (!isStateInCall(oldState) && isStateInCall(state)) {
+ LOGV(" Entering call in setPhoneState()");
+ // force routing command to audio hardware when starting a call
+ // even if no device change is needed
+ force = true;
+ } else if (isStateInCall(oldState) && !isStateInCall(state)) {
+ LOGV(" Exiting call in setPhoneState()");
+ // force routing command to audio hardware when exiting a call
+ // even if no device change is needed
+ force = true;
+ } else if (isStateInCall(state) && (state != oldState)) {
+ LOGV(" Switching between telephony and VoIP in setPhoneState()");
+ // force routing command to audio hardware when switching between telephony and VoIP
+ // even if no device change is needed
+ force = true;
+ }
+
+ // check for device and output changes triggered by new phone state
+ newDevice = getNewDevice(mHardwareOutput, false);
+#ifdef WITH_A2DP
+ checkA2dpSuspend();
+ checkOutputForAllStrategies();
+#endif
+ updateDeviceForStrategy();
+
+ AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
+
+ // force routing command to audio hardware when ending call
+ // even if no device change is needed
+ if (isStateInCall(oldState) && newDevice == 0) {
+ newDevice = hwOutputDesc->device();
+ }
+
+ // when changing from ring tone to in call mode, mute the ringing tone
+ // immediately and delay the route change to avoid sending the ring tone
+ // tail into the earpiece or headset.
+ int delayMs = 0;
+ if (isStateInCall(state) && oldState == AudioSystem::MODE_RINGTONE) {
+ // delay the device change command by twice the output latency to have some margin
+ // and be sure that audio buffers not yet affected by the mute are out when
+ // we actually apply the route change
+ delayMs = hwOutputDesc->mLatency*2;
+ setStreamMute(AudioSystem::RING, true, mHardwareOutput);
+ }
+
+ // change routing if necessary
+ setOutputDevice(mHardwareOutput, newDevice, force, delayMs);
+
+ // if entering in call state, handle special case of active streams
+ // pertaining to sonification strategy see handleIncallSonification()
+ if (isStateInCall(state)) {
+ LOGV("setPhoneState() in call state management: new state is %d", state);
+ // unmute the ringing tone after a sufficient delay if it was muted before
+ // setting output device above
+ if (oldState == AudioSystem::MODE_RINGTONE) {
+ setStreamMute(AudioSystem::RING, false, mHardwareOutput, MUTE_TIME_MS);
+ }
+ for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ handleIncallSonification(stream, true, true);
+ }
+ }
+
+ // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
+ if (state == AudioSystem::MODE_RINGTONE &&
+ isStreamActive(AudioSystem::MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
+ mLimitRingtoneVolume = true;
+ } else {
+ mLimitRingtoneVolume = false;
+ }
+}
+
+void AudioPolicyManagerBase::setRingerMode(uint32_t mode, uint32_t mask)
+{
+ LOGV("setRingerMode() mode %x, mask %x", mode, mask);
+
+ mRingerMode = mode;
+}
+
+void AudioPolicyManagerBase::setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+{
+ LOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mPhoneState);
+
+ bool forceVolumeReeval = false;
+ switch(usage) {
+ case AudioSystem::FOR_COMMUNICATION:
+ if (config != AudioSystem::FORCE_SPEAKER && config != AudioSystem::FORCE_BT_SCO &&
+ config != AudioSystem::FORCE_NONE) {
+ LOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
+ return;
+ }
+ forceVolumeReeval = true;
+ mForceUse[usage] = config;
+ break;
+ case AudioSystem::FOR_MEDIA:
+ if (config != AudioSystem::FORCE_HEADPHONES && config != AudioSystem::FORCE_BT_A2DP &&
+ config != AudioSystem::FORCE_WIRED_ACCESSORY &&
+ config != AudioSystem::FORCE_ANALOG_DOCK &&
+ config != AudioSystem::FORCE_DIGITAL_DOCK && config != AudioSystem::FORCE_NONE) {
+ LOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
+ return;
+ }
+ mForceUse[usage] = config;
+ break;
+ case AudioSystem::FOR_RECORD:
+ if (config != AudioSystem::FORCE_BT_SCO && config != AudioSystem::FORCE_WIRED_ACCESSORY &&
+ config != AudioSystem::FORCE_NONE) {
+ LOGW("setForceUse() invalid config %d for FOR_RECORD", config);
+ return;
+ }
+ mForceUse[usage] = config;
+ break;
+ case AudioSystem::FOR_DOCK:
+ if (config != AudioSystem::FORCE_NONE && config != AudioSystem::FORCE_BT_CAR_DOCK &&
+ config != AudioSystem::FORCE_BT_DESK_DOCK &&
+ config != AudioSystem::FORCE_WIRED_ACCESSORY &&
+ config != AudioSystem::FORCE_ANALOG_DOCK &&
+ config != AudioSystem::FORCE_DIGITAL_DOCK) {
+ LOGW("setForceUse() invalid config %d for FOR_DOCK", config);
+ }
+ forceVolumeReeval = true;
+ mForceUse[usage] = config;
+ break;
+ default:
+ LOGW("setForceUse() invalid usage %d", usage);
+ break;
+ }
+
+ // check for device and output changes triggered by new phone state
+ uint32_t newDevice = getNewDevice(mHardwareOutput, false);
+#ifdef WITH_A2DP
+ checkA2dpSuspend();
+ checkOutputForAllStrategies();
+#endif
+ updateDeviceForStrategy();
+ setOutputDevice(mHardwareOutput, newDevice);
+ if (forceVolumeReeval) {
+ applyStreamVolumes(mHardwareOutput, newDevice, 0, true);
+ }
+
+ audio_io_handle_t activeInput = getActiveInput();
+ if (activeInput != 0) {
+ AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput);
+ newDevice = getDeviceForInputSource(inputDesc->mInputSource);
+ if (newDevice != inputDesc->mDevice) {
+ LOGV("setForceUse() changing device from %x to %x for input %d",
+ inputDesc->mDevice, newDevice, activeInput);
+ inputDesc->mDevice = newDevice;
+ AudioParameter param = AudioParameter();
+ param.addInt(String8(AudioParameter::keyRouting), (int)newDevice);
+ mpClientInterface->setParameters(activeInput, param.toString());
+ }
+ }
+
+}
+
+AudioSystem::forced_config AudioPolicyManagerBase::getForceUse(AudioSystem::force_use usage)
+{
+ return mForceUse[usage];
+}
+
+void AudioPolicyManagerBase::setSystemProperty(const char* property, const char* value)
+{
+ LOGV("setSystemProperty() property %s, value %s", property, value);
+ if (strcmp(property, "ro.camera.sound.forced") == 0) {
+ if (atoi(value)) {
+ LOGV("ENFORCED_AUDIBLE cannot be muted");
+ mStreams[AudioSystem::ENFORCED_AUDIBLE].mCanBeMuted = false;
+ } else {
+ LOGV("ENFORCED_AUDIBLE can be muted");
+ mStreams[AudioSystem::ENFORCED_AUDIBLE].mCanBeMuted = true;
+ }
+ }
+}
+
+audio_io_handle_t AudioPolicyManagerBase::getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags)
+{
+ audio_io_handle_t output = 0;
+ uint32_t latency = 0;
+ routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
+ uint32_t device = getDeviceForStrategy(strategy);
+ LOGV("getOutput() stream %d, samplingRate %d, format %d, channels %x, flags %x", stream, samplingRate, format, channels, flags);
+
+#ifdef AUDIO_POLICY_TEST
+ if (mCurOutput != 0) {
+ LOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channels %x, mDirectOutput %d",
+ mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
+
+ if (mTestOutputs[mCurOutput] == 0) {
+ LOGV("getOutput() opening test output");
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = mTestDevice;
+ outputDesc->mSamplingRate = mTestSamplingRate;
+ outputDesc->mFormat = mTestFormat;
+ outputDesc->mChannels = mTestChannels;
+ outputDesc->mLatency = mTestLatencyMs;
+ outputDesc->mFlags = (AudioSystem::output_flags)(mDirectOutput ? AudioSystem::OUTPUT_FLAG_DIRECT : 0);
+ outputDesc->mRefCount[stream] = 0;
+ mTestOutputs[mCurOutput] = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+ if (mTestOutputs[mCurOutput]) {
+ AudioParameter outputCmd = AudioParameter();
+ outputCmd.addInt(String8("set_id"),mCurOutput);
+ mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
+ addOutput(mTestOutputs[mCurOutput], outputDesc);
+ }
+ }
+ return mTestOutputs[mCurOutput];
+ }
+#endif //AUDIO_POLICY_TEST
+
+ // open a direct output if required by specified parameters
+ if (needsDirectOuput(stream, samplingRate, format, channels, flags, device)) {
+
+ LOGV("getOutput() opening direct output device %x", device);
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = device;
+ outputDesc->mSamplingRate = samplingRate;
+ outputDesc->mFormat = format;
+ outputDesc->mChannels = channels;
+ outputDesc->mLatency = 0;
+ outputDesc->mFlags = (AudioSystem::output_flags)(flags | AudioSystem::OUTPUT_FLAG_DIRECT);
+ outputDesc->mRefCount[stream] = 0;
+ outputDesc->mStopTime[stream] = 0;
+ output = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+
+ // only accept an output with the requested parameters
+ if (output == 0 ||
+ (samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) ||
+ (format != 0 && format != outputDesc->mFormat) ||
+ (channels != 0 && channels != outputDesc->mChannels)) {
+ LOGV("getOutput() failed opening direct output: samplingRate %d, format %d, channels %d",
+ samplingRate, format, channels);
+ if (output != 0) {
+ mpClientInterface->closeOutput(output);
+ }
+ delete outputDesc;
+ return 0;
+ }
+ addOutput(output, outputDesc);
+ return output;
+ }
+
+ if (channels != 0 && channels != AudioSystem::CHANNEL_OUT_MONO &&
+ channels != AudioSystem::CHANNEL_OUT_STEREO) {
+ return 0;
+ }
+ // open a non direct output
+
+ // get which output is suitable for the specified stream. The actual routing change will happen
+ // when startOutput() will be called
+ uint32_t a2dpDevice = device & AudioSystem::DEVICE_OUT_ALL_A2DP;
+ if (AudioSystem::popCount((AudioSystem::audio_devices)device) == 2) {
+#ifdef WITH_A2DP
+ if (a2dpUsedForSonification() && a2dpDevice != 0) {
+ // if playing on 2 devices among which one is A2DP, use duplicated output
+ LOGV("getOutput() using duplicated output");
+ LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device in multiple %x selected but A2DP output not opened", device);
+ output = mDuplicatedOutput;
+ } else
+#endif
+ {
+ // if playing on 2 devices among which none is A2DP, use hardware output
+ output = mHardwareOutput;
+ }
+ LOGV("getOutput() using output %d for 2 devices %x", output, device);
+ } else {
+#ifdef WITH_A2DP
+ if (a2dpDevice != 0) {
+ // if playing on A2DP device, use a2dp output
+ LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device %x selected but A2DP output not opened", device);
+ output = mA2dpOutput;
+ } else
+#endif
+ {
+ // if playing on not A2DP device, use hardware output
+ output = mHardwareOutput;
+ }
+ }
+
+
+ LOGW_IF((output ==0), "getOutput() could not find output for stream %d, samplingRate %d, format %d, channels %x, flags %x",
+ stream, samplingRate, format, channels, flags);
+
+ return output;
+}
+
+status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
+{
+ LOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("startOutput() unknow output %d", output);
+ return BAD_VALUE;
+ }
+
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+ routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
+
+#ifdef WITH_A2DP
+ if (mA2dpOutput != 0 && !a2dpUsedForSonification() &&
+ (strategy == STRATEGY_SONIFICATION || strategy == STRATEGY_ENFORCED_AUDIBLE)) {
+ setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
+ }
+#endif
+
+ // increment usage count for this stream on the requested output:
+ // NOTE that the usage count is the same for duplicated output and hardware output which is
+ // necessary for a correct control of hardware output routing by startOutput() and stopOutput()
+ outputDesc->changeRefCount(stream, 1);
+
+ uint32_t prevDevice = outputDesc->mDevice;
+ setOutputDevice(output, getNewDevice(output));
+
+ // handle special case for sonification while in call
+ if (isInCall()) {
+ handleIncallSonification(stream, true, false);
+ }
+
+ // apply volume rules for current stream and device if necessary
+ checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, outputDesc->device());
+
+ // FIXME: need a delay to make sure that audio path switches to speaker before sound
+ // starts. Should be platform specific?
+ if (stream == AudioSystem::ENFORCED_AUDIBLE &&
+ prevDevice != outputDesc->mDevice) {
+ usleep(outputDesc->mLatency*4*1000);
+ }
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
+{
+ LOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("stopOutput() unknow output %d", output);
+ return BAD_VALUE;
+ }
+
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+ routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
+
+ // handle special case for sonification while in call
+ if (isInCall()) {
+ handleIncallSonification(stream, false, false);
+ }
+
+ if (outputDesc->mRefCount[stream] > 0) {
+ // decrement usage count of this stream on the output
+ outputDesc->changeRefCount(stream, -1);
+ // store time at which the stream was stopped - see isStreamActive()
+ outputDesc->mStopTime[stream] = systemTime();
+
+ setOutputDevice(output, getNewDevice(output), false, outputDesc->mLatency*2);
+
+#ifdef WITH_A2DP
+ if (mA2dpOutput != 0 && !a2dpUsedForSonification() &&
+ (strategy == STRATEGY_SONIFICATION || strategy == STRATEGY_ENFORCED_AUDIBLE)) {
+ setStrategyMute(STRATEGY_MEDIA,
+ false,
+ mA2dpOutput,
+ mOutputs.valueFor(mHardwareOutput)->mLatency*2);
+ }
+#endif
+ if (output != mHardwareOutput) {
+ setOutputDevice(mHardwareOutput, getNewDevice(mHardwareOutput), true);
+ }
+ return NO_ERROR;
+ } else {
+ LOGW("stopOutput() refcount is already 0 for output %d", output);
+ return INVALID_OPERATION;
+ }
+}
+
+void AudioPolicyManagerBase::releaseOutput(audio_io_handle_t output)
+{
+ LOGV("releaseOutput() %d", output);
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("releaseOutput() releasing unknown output %d", output);
+ return;
+ }
+
+#ifdef AUDIO_POLICY_TEST
+ int testIndex = testOutputIndex(output);
+ if (testIndex != 0) {
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+ if (outputDesc->refCount() == 0) {
+ mpClientInterface->closeOutput(output);
+ delete mOutputs.valueAt(index);
+ mOutputs.removeItem(output);
+ mTestOutputs[testIndex] = 0;
+ }
+ return;
+ }
+#endif //AUDIO_POLICY_TEST
+
+ if (mOutputs.valueAt(index)->mFlags & AudioSystem::OUTPUT_FLAG_DIRECT) {
+ mpClientInterface->closeOutput(output);
+ delete mOutputs.valueAt(index);
+ mOutputs.removeItem(output);
+ }
+}
+
+audio_io_handle_t AudioPolicyManagerBase::getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics)
+{
+ audio_io_handle_t input = 0;
+ uint32_t device = getDeviceForInputSource(inputSource);
+
+ LOGV("getInput() inputSource %d, samplingRate %d, format %d, channels %x, acoustics %x", inputSource, samplingRate, format, channels, acoustics);
+
+ if (device == 0) {
+ return 0;
+ }
+
+ // adapt channel selection to input source
+ switch(inputSource) {
+ case AUDIO_SOURCE_VOICE_UPLINK:
+ channels = AudioSystem::CHANNEL_IN_VOICE_UPLINK;
+ break;
+ case AUDIO_SOURCE_VOICE_DOWNLINK:
+ channels = AudioSystem::CHANNEL_IN_VOICE_DNLINK;
+ break;
+ case AUDIO_SOURCE_VOICE_CALL:
+ channels = (AudioSystem::CHANNEL_IN_VOICE_UPLINK | AudioSystem::CHANNEL_IN_VOICE_DNLINK);
+ break;
+ default:
+ break;
+ }
+
+ AudioInputDescriptor *inputDesc = new AudioInputDescriptor();
+
+ inputDesc->mInputSource = inputSource;
+ inputDesc->mDevice = device;
+ inputDesc->mSamplingRate = samplingRate;
+ inputDesc->mFormat = format;
+ inputDesc->mChannels = channels;
+ inputDesc->mAcoustics = acoustics;
+ inputDesc->mRefCount = 0;
+ input = mpClientInterface->openInput(&inputDesc->mDevice,
+ &inputDesc->mSamplingRate,
+ &inputDesc->mFormat,
+ &inputDesc->mChannels,
+ inputDesc->mAcoustics);
+
+ // only accept input with the exact requested set of parameters
+ if (input == 0 ||
+ (samplingRate != inputDesc->mSamplingRate) ||
+ (format != inputDesc->mFormat) ||
+ (channels != inputDesc->mChannels)) {
+ LOGV("getInput() failed opening input: samplingRate %d, format %d, channels %d",
+ samplingRate, format, channels);
+ if (input != 0) {
+ mpClientInterface->closeInput(input);
+ }
+ delete inputDesc;
+ return 0;
+ }
+ mInputs.add(input, inputDesc);
+ return input;
+}
+
+status_t AudioPolicyManagerBase::startInput(audio_io_handle_t input)
+{
+ LOGV("startInput() input %d", input);
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ LOGW("startInput() unknow input %d", input);
+ return BAD_VALUE;
+ }
+ AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
+
+#ifdef AUDIO_POLICY_TEST
+ if (mTestInput == 0)
+#endif //AUDIO_POLICY_TEST
+ {
+ // refuse 2 active AudioRecord clients at the same time
+ if (getActiveInput() != 0) {
+ LOGW("startInput() input %d failed: other input already started", input);
+ return INVALID_OPERATION;
+ }
+ }
+
+ AudioParameter param = AudioParameter();
+ param.addInt(String8(AudioParameter::keyRouting), (int)inputDesc->mDevice);
+
+ param.addInt(String8(AudioParameter::keyInputSource), (int)inputDesc->mInputSource);
+ LOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
+
+ mpClientInterface->setParameters(input, param.toString());
+
+ inputDesc->mRefCount = 1;
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerBase::stopInput(audio_io_handle_t input)
+{
+ LOGV("stopInput() input %d", input);
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ LOGW("stopInput() unknow input %d", input);
+ return BAD_VALUE;
+ }
+ AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
+
+ if (inputDesc->mRefCount == 0) {
+ LOGW("stopInput() input %d already stopped", input);
+ return INVALID_OPERATION;
+ } else {
+ AudioParameter param = AudioParameter();
+ param.addInt(String8(AudioParameter::keyRouting), 0);
+ mpClientInterface->setParameters(input, param.toString());
+ inputDesc->mRefCount = 0;
+ return NO_ERROR;
+ }
+}
+
+void AudioPolicyManagerBase::releaseInput(audio_io_handle_t input)
+{
+ LOGV("releaseInput() %d", input);
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ LOGW("releaseInput() releasing unknown input %d", input);
+ return;
+ }
+ mpClientInterface->closeInput(input);
+ delete mInputs.valueAt(index);
+ mInputs.removeItem(input);
+ LOGV("releaseInput() exit");
+}
+
+void AudioPolicyManagerBase::initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax)
+{
+ LOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+ if (indexMin < 0 || indexMin >= indexMax) {
+ LOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d", stream , indexMin, indexMax);
+ return;
+ }
+ mStreams[stream].mIndexMin = indexMin;
+ mStreams[stream].mIndexMax = indexMax;
+}
+
+status_t AudioPolicyManagerBase::setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+{
+
+ if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) {
+ return BAD_VALUE;
+ }
+
+ // Force max volume if stream cannot be muted
+ if (!mStreams[stream].mCanBeMuted) index = mStreams[stream].mIndexMax;
+
+ LOGV("setStreamVolumeIndex() stream %d, index %d", stream, index);
+ mStreams[stream].mIndexCur = index;
+
+ // compute and apply stream volume on all outputs according to connected device
+ status_t status = NO_ERROR;
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), mOutputs.valueAt(i)->device());
+ if (volStatus != NO_ERROR) {
+ status = volStatus;
+ }
+ }
+ return status;
+}
+
+status_t AudioPolicyManagerBase::getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+{
+ if (index == 0) {
+ return BAD_VALUE;
+ }
+ LOGV("getStreamVolumeIndex() stream %d", stream);
+ *index = mStreams[stream].mIndexCur;
+ return NO_ERROR;
+}
+
+audio_io_handle_t AudioPolicyManagerBase::getOutputForEffect(effect_descriptor_t *desc)
+{
+ LOGV("getOutputForEffect()");
+ // apply simple rule where global effects are attached to the same output as MUSIC streams
+ return getOutput(AudioSystem::MUSIC);
+}
+
+status_t AudioPolicyManagerBase::registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t io,
+ uint32_t strategy,
+ int session,
+ int id)
+{
+ ssize_t index = mOutputs.indexOfKey(io);
+ if (index < 0) {
+ index = mInputs.indexOfKey(io);
+ if (index < 0) {
+ LOGW("registerEffect() unknown io %d", io);
+ return INVALID_OPERATION;
+ }
+ }
+
+ if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
+ LOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
+ desc->name, desc->memoryUsage);
+ return INVALID_OPERATION;
+ }
+ mTotalEffectsMemory += desc->memoryUsage;
+ LOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
+ desc->name, io, strategy, session, id);
+ LOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
+
+ EffectDescriptor *pDesc = new EffectDescriptor();
+ memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t));
+ pDesc->mIo = io;
+ pDesc->mStrategy = (routing_strategy)strategy;
+ pDesc->mSession = session;
+ pDesc->mEnabled = false;
+
+ mEffects.add(id, pDesc);
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerBase::unregisterEffect(int id)
+{
+ ssize_t index = mEffects.indexOfKey(id);
+ if (index < 0) {
+ LOGW("unregisterEffect() unknown effect ID %d", id);
+ return INVALID_OPERATION;
+ }
+
+ EffectDescriptor *pDesc = mEffects.valueAt(index);
+
+ setEffectEnabled(pDesc, false);
+
+ if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) {
+ LOGW("unregisterEffect() memory %d too big for total %d",
+ pDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+ pDesc->mDesc.memoryUsage = mTotalEffectsMemory;
+ }
+ mTotalEffectsMemory -= pDesc->mDesc.memoryUsage;
+ LOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d",
+ pDesc->mDesc.name, id, pDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+
+ mEffects.removeItem(id);
+ delete pDesc;
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerBase::setEffectEnabled(int id, bool enabled)
+{
+ ssize_t index = mEffects.indexOfKey(id);
+ if (index < 0) {
+ LOGW("unregisterEffect() unknown effect ID %d", id);
+ return INVALID_OPERATION;
+ }
+
+ return setEffectEnabled(mEffects.valueAt(index), enabled);
+}
+
+status_t AudioPolicyManagerBase::setEffectEnabled(EffectDescriptor *pDesc, bool enabled)
+{
+ if (enabled == pDesc->mEnabled) {
+ LOGV("setEffectEnabled(%s) effect already %s",
+ enabled?"true":"false", enabled?"enabled":"disabled");
+ return INVALID_OPERATION;
+ }
+
+ if (enabled) {
+ if (mTotalEffectsCpuLoad + pDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) {
+ LOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS",
+ pDesc->mDesc.name, (float)pDesc->mDesc.cpuLoad/10);
+ return INVALID_OPERATION;
+ }
+ mTotalEffectsCpuLoad += pDesc->mDesc.cpuLoad;
+ LOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad);
+ } else {
+ if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) {
+ LOGW("setEffectEnabled(false) CPU load %d too high for total %d",
+ pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
+ pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
+ }
+ mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad;
+ LOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad);
+ }
+ pDesc->mEnabled = enabled;
+ return NO_ERROR;
+}
+
+bool AudioPolicyManagerBase::isStreamActive(int stream, uint32_t inPastMs) const
+{
+ nsecs_t sysTime = systemTime();
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ if (mOutputs.valueAt(i)->mRefCount[stream] != 0 ||
+ ns2ms(sysTime - mOutputs.valueAt(i)->mStopTime[stream]) < inPastMs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+status_t AudioPolicyManagerBase::dump(int fd)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Hardware Output: %d\n", mHardwareOutput);
+ result.append(buffer);
+#ifdef WITH_A2DP
+ snprintf(buffer, SIZE, " A2DP Output: %d\n", mA2dpOutput);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Duplicated Output: %d\n", mDuplicatedOutput);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " A2DP device address: %s\n", mA2dpDeviceAddress.string());
+ result.append(buffer);
+#endif
+ snprintf(buffer, SIZE, " SCO device address: %s\n", mScoDeviceAddress.string());
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Output devices: %08x\n", mAvailableOutputDevices);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Input devices: %08x\n", mAvailableInputDevices);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Ringer mode: %d\n", mRingerMode);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Force use for communications %d\n", mForceUse[AudioSystem::FOR_COMMUNICATION]);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Force use for media %d\n", mForceUse[AudioSystem::FOR_MEDIA]);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Force use for record %d\n", mForceUse[AudioSystem::FOR_RECORD]);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Force use for dock %d\n", mForceUse[AudioSystem::FOR_DOCK]);
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ snprintf(buffer, SIZE, "\nOutputs dump:\n");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ snprintf(buffer, SIZE, "- Output %d dump:\n", mOutputs.keyAt(i));
+ write(fd, buffer, strlen(buffer));
+ mOutputs.valueAt(i)->dump(fd);
+ }
+
+ snprintf(buffer, SIZE, "\nInputs dump:\n");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ snprintf(buffer, SIZE, "- Input %d dump:\n", mInputs.keyAt(i));
+ write(fd, buffer, strlen(buffer));
+ mInputs.valueAt(i)->dump(fd);
+ }
+
+ snprintf(buffer, SIZE, "\nStreams dump:\n");
+ write(fd, buffer, strlen(buffer));
+ snprintf(buffer, SIZE, " Stream Index Min Index Max Index Cur Can be muted\n");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
+ snprintf(buffer, SIZE, " %02d", i);
+ mStreams[i].dump(buffer + 3, SIZE);
+ write(fd, buffer, strlen(buffer));
+ }
+
+ snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
+ (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
+ write(fd, buffer, strlen(buffer));
+
+ snprintf(buffer, SIZE, "Registered effects:\n");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i));
+ write(fd, buffer, strlen(buffer));
+ mEffects.valueAt(i)->dump(fd);
+ }
+
+
+ return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManagerBase
+// ----------------------------------------------------------------------------
+
+AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clientInterface)
+ :
+#ifdef AUDIO_POLICY_TEST
+ Thread(false),
+#endif //AUDIO_POLICY_TEST
+ mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0),
+ mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
+ mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
+ mA2dpSuspended(false)
+{
+ mpClientInterface = clientInterface;
+
+ for (int i = 0; i < AudioSystem::NUM_FORCE_USE; i++) {
+ mForceUse[i] = AudioSystem::FORCE_NONE;
+ }
+
+ initializeVolumeCurves();
+
+ // devices available by default are speaker, earpiece and built-in microphone
+ mAvailableOutputDevices = AudioSystem::DEVICE_OUT_EARPIECE |
+ AudioSystem::DEVICE_OUT_SPEAKER;
+ mAvailableInputDevices = AudioSystem::DEVICE_IN_BUILTIN_MIC;
+
+#ifdef WITH_A2DP
+ mA2dpOutput = 0;
+ mDuplicatedOutput = 0;
+ mA2dpDeviceAddress = String8("");
+#endif
+ mScoDeviceAddress = String8("");
+
+ // open hardware output
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER;
+ mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+
+ if (mHardwareOutput == 0) {
+ LOGE("Failed to initialize hardware output stream, samplingRate: %d, format %d, channels %d",
+ outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannels);
+ } else {
+ addOutput(mHardwareOutput, outputDesc);
+ setOutputDevice(mHardwareOutput, (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER, true);
+ //TODO: configure audio effect output stage here
+ }
+
+ updateDeviceForStrategy();
+#ifdef AUDIO_POLICY_TEST
+ if (mHardwareOutput != 0) {
+ AudioParameter outputCmd = AudioParameter();
+ outputCmd.addInt(String8("set_id"), 0);
+ mpClientInterface->setParameters(mHardwareOutput, outputCmd.toString());
+
+ mTestDevice = AudioSystem::DEVICE_OUT_SPEAKER;
+ mTestSamplingRate = 44100;
+ mTestFormat = AudioSystem::PCM_16_BIT;
+ mTestChannels = AudioSystem::CHANNEL_OUT_STEREO;
+ mTestLatencyMs = 0;
+ mCurOutput = 0;
+ mDirectOutput = false;
+ for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+ mTestOutputs[i] = 0;
+ }
+
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ snprintf(buffer, SIZE, "AudioPolicyManagerTest");
+ run(buffer, ANDROID_PRIORITY_AUDIO);
+ }
+#endif //AUDIO_POLICY_TEST
+}
+
+AudioPolicyManagerBase::~AudioPolicyManagerBase()
+{
+#ifdef AUDIO_POLICY_TEST
+ exit();
+#endif //AUDIO_POLICY_TEST
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ mpClientInterface->closeOutput(mOutputs.keyAt(i));
+ delete mOutputs.valueAt(i);
+ }
+ mOutputs.clear();
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ mpClientInterface->closeInput(mInputs.keyAt(i));
+ delete mInputs.valueAt(i);
+ }
+ mInputs.clear();
+}
+
+status_t AudioPolicyManagerBase::initCheck()
+{
+ return (mHardwareOutput == 0) ? NO_INIT : NO_ERROR;
+}
+
+#ifdef AUDIO_POLICY_TEST
+bool AudioPolicyManagerBase::threadLoop()
+{
+ LOGV("entering threadLoop()");
+ while (!exitPending())
+ {
+ String8 command;
+ int valueInt;
+ String8 value;
+
+ Mutex::Autolock _l(mLock);
+ mWaitWorkCV.waitRelative(mLock, milliseconds(50));
+
+ command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
+ AudioParameter param = AudioParameter(command);
+
+ if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR &&
+ valueInt != 0) {
+ LOGV("Test command %s received", command.string());
+ String8 target;
+ if (param.get(String8("target"), target) != NO_ERROR) {
+ target = "Manager";
+ }
+ if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_output"));
+ mCurOutput = valueInt;
+ }
+ if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_direct"));
+ if (value == "false") {
+ mDirectOutput = false;
+ } else if (value == "true") {
+ mDirectOutput = true;
+ }
+ }
+ if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_input"));
+ mTestInput = valueInt;
+ }
+
+ if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_format"));
+ int format = AudioSystem::INVALID_FORMAT;
+ if (value == "PCM 16 bits") {
+ format = AudioSystem::PCM_16_BIT;
+ } else if (value == "PCM 8 bits") {
+ format = AudioSystem::PCM_8_BIT;
+ } else if (value == "Compressed MP3") {
+ format = AudioSystem::MP3;
+ }
+ if (format != AudioSystem::INVALID_FORMAT) {
+ if (target == "Manager") {
+ mTestFormat = format;
+ } else if (mTestOutputs[mCurOutput] != 0) {
+ AudioParameter outputParam = AudioParameter();
+ outputParam.addInt(String8("format"), format);
+ mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+ }
+ }
+ }
+ if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_channels"));
+ int channels = 0;
+
+ if (value == "Channels Stereo") {
+ channels = AudioSystem::CHANNEL_OUT_STEREO;
+ } else if (value == "Channels Mono") {
+ channels = AudioSystem::CHANNEL_OUT_MONO;
+ }
+ if (channels != 0) {
+ if (target == "Manager") {
+ mTestChannels = channels;
+ } else if (mTestOutputs[mCurOutput] != 0) {
+ AudioParameter outputParam = AudioParameter();
+ outputParam.addInt(String8("channels"), channels);
+ mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+ }
+ }
+ }
+ if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_sampleRate"));
+ if (valueInt >= 0 && valueInt <= 96000) {
+ int samplingRate = valueInt;
+ if (target == "Manager") {
+ mTestSamplingRate = samplingRate;
+ } else if (mTestOutputs[mCurOutput] != 0) {
+ AudioParameter outputParam = AudioParameter();
+ outputParam.addInt(String8("sampling_rate"), samplingRate);
+ mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+ }
+ }
+ }
+
+ if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_reopen"));
+
+ mpClientInterface->closeOutput(mHardwareOutput);
+ delete mOutputs.valueFor(mHardwareOutput);
+ mOutputs.removeItem(mHardwareOutput);
+
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER;
+ mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+ if (mHardwareOutput == 0) {
+ LOGE("Failed to reopen hardware output stream, samplingRate: %d, format %d, channels %d",
+ outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannels);
+ } else {
+ AudioParameter outputCmd = AudioParameter();
+ outputCmd.addInt(String8("set_id"), 0);
+ mpClientInterface->setParameters(mHardwareOutput, outputCmd.toString());
+ addOutput(mHardwareOutput, outputDesc);
+ }
+ }
+
+
+ mpClientInterface->setParameters(0, String8("test_cmd_policy="));
+ }
+ }
+ return false;
+}
+
+void AudioPolicyManagerBase::exit()
+{
+ {
+ AutoMutex _l(mLock);
+ requestExit();
+ mWaitWorkCV.signal();
+ }
+ requestExitAndWait();
+}
+
+int AudioPolicyManagerBase::testOutputIndex(audio_io_handle_t output)
+{
+ for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+ if (output == mTestOutputs[i]) return i;
+ }
+ return 0;
+}
+#endif //AUDIO_POLICY_TEST
+
+// ---
+
+void AudioPolicyManagerBase::addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc)
+{
+ outputDesc->mId = id;
+ mOutputs.add(id, outputDesc);
+}
+
+
+#ifdef WITH_A2DP
+status_t AudioPolicyManagerBase::handleA2dpConnection(AudioSystem::audio_devices device,
+ const char *device_address)
+{
+ // when an A2DP device is connected, open an A2DP and a duplicated output
+ LOGV("opening A2DP output for device %s", device_address);
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = device;
+ mA2dpOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+ if (mA2dpOutput) {
+ // add A2DP output descriptor
+ addOutput(mA2dpOutput, outputDesc);
+
+ //TODO: configure audio effect output stage here
+
+ // set initial stream volume for A2DP device
+ applyStreamVolumes(mA2dpOutput, device);
+ if (a2dpUsedForSonification()) {
+ mDuplicatedOutput = mpClientInterface->openDuplicateOutput(mA2dpOutput, mHardwareOutput);
+ }
+ if (mDuplicatedOutput != 0 ||
+ !a2dpUsedForSonification()) {
+ // If both A2DP and duplicated outputs are open, send device address to A2DP hardware
+ // interface
+ AudioParameter param;
+ param.add(String8("a2dp_sink_address"), String8(device_address));
+ mpClientInterface->setParameters(mA2dpOutput, param.toString());
+ mA2dpDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
+
+ if (a2dpUsedForSonification()) {
+ // add duplicated output descriptor
+ AudioOutputDescriptor *dupOutputDesc = new AudioOutputDescriptor();
+ dupOutputDesc->mOutput1 = mOutputs.valueFor(mHardwareOutput);
+ dupOutputDesc->mOutput2 = mOutputs.valueFor(mA2dpOutput);
+ dupOutputDesc->mSamplingRate = outputDesc->mSamplingRate;
+ dupOutputDesc->mFormat = outputDesc->mFormat;
+ dupOutputDesc->mChannels = outputDesc->mChannels;
+ dupOutputDesc->mLatency = outputDesc->mLatency;
+ addOutput(mDuplicatedOutput, dupOutputDesc);
+ applyStreamVolumes(mDuplicatedOutput, device);
+ }
+ } else {
+ LOGW("getOutput() could not open duplicated output for %d and %d",
+ mHardwareOutput, mA2dpOutput);
+ mpClientInterface->closeOutput(mA2dpOutput);
+ mOutputs.removeItem(mA2dpOutput);
+ mA2dpOutput = 0;
+ delete outputDesc;
+ return NO_INIT;
+ }
+ } else {
+ LOGW("setDeviceConnectionState() could not open A2DP output for device %x", device);
+ delete outputDesc;
+ return NO_INIT;
+ }
+ AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
+
+ if (!a2dpUsedForSonification()) {
+ // mute music on A2DP output if a notification or ringtone is playing
+ uint32_t refCount = hwOutputDesc->strategyRefCount(STRATEGY_SONIFICATION);
+ refCount += hwOutputDesc->strategyRefCount(STRATEGY_ENFORCED_AUDIBLE);
+ for (uint32_t i = 0; i < refCount; i++) {
+ setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
+ }
+ }
+
+ mA2dpSuspended = false;
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerBase::handleA2dpDisconnection(AudioSystem::audio_devices device,
+ const char *device_address)
+{
+ if (mA2dpOutput == 0) {
+ LOGW("setDeviceConnectionState() disconnecting A2DP and no A2DP output!");
+ return INVALID_OPERATION;
+ }
+
+ if (mA2dpDeviceAddress != device_address) {
+ LOGW("setDeviceConnectionState() disconnecting unknow A2DP sink address %s", device_address);
+ return INVALID_OPERATION;
+ }
+
+ // mute media strategy to avoid outputting sound on hardware output while music stream
+ // is switched from A2DP output and before music is paused by music application
+ setStrategyMute(STRATEGY_MEDIA, true, mHardwareOutput);
+ setStrategyMute(STRATEGY_MEDIA, false, mHardwareOutput, MUTE_TIME_MS);
+
+ if (!a2dpUsedForSonification()) {
+ // unmute music on A2DP output if a notification or ringtone is playing
+ uint32_t refCount = mOutputs.valueFor(mHardwareOutput)->strategyRefCount(STRATEGY_SONIFICATION);
+ refCount += mOutputs.valueFor(mHardwareOutput)->strategyRefCount(STRATEGY_ENFORCED_AUDIBLE);
+ for (uint32_t i = 0; i < refCount; i++) {
+ setStrategyMute(STRATEGY_MEDIA, false, mA2dpOutput);
+ }
+ }
+ mA2dpDeviceAddress = "";
+ mA2dpSuspended = false;
+ return NO_ERROR;
+}
+
+void AudioPolicyManagerBase::closeA2dpOutputs()
+{
+
+ LOGV("setDeviceConnectionState() closing A2DP and duplicated output!");
+
+ if (mDuplicatedOutput != 0) {
+ AudioOutputDescriptor *dupOutputDesc = mOutputs.valueFor(mDuplicatedOutput);
+ AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
+ // As all active tracks on duplicated output will be deleted,
+ // and as they were also referenced on hardware output, the reference
+ // count for their stream type must be adjusted accordingly on
+ // hardware output.
+ for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
+ int refCount = dupOutputDesc->mRefCount[i];
+ hwOutputDesc->changeRefCount((AudioSystem::stream_type)i,-refCount);
+ }
+
+ mpClientInterface->closeOutput(mDuplicatedOutput);
+ delete mOutputs.valueFor(mDuplicatedOutput);
+ mOutputs.removeItem(mDuplicatedOutput);
+ mDuplicatedOutput = 0;
+ }
+ if (mA2dpOutput != 0) {
+ AudioParameter param;
+ param.add(String8("closing"), String8("true"));
+ mpClientInterface->setParameters(mA2dpOutput, param.toString());
+
+ mpClientInterface->closeOutput(mA2dpOutput);
+ delete mOutputs.valueFor(mA2dpOutput);
+ mOutputs.removeItem(mA2dpOutput);
+ mA2dpOutput = 0;
+ }
+}
+
+void AudioPolicyManagerBase::checkOutputForStrategy(routing_strategy strategy)
+{
+ uint32_t prevDevice = getDeviceForStrategy(strategy);
+ uint32_t curDevice = getDeviceForStrategy(strategy, false);
+ bool a2dpWasUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(prevDevice & ~AudioSystem::DEVICE_OUT_SPEAKER));
+ bool a2dpIsUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(curDevice & ~AudioSystem::DEVICE_OUT_SPEAKER));
+ audio_io_handle_t srcOutput = 0;
+ audio_io_handle_t dstOutput = 0;
+
+ if (a2dpWasUsed && !a2dpIsUsed) {
+ bool dupUsed = a2dpUsedForSonification() && a2dpWasUsed && (AudioSystem::popCount(prevDevice) == 2);
+ dstOutput = mHardwareOutput;
+ if (dupUsed) {
+ LOGV("checkOutputForStrategy() moving strategy %d from duplicated", strategy);
+ srcOutput = mDuplicatedOutput;
+ } else {
+ LOGV("checkOutputForStrategy() moving strategy %d from a2dp", strategy);
+ srcOutput = mA2dpOutput;
+ }
+ }
+ if (a2dpIsUsed && !a2dpWasUsed) {
+ bool dupUsed = a2dpUsedForSonification() && a2dpIsUsed && (AudioSystem::popCount(curDevice) == 2);
+ srcOutput = mHardwareOutput;
+ if (dupUsed) {
+ LOGV("checkOutputForStrategy() moving strategy %d to duplicated", strategy);
+ dstOutput = mDuplicatedOutput;
+ } else {
+ LOGV("checkOutputForStrategy() moving strategy %d to a2dp", strategy);
+ dstOutput = mA2dpOutput;
+ }
+ }
+
+ if (srcOutput != 0 && dstOutput != 0) {
+ // Move effects associated to this strategy from previous output to new output
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ EffectDescriptor *desc = mEffects.valueAt(i);
+ if (desc->mSession != AudioSystem::SESSION_OUTPUT_STAGE &&
+ desc->mStrategy == strategy &&
+ desc->mIo == srcOutput) {
+ LOGV("checkOutputForStrategy() moving effect %d to output %d", mEffects.keyAt(i), dstOutput);
+ mpClientInterface->moveEffects(desc->mSession, srcOutput, dstOutput);
+ desc->mIo = dstOutput;
+ }
+ }
+ // Move tracks associated to this strategy from previous output to new output
+ for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
+ if (getStrategy((AudioSystem::stream_type)i) == strategy) {
+ mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, dstOutput);
+ }
+ }
+ }
+}
+
+void AudioPolicyManagerBase::checkOutputForAllStrategies()
+{
+ checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
+ checkOutputForStrategy(STRATEGY_PHONE);
+ checkOutputForStrategy(STRATEGY_SONIFICATION);
+ checkOutputForStrategy(STRATEGY_MEDIA);
+ checkOutputForStrategy(STRATEGY_DTMF);
+}
+
+void AudioPolicyManagerBase::checkA2dpSuspend()
+{
+ // suspend A2DP output if:
+ // (NOT already suspended) &&
+ // ((SCO device is connected &&
+ // (forced usage for communication || for record is SCO))) ||
+ // (phone state is ringing || in call)
+ //
+ // restore A2DP output if:
+ // (Already suspended) &&
+ // ((SCO device is NOT connected ||
+ // (forced usage NOT for communication && NOT for record is SCO))) &&
+ // (phone state is NOT ringing && NOT in call)
+ //
+ if (mA2dpOutput == 0) {
+ return;
+ }
+
+ if (mA2dpSuspended) {
+ if (((mScoDeviceAddress == "") ||
+ ((mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO) &&
+ (mForceUse[AudioSystem::FOR_RECORD] != AudioSystem::FORCE_BT_SCO))) &&
+ ((mPhoneState != AudioSystem::MODE_IN_CALL) &&
+ (mPhoneState != AudioSystem::MODE_RINGTONE))) {
+
+ mpClientInterface->restoreOutput(mA2dpOutput);
+ mA2dpSuspended = false;
+ }
+ } else {
+ if (((mScoDeviceAddress != "") &&
+ ((mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
+ (mForceUse[AudioSystem::FOR_RECORD] == AudioSystem::FORCE_BT_SCO))) ||
+ ((mPhoneState == AudioSystem::MODE_IN_CALL) ||
+ (mPhoneState == AudioSystem::MODE_RINGTONE))) {
+
+ mpClientInterface->suspendOutput(mA2dpOutput);
+ mA2dpSuspended = true;
+ }
+ }
+}
+
+
+#endif
+
+uint32_t AudioPolicyManagerBase::getNewDevice(audio_io_handle_t output, bool fromCache)
+{
+ uint32_t device = 0;
+
+ AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+ // check the following by order of priority to request a routing change if necessary:
+ // 1: the strategy enforced audible is active on the output:
+ // use device for strategy enforced audible
+ // 2: we are in call or the strategy phone is active on the output:
+ // use device for strategy phone
+ // 3: the strategy sonification is active on the output:
+ // use device for strategy sonification
+ // 4: the strategy media is active on the output:
+ // use device for strategy media
+ // 5: the strategy DTMF is active on the output:
+ // use device for strategy DTMF
+ if (outputDesc->isUsedByStrategy(STRATEGY_ENFORCED_AUDIBLE)) {
+ device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
+ } else if (isInCall() ||
+ outputDesc->isUsedByStrategy(STRATEGY_PHONE)) {
+ device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
+ } else if (outputDesc->isUsedByStrategy(STRATEGY_SONIFICATION)) {
+ device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
+ } else if (outputDesc->isUsedByStrategy(STRATEGY_MEDIA)) {
+ device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
+ } else if (outputDesc->isUsedByStrategy(STRATEGY_DTMF)) {
+ device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
+ }
+
+ LOGV("getNewDevice() selected device %x", device);
+ return device;
+}
+
+uint32_t AudioPolicyManagerBase::getStrategyForStream(AudioSystem::stream_type stream) {
+ return (uint32_t)getStrategy(stream);
+}
+
+uint32_t AudioPolicyManagerBase::getDevicesForStream(AudioSystem::stream_type stream) {
+ uint32_t devices;
+ // By checking the range of stream before calling getStrategy, we avoid
+ // getStrategy's behavior for invalid streams. getStrategy would do a LOGE
+ // and then return STRATEGY_MEDIA, but we want to return the empty set.
+ if (stream < (AudioSystem::stream_type) 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ devices = 0;
+ } else {
+ AudioPolicyManagerBase::routing_strategy strategy = getStrategy(stream);
+ devices = getDeviceForStrategy(strategy, true);
+ }
+ return devices;
+}
+
+AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy(
+ AudioSystem::stream_type stream) {
+ // stream to strategy mapping
+ switch (stream) {
+ case AudioSystem::VOICE_CALL:
+ case AudioSystem::BLUETOOTH_SCO:
+ return STRATEGY_PHONE;
+ case AudioSystem::RING:
+ case AudioSystem::NOTIFICATION:
+ case AudioSystem::ALARM:
+ return STRATEGY_SONIFICATION;
+ case AudioSystem::DTMF:
+ return STRATEGY_DTMF;
+ default:
+ LOGE("unknown stream type");
+ case AudioSystem::SYSTEM:
+ // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
+ // while key clicks are played produces a poor result
+ case AudioSystem::TTS:
+ case AudioSystem::MUSIC:
+ return STRATEGY_MEDIA;
+ case AudioSystem::ENFORCED_AUDIBLE:
+ return STRATEGY_ENFORCED_AUDIBLE;
+ }
+}
+
+uint32_t AudioPolicyManagerBase::getDeviceForStrategy(routing_strategy strategy, bool fromCache)
+{
+ uint32_t device = 0;
+
+ if (fromCache) {
+ LOGV("getDeviceForStrategy() from cache strategy %d, device %x", strategy, mDeviceForStrategy[strategy]);
+ return mDeviceForStrategy[strategy];
+ }
+
+ switch (strategy) {
+ case STRATEGY_DTMF:
+ if (!isInCall()) {
+ // when off call, DTMF strategy follows the same rules as MEDIA strategy
+ device = getDeviceForStrategy(STRATEGY_MEDIA, false);
+ break;
+ }
+ // when in call, DTMF and PHONE strategies follow the same rules
+ // FALL THROUGH
+
+ case STRATEGY_PHONE:
+ // for the phone strategy, we first consider the forced use and then the available devices in
+ // order of priority
+ switch (mForceUse[AudioSystem::FOR_COMMUNICATION]) {
+ case AudioSystem::FORCE_BT_SCO:
+ if (!isInCall() || strategy != STRATEGY_DTMF) {
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+ if (device) break;
+ }
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO;
+ if (device) break;
+ // if SCO device is requested but no SCO device is available, fall back to default case
+ // FALL THROUGH
+
+ default: // FORCE_NONE
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADPHONE;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADSET;
+ if (device) break;
+#ifdef WITH_A2DP
+ // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
+ if (!isInCall() && !mA2dpSuspended) {
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+ if (device) break;
+ }
+#endif
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_EARPIECE;
+ if (device == 0) {
+ LOGE("getDeviceForStrategy() earpiece device not found");
+ }
+ break;
+
+ case AudioSystem::FORCE_SPEAKER:
+#ifdef WITH_A2DP
+ // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
+ // A2DP speaker when forcing to speaker output
+ if (!isInCall() && !mA2dpSuspended) {
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+ if (device) break;
+ }
+#endif
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
+ if (device) break;
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
+ if (device == 0) {
+ LOGE("getDeviceForStrategy() speaker device not found");
+ }
+ break;
+ }
+ break;
+
+ case STRATEGY_SONIFICATION:
+
+ // If in call, just select the STRATEGY_PHONE device: the rest of the behavior is handled by
+ // handleIncallSonification().
+ if (isInCall()) {
+ device = getDeviceForStrategy(STRATEGY_PHONE, false);
+ break;
+ }
+ // FALL THROUGH
+
+ case STRATEGY_ENFORCED_AUDIBLE:
+ // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
+ // except when in call where it doesn't default to STRATEGY_PHONE behavior
+
+ device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
+ if (device == 0) {
+ LOGE("getDeviceForStrategy() speaker device not found");
+ }
+ // The second device used for sonification is the same as the device used by media strategy
+ // FALL THROUGH
+
+ case STRATEGY_MEDIA: {
+ uint32_t device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADPHONE;
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADSET;
+ }
+#ifdef WITH_A2DP
+ if ((mA2dpOutput != 0) && !mA2dpSuspended &&
+ (strategy == STRATEGY_MEDIA || a2dpUsedForSonification())) {
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP;
+ }
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+ }
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+ }
+ }
+#endif
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
+ }
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
+ }
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
+ }
+ if (device2 == 0) {
+ device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
+ }
+
+ // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
+ // STRATEGY_ENFORCED_AUDIBLE, 0 otherwise
+ device |= device2;
+ if (device == 0) {
+ LOGE("getDeviceForStrategy() speaker device not found");
+ }
+ } break;
+
+ default:
+ LOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
+ break;
+ }
+
+ LOGV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
+ return device;
+}
+
+void AudioPolicyManagerBase::updateDeviceForStrategy()
+{
+ for (int i = 0; i < NUM_STRATEGIES; i++) {
+ mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false);
+ }
+}
+
+void AudioPolicyManagerBase::setOutputDevice(audio_io_handle_t output, uint32_t device, bool force, int delayMs)
+{
+ LOGV("setOutputDevice() output %d device %x delayMs %d", output, device, delayMs);
+ AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+
+
+ if (outputDesc->isDuplicated()) {
+ setOutputDevice(outputDesc->mOutput1->mId, device, force, delayMs);
+ setOutputDevice(outputDesc->mOutput2->mId, device, force, delayMs);
+ return;
+ }
+#ifdef WITH_A2DP
+ // filter devices according to output selected
+ if (output == mA2dpOutput) {
+ device &= AudioSystem::DEVICE_OUT_ALL_A2DP;
+ } else {
+ device &= ~AudioSystem::DEVICE_OUT_ALL_A2DP;
+ }
+#endif
+
+ uint32_t prevDevice = (uint32_t)outputDesc->device();
+ // Do not change the routing if:
+ // - the requested device is 0
+ // - the requested device is the same as current device and force is not specified.
+ // Doing this check here allows the caller to call setOutputDevice() without conditions
+ if ((device == 0 || device == prevDevice) && !force) {
+ LOGV("setOutputDevice() setting same device %x or null device for output %d", device, output);
+ return;
+ }
+
+ outputDesc->mDevice = device;
+ // mute media streams if both speaker and headset are selected
+ if (output == mHardwareOutput && AudioSystem::popCount(device) == 2) {
+ setStrategyMute(STRATEGY_MEDIA, true, output);
+ // wait for the PCM output buffers to empty before proceeding with the rest of the command
+ // FIXME: increased delay due to larger buffers used for low power audio mode.
+ // remove when low power audio is controlled by policy manager.
+ usleep(outputDesc->mLatency*8*1000);
+ }
+
+ // do the routing
+ AudioParameter param = AudioParameter();
+ param.addInt(String8(AudioParameter::keyRouting), (int)device);
+ mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs);
+ // update stream volumes according to new device
+ applyStreamVolumes(output, device, delayMs);
+
+ // if changing from a combined headset + speaker route, unmute media streams
+ if (output == mHardwareOutput && AudioSystem::popCount(prevDevice) == 2) {
+ setStrategyMute(STRATEGY_MEDIA, false, output, delayMs);
+ }
+}
+
+uint32_t AudioPolicyManagerBase::getDeviceForInputSource(int inputSource)
+{
+ uint32_t device;
+
+ switch(inputSource) {
+ case AUDIO_SOURCE_DEFAULT:
+ case AUDIO_SOURCE_MIC:
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ case AUDIO_SOURCE_VOICE_COMMUNICATION:
+ if (mForceUse[AudioSystem::FOR_RECORD] == AudioSystem::FORCE_BT_SCO &&
+ mAvailableInputDevices & AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+ device = AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+ } else if (mAvailableInputDevices & AudioSystem::DEVICE_IN_WIRED_HEADSET) {
+ device = AudioSystem::DEVICE_IN_WIRED_HEADSET;
+ } else {
+ device = AudioSystem::DEVICE_IN_BUILTIN_MIC;
+ }
+ break;
+ case AUDIO_SOURCE_CAMCORDER:
+ if (hasBackMicrophone()) {
+ device = AudioSystem::DEVICE_IN_BACK_MIC;
+ } else {
+ device = AudioSystem::DEVICE_IN_BUILTIN_MIC;
+ }
+ break;
+ case AUDIO_SOURCE_VOICE_UPLINK:
+ case AUDIO_SOURCE_VOICE_DOWNLINK:
+ case AUDIO_SOURCE_VOICE_CALL:
+ device = AudioSystem::DEVICE_IN_VOICE_CALL;
+ break;
+ default:
+ LOGW("getDeviceForInputSource() invalid input source %d", inputSource);
+ device = 0;
+ break;
+ }
+ LOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
+ return device;
+}
+
+audio_io_handle_t AudioPolicyManagerBase::getActiveInput()
+{
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ if (mInputs.valueAt(i)->mRefCount > 0) {
+ return mInputs.keyAt(i);
+ }
+ }
+ return 0;
+}
+
+
+AudioPolicyManagerBase::device_category AudioPolicyManagerBase::getDeviceCategory(uint32_t device)
+{
+ if (device == 0) {
+ // this happens when forcing a route update and no track is active on an output.
+ // In this case the returned category is not important.
+ return DEVICE_CATEGORY_SPEAKER;
+ }
+
+ if (AudioSystem::popCount(device) > 1) {
+ // Multiple device selection is either:
+ // - speaker + one other device: give priority to speaker in this case.
+ // - one A2DP device + another device: happens with duplicated output. In this case
+ // retain the A2DP device, as the other device cannot be an active selection
+ // unless it is the speaker.
+ if (device & AUDIO_DEVICE_OUT_SPEAKER)
+ return DEVICE_CATEGORY_SPEAKER;
+
+ device &= AUDIO_DEVICE_OUT_ALL_A2DP;
+ }
+
+ LOGW_IF(AudioSystem::popCount(device) != 1,
+ "getDeviceCategory() invalid device combination: %08x",
+ device);
+
+ switch(device) {
+ case AUDIO_DEVICE_OUT_EARPIECE:
+ return DEVICE_CATEGORY_EARPIECE;
+ case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+ case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+ case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
+ case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
+ case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+ case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+ return DEVICE_CATEGORY_HEADSET;
+ case AUDIO_DEVICE_OUT_SPEAKER:
+ case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
+ case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
+ default:
+ return DEVICE_CATEGORY_SPEAKER;
+ }
+}
+
+float AudioPolicyManagerBase::volIndexToAmpl(uint32_t device, const StreamDescriptor& streamDesc,
+ int indexInUi)
+{
+ device_category deviceCategory = getDeviceCategory(device);
+ const VolumeCurvePoint *curve = streamDesc.mVolumeCurve[deviceCategory];
+
+ // the volume index in the UI is relative to the min and max volume indices for this stream type
+ int nbSteps = 1 + curve[VOLMAX].mIndex -
+ curve[VOLMIN].mIndex;
+ int volIdx = (nbSteps * (indexInUi - streamDesc.mIndexMin)) /
+ (streamDesc.mIndexMax - streamDesc.mIndexMin);
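+ // illustrative example (assumed values): with a UI range of mIndexMin=1..mIndexMax=7 and
+ // the 100-step curves defined below (indices 1..100, so nbSteps == 100), a UI index of 4
+ // maps to volIdx = (100 * (4 - 1)) / (7 - 1) = 50, i.e. the middle of the curve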
+
+ // find what part of the curve this volume index falls in, or whether it is out of bounds
+ int segment = 0;
+ if (volIdx < curve[VOLMIN].mIndex) { // out of bounds
+ return 0.0f;
+ } else if (volIdx < curve[VOLKNEE1].mIndex) {
+ segment = 0;
+ } else if (volIdx < curve[VOLKNEE2].mIndex) {
+ segment = 1;
+ } else if (volIdx <= curve[VOLMAX].mIndex) {
+ segment = 2;
+ } else { // out of bounds
+ return 1.0f;
+ }
+
+ // linear interpolation in the attenuation table in dB
+ float decibels = curve[segment].mDBAttenuation +
+ ((float)(volIdx - curve[segment].mIndex)) *
+ ( (curve[segment+1].mDBAttenuation -
+ curve[segment].mDBAttenuation) /
+ ((float)(curve[segment+1].mIndex -
+ curve[segment].mIndex)) );
+
+ float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
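+ // numeric check of the constant: ln(10)/20 ~= 0.115129, so -20 dB yields
+ // exp(-20 * 0.115129) ~= 0.1 and 0 dB yields exactly 1.0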
+
+ LOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f",
+ curve[segment].mIndex, volIdx,
+ curve[segment+1].mIndex,
+ curve[segment].mDBAttenuation,
+ decibels,
+ curve[segment+1].mDBAttenuation,
+ amplification);
+
+ return amplification;
+}
+
+const AudioPolicyManagerBase::VolumeCurvePoint
+ AudioPolicyManagerBase::sDefaultVolumeCurve[AudioPolicyManagerBase::VOLCNT] = {
+ {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManagerBase::VolumeCurvePoint
+ AudioPolicyManagerBase::sDefaultMediaVolumeCurve[AudioPolicyManagerBase::VOLCNT] = {
+ {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManagerBase::VolumeCurvePoint
+ AudioPolicyManagerBase::sSpeakerMediaVolumeCurve[AudioPolicyManagerBase::VOLCNT] = {
+ {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
+};
+
+const AudioPolicyManagerBase::VolumeCurvePoint
+ AudioPolicyManagerBase::sSpeakerSonificationVolumeCurve[AudioPolicyManagerBase::VOLCNT] = {
+ {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
+};
+
+
+const AudioPolicyManagerBase::VolumeCurvePoint
+ *AudioPolicyManagerBase::sVolumeProfiles[AudioPolicyManagerBase::NUM_STRATEGIES]
+ [AudioPolicyManagerBase::DEVICE_CATEGORY_CNT] = {
+ { // STRATEGY_MEDIA
+ sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+ sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+ sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EARPIECE
+ },
+ { // STRATEGY_PHONE
+ sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+ sDefaultVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+ sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE
+ },
+ { // STRATEGY_SONIFICATION
+ sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+ sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+ sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE
+ },
+ { // STRATEGY_DTMF
+ sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+ sDefaultVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+ sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE
+ },
+ { // STRATEGY_ENFORCED_AUDIBLE
+ sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+ sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+ sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE
+ },
+};
+
+void AudioPolicyManagerBase::initializeVolumeCurves()
+{
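+ // e.g. AudioSystem::MUSIC maps to STRATEGY_MEDIA in getStrategy(), so its
+ // DEVICE_CATEGORY_SPEAKER curve becomes sSpeakerMediaVolumeCurve (see sVolumeProfiles above)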
+ for (int i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
+ for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
+ mStreams[i].mVolumeCurve[j] =
+ sVolumeProfiles[getStrategy((AudioSystem::stream_type)i)][j];
+ }
+ }
+}
+
+float AudioPolicyManagerBase::computeVolume(int stream, int index, audio_io_handle_t output, uint32_t device)
+{
+ float volume = 1.0;
+ AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+ StreamDescriptor &streamDesc = mStreams[stream];
+
+ if (device == 0) {
+ device = outputDesc->device();
+ }
+
+ // if volume is not 0 (not muted), force media volume to max on digital output
+ if (stream == AudioSystem::MUSIC &&
+ index != mStreams[stream].mIndexMin &&
+ (device == AudioSystem::DEVICE_OUT_AUX_DIGITAL ||
+ device == AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET)) {
+ return 1.0;
+ }
+
+ volume = volIndexToAmpl(device, streamDesc, index);
+
+ // if a headset is connected, apply the following rules to ring tones and notifications
+ // to avoid sound level bursts in user's ears:
+ // - always attenuate ring tones and notifications volume by 6dB
+ // - if music is playing, always limit the volume to current music volume,
+ // with a minimum threshold at -36dB so that notification is always perceived.
+ if ((device &
+ (AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ AudioSystem::DEVICE_OUT_WIRED_HEADSET |
+ AudioSystem::DEVICE_OUT_WIRED_HEADPHONE)) &&
+ ((getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) ||
+ (stream == AudioSystem::SYSTEM)) &&
+ streamDesc.mCanBeMuted) {
+ volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
+ // when the phone is ringing we must consider that music could have been paused just before
+ // by the music application and behave as if music was active if the last music track was
+ // just stopped
+ if (outputDesc->mRefCount[AudioSystem::MUSIC] || mLimitRingtoneVolume) {
+ float musicVol = computeVolume(AudioSystem::MUSIC, mStreams[AudioSystem::MUSIC].mIndexCur, output, device);
+ float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ? musicVol : SONIFICATION_HEADSET_VOLUME_MIN;
+ if (volume > minVol) {
+ volume = minVol;
+ LOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol);
+ }
+ }
+ }
+
+ return volume;
+}
+
+status_t AudioPolicyManagerBase::checkAndSetVolume(int stream, int index, audio_io_handle_t output, uint32_t device, int delayMs, bool force)
+{
+
+ // do not change actual stream volume if the stream is muted
+ if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) {
+ LOGV("checkAndSetVolume() stream %d muted count %d", stream, mOutputs.valueFor(output)->mMuteCount[stream]);
+ return NO_ERROR;
+ }
+
+ // do not change in call volume if bluetooth is connected and vice versa
+ if ((stream == AudioSystem::VOICE_CALL && mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
+ (stream == AudioSystem::BLUETOOTH_SCO && mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO)) {
+ LOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
+ stream, mForceUse[AudioSystem::FOR_COMMUNICATION]);
+ return INVALID_OPERATION;
+ }
+
+ float volume = computeVolume(stream, index, output, device);
+ // We actually change the volume if:
+ // - the float value returned by computeVolume() changed
+ // - the force flag is set
+ if (volume != mOutputs.valueFor(output)->mCurVolume[stream] ||
+ force) {
+ mOutputs.valueFor(output)->mCurVolume[stream] = volume;
+ LOGV("setStreamVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs);
+ if (stream == AudioSystem::VOICE_CALL ||
+ stream == AudioSystem::DTMF ||
+ stream == AudioSystem::BLUETOOTH_SCO) {
+ // offset value to reflect actual hardware volume that never reaches 0
+ // 1% corresponds roughly to first step in VOICE_CALL stream volume setting (see AudioService.java)
+ volume = 0.01 + 0.99 * volume;
+ // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
+ // enabled
+ if (stream == AudioSystem::BLUETOOTH_SCO) {
+ mpClientInterface->setStreamVolume(AudioSystem::VOICE_CALL, volume, output, delayMs);
+ }
+ }
+
+ mpClientInterface->setStreamVolume((AudioSystem::stream_type)stream, volume, output, delayMs);
+ }
+
+ if (stream == AudioSystem::VOICE_CALL ||
+ stream == AudioSystem::BLUETOOTH_SCO) {
+ float voiceVolume;
+ // Force voice volume to max for bluetooth SCO as volume is managed by the headset
+ if (stream == AudioSystem::VOICE_CALL) {
+ voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
+ } else {
+ voiceVolume = 1.0;
+ }
+
+ if (voiceVolume != mLastVoiceVolume && output == mHardwareOutput) {
+ mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
+ mLastVoiceVolume = voiceVolume;
+ }
+ }
+
+ return NO_ERROR;
+}
+
+void AudioPolicyManagerBase::applyStreamVolumes(audio_io_handle_t output, uint32_t device, int delayMs, bool force)
+{
+ LOGV("applyStreamVolumes() for output %d and device %x", output, device);
+
+ for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, device, delayMs, force);
+ }
+}
+
+void AudioPolicyManagerBase::setStrategyMute(routing_strategy strategy, bool on, audio_io_handle_t output, int delayMs)
+{
+ LOGV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output);
+ for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ if (getStrategy((AudioSystem::stream_type)stream) == strategy) {
+ setStreamMute(stream, on, output, delayMs);
+ }
+ }
+}
+
+void AudioPolicyManagerBase::setStreamMute(int stream, bool on, audio_io_handle_t output, int delayMs)
+{
+ StreamDescriptor &streamDesc = mStreams[stream];
+ AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
+
+ LOGV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d", stream, on, output, outputDesc->mMuteCount[stream]);
+
+ if (on) {
+ if (outputDesc->mMuteCount[stream] == 0) {
+ if (streamDesc.mCanBeMuted) {
+ checkAndSetVolume(stream, 0, output, outputDesc->device(), delayMs);
+ }
+ }
+ // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
+ outputDesc->mMuteCount[stream]++;
+ } else {
+ if (outputDesc->mMuteCount[stream] == 0) {
+ LOGW("setStreamMute() unmuting non muted stream!");
+ return;
+ }
+ if (--outputDesc->mMuteCount[stream] == 0) {
+ checkAndSetVolume(stream, streamDesc.mIndexCur, output, outputDesc->device(), delayMs);
+ }
+ }
+}
+
+void AudioPolicyManagerBase::handleIncallSonification(int stream, bool starting, bool stateChange)
+{
+ // if the stream pertains to the sonification strategy and we are in call, we must
+ // mute the stream if it is low visibility. If it is high visibility, it is signalled
+ // with a call waiting tone played through the telephony path, and it is muted on the
+ // hardware output when its routing interferes with the device used for the phone strategy
+ // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
+ // many times as there are active tracks on the output
+
+ if (getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) {
+ AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mHardwareOutput);
+ LOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
+ stream, starting, outputDesc->mDevice, stateChange);
+ if (outputDesc->mRefCount[stream]) {
+ int muteCount = 1;
+ if (stateChange) {
+ muteCount = outputDesc->mRefCount[stream];
+ }
+ if (AudioSystem::isLowVisibility((AudioSystem::stream_type)stream)) {
+ LOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
+ for (int i = 0; i < muteCount; i++) {
+ setStreamMute(stream, starting, mHardwareOutput);
+ }
+ } else {
+ LOGV("handleIncallSonification() high visibility");
+ if (outputDesc->device() & getDeviceForStrategy(STRATEGY_PHONE)) {
+ LOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
+ for (int i = 0; i < muteCount; i++) {
+ setStreamMute(stream, starting, mHardwareOutput);
+ }
+ }
+ if (starting) {
+ mpClientInterface->startTone(ToneGenerator::TONE_SUP_CALL_WAITING, AudioSystem::VOICE_CALL);
+ } else {
+ mpClientInterface->stopTone();
+ }
+ }
+ }
+ }
+}
+
+bool AudioPolicyManagerBase::isInCall()
+{
+ return isStateInCall(mPhoneState);
+}
+
+bool AudioPolicyManagerBase::isStateInCall(int state) {
+ return ((state == AudioSystem::MODE_IN_CALL) ||
+ (state == AudioSystem::MODE_IN_COMMUNICATION));
+}
+
+bool AudioPolicyManagerBase::needsDirectOuput(AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags,
+ uint32_t device)
+{
+ return ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
+ (format !=0 && !AudioSystem::isLinearPCM(format)));
+}
+
+uint32_t AudioPolicyManagerBase::getMaxEffectsCpuLoad()
+{
+ return MAX_EFFECTS_CPU_LOAD;
+}
+
+uint32_t AudioPolicyManagerBase::getMaxEffectsMemory()
+{
+ return MAX_EFFECTS_MEMORY;
+}
+
+// --- AudioOutputDescriptor class implementation
+
+AudioPolicyManagerBase::AudioOutputDescriptor::AudioOutputDescriptor()
+ : mId(0), mSamplingRate(0), mFormat(0), mChannels(0), mLatency(0),
+ mFlags((AudioSystem::output_flags)0), mDevice(0), mOutput1(0), mOutput2(0)
+{
+ // clear usage count for all stream types
+ for (int i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
+ mRefCount[i] = 0;
+ mCurVolume[i] = -1.0;
+ mMuteCount[i] = 0;
+ mStopTime[i] = 0;
+ }
+}
+
+uint32_t AudioPolicyManagerBase::AudioOutputDescriptor::device()
+{
+ uint32_t device = 0;
+ if (isDuplicated()) {
+ device = mOutput1->mDevice | mOutput2->mDevice;
+ } else {
+ device = mDevice;
+ }
+ return device;
+}
+
+void AudioPolicyManagerBase::AudioOutputDescriptor::changeRefCount(AudioSystem::stream_type stream, int delta)
+{
+ // forward usage count change to attached outputs
+ if (isDuplicated()) {
+ mOutput1->changeRefCount(stream, delta);
+ mOutput2->changeRefCount(stream, delta);
+ }
+ if ((delta + (int)mRefCount[stream]) < 0) {
+ LOGW("changeRefCount() invalid delta %d for stream %d, refCount %d", delta, stream, mRefCount[stream]);
+ mRefCount[stream] = 0;
+ return;
+ }
+ mRefCount[stream] += delta;
+ LOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]);
+}
+
+uint32_t AudioPolicyManagerBase::AudioOutputDescriptor::refCount()
+{
+ uint32_t refcount = 0;
+ for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
+ refcount += mRefCount[i];
+ }
+ return refcount;
+}
+
+uint32_t AudioPolicyManagerBase::AudioOutputDescriptor::strategyRefCount(routing_strategy strategy)
+{
+ uint32_t refCount = 0;
+ for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
+ if (getStrategy((AudioSystem::stream_type)i) == strategy) {
+ refCount += mRefCount[i];
+ }
+ }
+ return refCount;
+}
+
+status_t AudioPolicyManagerBase::AudioOutputDescriptor::dump(int fd)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Format: %d\n", mFormat);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Channels: %08x\n", mChannels);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Latency: %d\n", mLatency);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Flags %08x\n", mFlags);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Devices %08x\n", device());
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Stream volume refCount muteCount\n");
+ result.append(buffer);
+ for (int i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
+ snprintf(buffer, SIZE, " %02d %.03f %02d %02d\n", i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
+ result.append(buffer);
+ }
+ write(fd, result.string(), result.size());
+
+ return NO_ERROR;
+}
+
+// --- AudioInputDescriptor class implementation
+
+AudioPolicyManagerBase::AudioInputDescriptor::AudioInputDescriptor()
+ : mSamplingRate(0), mFormat(0), mChannels(0),
+ mAcoustics((AudioSystem::audio_in_acoustics)0), mDevice(0), mRefCount(0),
+ mInputSource(0)
+{
+}
+
+status_t AudioPolicyManagerBase::AudioInputDescriptor::dump(int fd)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Format: %d\n", mFormat);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Channels: %08x\n", mChannels);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Acoustics %08x\n", mAcoustics);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ return NO_ERROR;
+}
+
+// --- StreamDescriptor class implementation
+
+void AudioPolicyManagerBase::StreamDescriptor::dump(char* buffer, size_t size)
+{
+ snprintf(buffer, size, " %02d %02d %02d %d\n",
+ mIndexMin,
+ mIndexMax,
+ mIndexCur,
+ mCanBeMuted);
+}
+
+// --- EffectDescriptor class implementation
+
+status_t AudioPolicyManagerBase::EffectDescriptor::dump(int fd)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, " I/O: %d\n", mIo);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Session: %d\n", mSession);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Name: %s\n", mDesc.name);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " %s\n", mEnabled ? "Enabled" : "Disabled");
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ return NO_ERROR;
+}
+
+
+
+}; // namespace android
diff --git a/audio/AudioPolicyManagerBase.h b/audio/AudioPolicyManagerBase.h
new file mode 100755
index 0000000..3ab748d
--- /dev/null
+++ b/audio/AudioPolicyManagerBase.h
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <utils/Timers.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <hardware_legacy/AudioPolicyInterface.h>
+
+
+namespace android_audio_legacy {
+ using android::KeyedVector;
+
+// ----------------------------------------------------------------------------
+
+#define MAX_DEVICE_ADDRESS_LEN 20
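+// (device addresses here are Bluetooth MAC strings such as "00:11:22:33:44:55", 17 characters)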
+// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
+#define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
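+// (a linear factor of 0.5 corresponds to 20*log10(0.5) ~= -6.02 dB)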
+// Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB
+#define SONIFICATION_HEADSET_VOLUME_MIN 0.016
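+// (10^(-36/20) ~= 0.0158 in linear amplitude)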
+// Time in milliseconds during which we consider that music is still active after a music
+// track was stopped - see computeVolume()
+#define SONIFICATION_HEADSET_MUSIC_DELAY 5000
+// Time in milliseconds during which some streams are muted while the audio path
+// is switched
+#define MUTE_TIME_MS 2000
+
+#define NUM_TEST_OUTPUTS 5
+
+#define NUM_VOL_CURVE_KNEES 2
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManagerBase implements audio policy manager behavior common to all platforms.
+// Each platform must implement an AudioPolicyManager class derived from AudioPolicyManagerBase
+// and override methods for which the platform specific behavior differs from the implementation
+// in AudioPolicyManagerBase. Even if no specific behavior is required, the AudioPolicyManager
+// class must be implemented as well as the class factory function createAudioPolicyManager()
+// and provided in a shared library libaudiopolicy.so.
+// ----------------------------------------------------------------------------
+
+class AudioPolicyManagerBase: public AudioPolicyInterface
+#ifdef AUDIO_POLICY_TEST
+ , public Thread
+#endif //AUDIO_POLICY_TEST
+{
+
+public:
+ AudioPolicyManagerBase(AudioPolicyClientInterface *clientInterface);
+ virtual ~AudioPolicyManagerBase();
+
+ // AudioPolicyInterface
+ virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address);
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address);
+ virtual void setPhoneState(int state);
+ virtual void setRingerMode(uint32_t mode, uint32_t mask);
+ virtual void setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config);
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage);
+ virtual void setSystemProperty(const char* property, const char* value);
+ virtual status_t initCheck();
+ virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::output_flags flags =
+ AudioSystem::OUTPUT_FLAG_INDIRECT);
+ virtual status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
+ virtual status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
+ virtual void releaseOutput(audio_io_handle_t output);
+ virtual audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics);
+ // indicates to the audio policy manager that the input starts being used.
+ virtual status_t startInput(audio_io_handle_t input);
+ // indicates to the audio policy manager that the input stops being used.
+ virtual status_t stopInput(audio_io_handle_t input);
+ virtual void releaseInput(audio_io_handle_t input);
+ virtual void initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax);
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index);
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index);
+
+ // return the strategy corresponding to a given stream type
+ virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream);
+
+ // return the enabled output devices for the given stream type
+ virtual uint32_t getDevicesForStream(AudioSystem::stream_type stream);
+
+ virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
+ virtual status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t io,
+ uint32_t strategy,
+ int session,
+ int id);
+ virtual status_t unregisterEffect(int id);
+ virtual status_t setEffectEnabled(int id, bool enabled);
+
+ virtual bool isStreamActive(int stream, uint32_t inPastMs = 0) const;
+
+ virtual status_t dump(int fd);
+
+protected:
+
+ enum routing_strategy {
+ STRATEGY_MEDIA,
+ STRATEGY_PHONE,
+ STRATEGY_SONIFICATION,
+ STRATEGY_DTMF,
+ STRATEGY_ENFORCED_AUDIBLE,
+ NUM_STRATEGIES
+ };
+
+ // 4 points to define the volume attenuation curve, each characterized by the volume
+ // index (from 0 to 100) at which they apply, and the attenuation in dB at that index.
+ // we use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl()
+
+ enum { VOLMIN = 0, VOLKNEE1 = 1, VOLKNEE2 = 2, VOLMAX = 3, VOLCNT = 4};
+
+ class VolumeCurvePoint
+ {
+ public:
+ int mIndex;
+ float mDBAttenuation;
+ };
+
+ // device categories used for volume curve management.
+ enum device_category {
+ DEVICE_CATEGORY_HEADSET,
+ DEVICE_CATEGORY_SPEAKER,
+ DEVICE_CATEGORY_EARPIECE,
+ DEVICE_CATEGORY_CNT
+ };
+
+ // default volume curve
+ static const VolumeCurvePoint sDefaultVolumeCurve[AudioPolicyManagerBase::VOLCNT];
+ // default volume curve for media strategy
+ static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManagerBase::VOLCNT];
+ // volume curve for media strategy on speakers
+ static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManagerBase::VOLCNT];
+ // volume curve for sonification strategy on speakers
+ static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[AudioPolicyManagerBase::VOLCNT];
+ // default volume curves per strategy and device category. See initializeVolumeCurves()
+ static const VolumeCurvePoint *sVolumeProfiles[NUM_STRATEGIES][DEVICE_CATEGORY_CNT];
+
+ // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
+ // and keep track of the usage of this output by each audio stream type.
+ class AudioOutputDescriptor
+ {
+ public:
+ AudioOutputDescriptor();
+
+ status_t dump(int fd);
+
+ uint32_t device();
+ void changeRefCount(AudioSystem::stream_type, int delta);
+ uint32_t refCount();
+ uint32_t strategyRefCount(routing_strategy strategy);
+ bool isUsedByStrategy(routing_strategy strategy) { return (strategyRefCount(strategy) != 0);}
+ bool isDuplicated() { return (mOutput1 != NULL && mOutput2 != NULL); }
+
+ audio_io_handle_t mId; // output handle
+ uint32_t mSamplingRate; //
+ uint32_t mFormat; //
+ uint32_t mChannels; // output configuration
+ uint32_t mLatency; //
+ AudioSystem::output_flags mFlags; //
+ uint32_t mDevice; // current device this output is routed to
+ uint32_t mRefCount[AudioSystem::NUM_STREAM_TYPES]; // number of streams of each type using this output
+ nsecs_t mStopTime[AudioSystem::NUM_STREAM_TYPES];
+ AudioOutputDescriptor *mOutput1; // used by duplicated outputs: first output
+ AudioOutputDescriptor *mOutput2; // used by duplicated outputs: second output
+ float mCurVolume[AudioSystem::NUM_STREAM_TYPES]; // current stream volume
+ int mMuteCount[AudioSystem::NUM_STREAM_TYPES]; // mute request counter
+ };
+
+ // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
+ // and keep track of the usage of this input.
+ class AudioInputDescriptor
+ {
+ public:
+ AudioInputDescriptor();
+
+ status_t dump(int fd);
+
+ uint32_t mSamplingRate; //
+ uint32_t mFormat; // input configuration
+ uint32_t mChannels; //
+ AudioSystem::audio_in_acoustics mAcoustics; //
+ uint32_t mDevice; // current device this input is routed to
+ uint32_t mRefCount; // number of AudioRecord clients using this input
+ int mInputSource; // input source selected by application (mediarecorder.h)
+ };
+
+ // stream descriptor used for volume control
+ class StreamDescriptor
+ {
+ public:
+ StreamDescriptor()
+ : mIndexMin(0), mIndexMax(1), mIndexCur(1), mCanBeMuted(true) {}
+
+ void dump(char* buffer, size_t size);
+
+ int mIndexMin; // min volume index
+ int mIndexMax; // max volume index
+ int mIndexCur; // current volume index
+ bool mCanBeMuted; // true if the stream can be muted
+
+ const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT];
+ };
+
+    // descriptor for audio effects. Used to keep track of the io handle, strategy, session
+    // and enabled state of each registered effect
+ class EffectDescriptor
+ {
+ public:
+
+ status_t dump(int fd);
+
+ int mIo; // io the effect is attached to
+        routing_strategy mStrategy; // routing strategy the effect is associated with
+ int mSession; // audio session the effect is on
+ effect_descriptor_t mDesc; // effect descriptor
+ bool mEnabled; // enabled state: CPU load being used or not
+ };
+
+ void addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc);
+
+ // return the strategy corresponding to a given stream type
+ static routing_strategy getStrategy(AudioSystem::stream_type stream);
+ // return appropriate device for streams handled by the specified strategy according to current
+ // phone state, connected devices...
+ // if fromCache is true, the device is returned from mDeviceForStrategy[], otherwise it is determined
+ // by current state (device connected, phone state, force use, a2dp output...)
+    // This allows us to:
+    //      1 speed up processing when the state is stable (when starting or stopping an output)
+ // 2 access to either current device selection (fromCache == true) or
+ // "future" device selection (fromCache == false) when called from a context
+ // where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+ // before updateDeviceForStrategy() is called.
+ virtual uint32_t getDeviceForStrategy(routing_strategy strategy, bool fromCache = true);
+ // change the route of the specified output
+ void setOutputDevice(audio_io_handle_t output, uint32_t device, bool force = false, int delayMs = 0);
+ // select input device corresponding to requested audio source
+ virtual uint32_t getDeviceForInputSource(int inputSource);
+ // return io handle of active input or 0 if no input is active
+ audio_io_handle_t getActiveInput();
+ // initialize volume curves for each strategy and device category
+ void initializeVolumeCurves();
+ // compute the actual volume for a given stream according to the requested index and a particular
+ // device
+ virtual float computeVolume(int stream, int index, audio_io_handle_t output, uint32_t device);
+ // check that volume change is permitted, compute and send new volume to audio hardware
+ status_t checkAndSetVolume(int stream, int index, audio_io_handle_t output, uint32_t device, int delayMs = 0, bool force = false);
+ // apply all stream volumes to the specified output and device
+ void applyStreamVolumes(audio_io_handle_t output, uint32_t device, int delayMs = 0, bool force = false);
+ // Mute or unmute all streams handled by the specified strategy on the specified output
+ void setStrategyMute(routing_strategy strategy, bool on, audio_io_handle_t output, int delayMs = 0);
+ // Mute or unmute the stream on the specified output
+ void setStreamMute(int stream, bool on, audio_io_handle_t output, int delayMs = 0);
+    // handle special cases for the sonification strategy while in call: mute streams or replace them
+    // with a special tone on the device used for communication
+ void handleIncallSonification(int stream, bool starting, bool stateChange);
+    // true if the current platform implements a back microphone
+ virtual bool hasBackMicrophone() const { return false; }
+ // true if device is in a telephony or VoIP call
+ virtual bool isInCall();
+ // true if given state represents a device in a telephony or VoIP call
+ virtual bool isStateInCall(int state);
+
+#ifdef WITH_A2DP
+    // true if the current platform supports duplication of notifications and ringtones over the A2DP output
+ virtual bool a2dpUsedForSonification() const { return true; }
+ status_t handleA2dpConnection(AudioSystem::audio_devices device,
+ const char *device_address);
+ status_t handleA2dpDisconnection(AudioSystem::audio_devices device,
+ const char *device_address);
+ void closeA2dpOutputs();
+ // checks and if necessary changes output (a2dp, duplicated or hardware) used for all strategies.
+ // must be called every time a condition that affects the output choice for a given strategy is
+ // changed: connected device, phone state, force use...
+ // Must be called before updateDeviceForStrategy()
+ void checkOutputForStrategy(routing_strategy strategy);
+    // Same as checkOutputForStrategy() but for all strategies in order of priority
+ void checkOutputForAllStrategies();
+ // manages A2DP output suspend/restore according to phone state and BT SCO usage
+ void checkA2dpSuspend();
+#endif
+    // selects the most appropriate device for the given output in the current state
+ // must be called every time a condition that affects the device choice for a given output is
+ // changed: connected device, phone state, force use, output start, output stop..
+ // see getDeviceForStrategy() for the use of fromCache parameter
+ uint32_t getNewDevice(audio_io_handle_t output, bool fromCache = true);
+ // updates cache of device used by all strategies (mDeviceForStrategy[])
+ // must be called every time a condition that affects the device choice for a given strategy is
+ // changed: connected device, phone state, force use...
+ // cached values are used by getDeviceForStrategy() if parameter fromCache is true.
+ // Must be called after checkOutputForAllStrategies()
+ void updateDeviceForStrategy();
+ // true if current platform requires a specific output to be opened for this particular
+ // set of parameters. This function is called by getOutput() and is implemented by platform
+ // specific audio policy manager.
+ virtual bool needsDirectOuput(AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags,
+ uint32_t device);
+ virtual uint32_t getMaxEffectsCpuLoad();
+ virtual uint32_t getMaxEffectsMemory();
+#ifdef AUDIO_POLICY_TEST
+ virtual bool threadLoop();
+ void exit();
+ int testOutputIndex(audio_io_handle_t output);
+#endif //AUDIO_POLICY_TEST
+
+ status_t setEffectEnabled(EffectDescriptor *pDesc, bool enabled);
+
+ // returns the category the device belongs to with regard to volume curve management
+ static device_category getDeviceCategory(uint32_t device);
+
+ AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
+ audio_io_handle_t mHardwareOutput; // hardware output handler
+ audio_io_handle_t mA2dpOutput; // A2DP output handler
+ audio_io_handle_t mDuplicatedOutput; // duplicated output handler: outputs to hardware and A2DP.
+
+ KeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mOutputs; // list of output descriptors
+ KeyedVector<audio_io_handle_t, AudioInputDescriptor *> mInputs; // list of input descriptors
+ uint32_t mAvailableOutputDevices; // bit field of all available output devices
+ uint32_t mAvailableInputDevices; // bit field of all available input devices
+ int mPhoneState; // current phone state
+ uint32_t mRingerMode; // current ringer mode
+ AudioSystem::forced_config mForceUse[AudioSystem::NUM_FORCE_USE]; // current forced use configuration
+
+ StreamDescriptor mStreams[AudioSystem::NUM_STREAM_TYPES]; // stream descriptors for volume control
+ String8 mA2dpDeviceAddress; // A2DP device MAC address
+ String8 mScoDeviceAddress; // SCO device MAC address
+ bool mLimitRingtoneVolume; // limit ringtone volume to music volume if headset connected
+ uint32_t mDeviceForStrategy[NUM_STRATEGIES];
+ float mLastVoiceVolume; // last voice volume value sent to audio HAL
+
+ // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
+ static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
+ // Maximum memory allocated to audio effects in KB
+ static const uint32_t MAX_EFFECTS_MEMORY = 512;
+ uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
+ uint32_t mTotalEffectsMemory; // current memory used by effects
+ KeyedVector<int, EffectDescriptor *> mEffects; // list of registered audio effects
+ bool mA2dpSuspended; // true if A2DP output is suspended
+
+#ifdef AUDIO_POLICY_TEST
+ Mutex mLock;
+ Condition mWaitWorkCV;
+
+ int mCurOutput;
+ bool mDirectOutput;
+ audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS];
+ int mTestInput;
+ uint32_t mTestDevice;
+ uint32_t mTestSamplingRate;
+ uint32_t mTestFormat;
+ uint32_t mTestChannels;
+ uint32_t mTestLatencyMs;
+#endif //AUDIO_POLICY_TEST
+
+private:
+ static float volIndexToAmpl(uint32_t device, const StreamDescriptor& streamDesc,
+ int indexInUi);
+};
+
+};
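The curve points above feed volIndexToAmpl(), declared in the private section of this class. As a rough, self-contained illustration of how such a curve can be evaluated (a hypothetical sketch, not the actual code in AudioPolicyManagerBase.cpp):

    #include <math.h>

    // mirrors the VolumeCurvePoint layout: a UI index on a 0..100 scale and a dB attenuation
    struct CurvePoint { int mIndex; float mDBAttenuation; };

    // indexMin/indexMax play the role of StreamDescriptor::mIndexMin/mIndexMax
    static float sketchVolIndexToAmpl(const CurvePoint curve[4],
                                      int indexMin, int indexMax, int indexInUi)
    {
        // map the UI index onto the 0..100 scale used by the curve points
        int volIdx = ((indexInUi - indexMin) * 100) / (indexMax - indexMin);
        if (volIdx < curve[0].mIndex) volIdx = curve[0].mIndex;
        if (volIdx > curve[3].mIndex) volIdx = curve[3].mIndex;

        // find the segment containing volIdx and interpolate linearly in dB
        for (int i = 1; i < 4; i++) {
            if (volIdx <= curve[i].mIndex) {
                float frac = (float)(volIdx - curve[i - 1].mIndex) /
                             (float)(curve[i].mIndex - curve[i - 1].mIndex);
                float db = curve[i - 1].mDBAttenuation +
                           frac * (curve[i].mDBAttenuation - curve[i - 1].mDBAttenuation);
                return expf(db * 0.115129f);   // 10^(dB/20)
            }
        }
        return 1.0f;   // not reached once volIdx is clamped above
    }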
diff --git a/audio/AudioPolicyManagerDefault.cpp b/audio/AudioPolicyManagerDefault.cpp
new file mode 100755
index 0000000..9083638
--- /dev/null
+++ b/audio/AudioPolicyManagerDefault.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyManagerDefault"
+//#define LOG_NDEBUG 0
+
+#include "AudioPolicyManagerDefault.h"
+
+namespace android_audio_legacy {
+
+extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
+{
+ return new AudioPolicyManagerDefault(clientInterface);
+}
+
+extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
+{
+ delete interface;
+}
+
+}; // namespace android_audio_legacy
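A hypothetical usage sketch of these two entry points (the actual caller is create_legacy_ap() in audio_policy_hal.cpp further down; clientInterface stands for an already constructed AudioPolicyClientInterface*):

    AudioPolicyInterface *apm = createAudioPolicyManager(clientInterface);
    if (apm != NULL && apm->initCheck() != NO_ERROR) {
        // the manager could not reach the audio hardware; discard it
        destroyAudioPolicyManager(apm);
        apm = NULL;
    }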
diff --git a/audio/AudioPolicyManagerDefault.h b/audio/AudioPolicyManagerDefault.h
new file mode 100755
index 0000000..b2b2576
--- /dev/null
+++ b/audio/AudioPolicyManagerDefault.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <hardware_legacy/AudioPolicyManagerBase.h>
+
+namespace android_audio_legacy {
+
+class AudioPolicyManagerDefault: public AudioPolicyManagerBase
+{
+
+public:
+ AudioPolicyManagerDefault(AudioPolicyClientInterface *clientInterface)
+ : AudioPolicyManagerBase(clientInterface) {}
+
+ virtual ~AudioPolicyManagerDefault() {}
+
+protected:
+    // true if the current platform implements a back microphone
+ virtual bool hasBackMicrophone() const { return false; }
+#ifdef WITH_A2DP
+    // true if the current platform supports duplication of notifications and ringtones over the A2DP output
+ virtual bool a2dpUsedForSonification() const { return true; }
+#endif
+
+};
+};
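As a hypothetical illustration (not part of this change), a device-specific manager would be derived the same way as AudioPolicyManagerDefault above, overriding the virtuals it needs from AudioPolicyManagerBase:

    #include <hardware_legacy/AudioPolicyManagerBase.h>

    namespace android_audio_legacy {

    class AudioPolicyManagerExample : public AudioPolicyManagerBase
    {
    public:
        AudioPolicyManagerExample(AudioPolicyClientInterface *clientInterface)
            : AudioPolicyManagerBase(clientInterface) {}
        virtual ~AudioPolicyManagerExample() {}

    protected:
        // report a back microphone so the base class may select DEVICE_IN_BACK_MIC
        // for input sources that prefer it (implementation dependent)
        virtual bool hasBackMicrophone() const { return true; }
    };

    }; // namespace android_audio_legacy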
diff --git a/audio/AudioSystem.h b/audio/AudioSystem.h
new file mode 100755
index 0000000..bd75a73
--- /dev/null
+++ b/audio/AudioSystem.h
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOSYSTEM_H_
+#define ANDROID_AUDIOSYSTEM_H_
+#define ANDROID_AUDIOPARAMETER_H_
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <media/IAudioFlinger.h>
+
+namespace android {
+
+typedef void (*audio_error_callback)(status_t err);
+typedef int audio_io_handle_t;
+
+class IAudioPolicyService;
+class String8;
+
+class AudioSystem
+{
+public:
+
+ enum stream_type {
+ DEFAULT =-1,
+ VOICE_CALL = 0,
+ SYSTEM = 1,
+ RING = 2,
+ MUSIC = 3,
+ ALARM = 4,
+ NOTIFICATION = 5,
+ BLUETOOTH_SCO = 6,
+ ENFORCED_AUDIBLE = 7, // Sounds that cannot be muted by user and must be routed to speaker
+ DTMF = 8,
+ TTS = 9,
+#ifdef HAVE_FM_RADIO
+ FM = 10,
+#endif
+ NUM_STREAM_TYPES
+ };
+
+ // Audio sub formats (see AudioSystem::audio_format).
+ enum pcm_sub_format {
+ PCM_SUB_16_BIT = 0x1, // must be 1 for backward compatibility
+ PCM_SUB_8_BIT = 0x2, // must be 2 for backward compatibility
+ };
+
+    // MP3 sub format field definition: can use 11 LSBs in the same way as the MP3 frame header to specify
+ // bit rate, stereo mode, version...
+ enum mp3_sub_format {
+ //TODO
+ };
+
+ // AMR NB/WB sub format field definition: specify frame block interleaving, bandwidth efficient or octet aligned,
+ // encoding mode for recording...
+ enum amr_sub_format {
+ //TODO
+ };
+
+ // AAC sub format field definition: specify profile or bitrate for recording...
+ enum aac_sub_format {
+ //TODO
+ };
+
+ // VORBIS sub format field definition: specify quality for recording...
+ enum vorbis_sub_format {
+ //TODO
+ };
+
+    // An audio format consists of a main format field (upper 8 bits) and a sub format field (lower 24 bits).
+    // The main format indicates the main codec type. The sub format field indicates options and parameters
+    // for each format. The sub format is mainly used for recording to indicate, for instance, the requested bitrate
+    // or profile. It can also be used for certain formats to give information not present in the encoded
+    // audio stream (e.g. octet alignment for AMR).
+ enum audio_format {
+ INVALID_FORMAT = -1,
+ FORMAT_DEFAULT = 0,
+ PCM = 0x00000000, // must be 0 for backward compatibility
+ MP3 = 0x01000000,
+ AMR_NB = 0x02000000,
+ AMR_WB = 0x03000000,
+ AAC = 0x04000000,
+ HE_AAC_V1 = 0x05000000,
+ HE_AAC_V2 = 0x06000000,
+ VORBIS = 0x07000000,
+ MAIN_FORMAT_MASK = 0xFF000000,
+ SUB_FORMAT_MASK = 0x00FFFFFF,
+ // Aliases
+ PCM_16_BIT = (PCM|PCM_SUB_16_BIT),
+ PCM_8_BIT = (PCM|PCM_SUB_8_BIT)
+ };
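+    // Note (illustration): a composed format can be split back into its fields with the
+    // masks above, e.g. (format & MAIN_FORMAT_MASK) == AAC identifies the codec and
+    // (format & SUB_FORMAT_MASK) carries the codec specific options.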
+
+
+ // Channel mask definitions must be kept in sync with JAVA values in /media/java/android/media/AudioFormat.java
+ enum audio_channels {
+ // output channels
+ CHANNEL_OUT_FRONT_LEFT = 0x4,
+ CHANNEL_OUT_FRONT_RIGHT = 0x8,
+ CHANNEL_OUT_FRONT_CENTER = 0x10,
+ CHANNEL_OUT_LOW_FREQUENCY = 0x20,
+ CHANNEL_OUT_BACK_LEFT = 0x40,
+ CHANNEL_OUT_BACK_RIGHT = 0x80,
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x100,
+ CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x200,
+ CHANNEL_OUT_BACK_CENTER = 0x400,
+ CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT,
+ CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT),
+ CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
+ CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER),
+ CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
+ CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER),
+ CHANNEL_OUT_ALL = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER | CHANNEL_OUT_BACK_CENTER),
+
+ // input channels
+ CHANNEL_IN_LEFT = 0x4,
+ CHANNEL_IN_RIGHT = 0x8,
+ CHANNEL_IN_FRONT = 0x10,
+ CHANNEL_IN_BACK = 0x20,
+ CHANNEL_IN_LEFT_PROCESSED = 0x40,
+ CHANNEL_IN_RIGHT_PROCESSED = 0x80,
+ CHANNEL_IN_FRONT_PROCESSED = 0x100,
+ CHANNEL_IN_BACK_PROCESSED = 0x200,
+ CHANNEL_IN_PRESSURE = 0x400,
+ CHANNEL_IN_X_AXIS = 0x800,
+ CHANNEL_IN_Y_AXIS = 0x1000,
+ CHANNEL_IN_Z_AXIS = 0x2000,
+ CHANNEL_IN_VOICE_UPLINK = 0x4000,
+ CHANNEL_IN_VOICE_DNLINK = 0x8000,
+#ifdef OMAP_ENHANCEMENT
+ CHANNEL_IN_VOICE_UPLINK_DNLINK = 0x10000,
+#endif
+ CHANNEL_IN_MONO = CHANNEL_IN_FRONT,
+ CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT),
+ CHANNEL_IN_ALL = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT | CHANNEL_IN_FRONT | CHANNEL_IN_BACK|
+ CHANNEL_IN_LEFT_PROCESSED | CHANNEL_IN_RIGHT_PROCESSED | CHANNEL_IN_FRONT_PROCESSED | CHANNEL_IN_BACK_PROCESSED|
+ CHANNEL_IN_PRESSURE | CHANNEL_IN_X_AXIS | CHANNEL_IN_Y_AXIS | CHANNEL_IN_Z_AXIS |
+#ifdef OMAP_ENHANCEMENT
+ CHANNEL_IN_VOICE_UPLINK | CHANNEL_IN_VOICE_DNLINK | CHANNEL_IN_VOICE_UPLINK_DNLINK)
+#else
+ CHANNEL_IN_VOICE_UPLINK | CHANNEL_IN_VOICE_DNLINK )
+#endif
+ };
+
+ enum audio_mode {
+ MODE_INVALID = -2,
+ MODE_CURRENT = -1,
+ MODE_NORMAL = 0,
+ MODE_RINGTONE,
+ MODE_IN_CALL,
+ MODE_IN_COMMUNICATION,
+ NUM_MODES // not a valid entry, denotes end-of-list
+ };
+
+ enum audio_in_acoustics {
+ AGC_ENABLE = 0x0001,
+ AGC_DISABLE = 0,
+ NS_ENABLE = 0x0002,
+ NS_DISABLE = 0,
+ TX_IIR_ENABLE = 0x0004,
+ TX_DISABLE = 0
+ };
+
+ // special audio session values
+ enum audio_sessions {
+ SESSION_OUTPUT_STAGE = -1, // session for effects attached to a particular output stream
+ // (value must be less than 0)
+ SESSION_OUTPUT_MIX = 0, // session for effects applied to output mix. These effects can
+ // be moved by audio policy manager to another output stream
+ // (value must be 0)
+ };
+
+    /* These are static methods to control the system-wide AudioFlinger;
+     * only privileged processes have access to them.
+     */
+
+ // mute/unmute microphone
+ static status_t muteMicrophone(bool state);
+ static status_t isMicrophoneMuted(bool *state);
+
+ // set/get master volume
+ static status_t setMasterVolume(float value);
+ static status_t getMasterVolume(float* volume);
+ // mute/unmute audio outputs
+ static status_t setMasterMute(bool mute);
+ static status_t getMasterMute(bool* mute);
+
+ // set/get stream volume on specified output
+ static status_t setStreamVolume(int stream, float value, int output);
+ static status_t getStreamVolume(int stream, float* volume, int output);
+
+ // mute/unmute stream
+ static status_t setStreamMute(int stream, bool mute);
+ static status_t getStreamMute(int stream, bool* mute);
+
+ // set audio mode in audio hardware (see AudioSystem::audio_mode)
+ static status_t setMode(int mode);
+
+ // returns true in *state if tracks are active on the specified stream
+ static status_t isStreamActive(int stream, bool *state);
+
+ // set/get audio hardware parameters. The function accepts a list of parameters
+ // key value pairs in the form: key1=value1;key2=value2;...
+ // Some keys are reserved for standard parameters (See AudioParameter class).
+ static status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
+ static String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+
+ static void setErrorCallback(audio_error_callback cb);
+
+ // helper function to obtain AudioFlinger service handle
+ static const sp<IAudioFlinger>& get_audio_flinger();
+
+ static float linearToLog(int volume);
+ static int logToLinear(float volume);
+
+ static status_t getOutputSamplingRate(int* samplingRate, int stream = DEFAULT);
+ static status_t getOutputFrameCount(int* frameCount, int stream = DEFAULT);
+ static status_t getOutputLatency(uint32_t* latency, int stream = DEFAULT);
+
+ static bool routedToA2dpOutput(int streamType);
+
+ static status_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
+ size_t* buffSize);
+
+ static status_t setVoiceVolume(float volume);
+#ifdef HAVE_FM_RADIO
+ static status_t setFmVolume(float volume);
+#endif
+
+ // return the number of audio frames written by AudioFlinger to audio HAL and
+    // audio dsp to DAC since the output on which the specified stream is playing
+ // has exited standby.
+ // returned status (from utils/Errors.h) can be:
+ // - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
+ // - INVALID_OPERATION: Not supported on current hardware platform
+ // - BAD_VALUE: invalid parameter
+ // NOTE: this feature is not supported on all hardware platforms and it is
+ // necessary to check returned status before using the returned values.
+ static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream = DEFAULT);
+
+ static unsigned int getInputFramesLost(audio_io_handle_t ioHandle);
+
+ static int newAudioSessionId();
+ //
+ // AudioPolicyService interface
+ //
+
+ enum audio_devices {
+ // output devices
+ DEVICE_OUT_EARPIECE = 0x1,
+ DEVICE_OUT_SPEAKER = 0x2,
+ DEVICE_OUT_WIRED_HEADSET = 0x4,
+ DEVICE_OUT_WIRED_HEADPHONE = 0x8,
+ DEVICE_OUT_BLUETOOTH_SCO = 0x10,
+ DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20,
+ DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40,
+ DEVICE_OUT_BLUETOOTH_A2DP = 0x80,
+ DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
+ DEVICE_OUT_AUX_DIGITAL = 0x400,
+#ifdef HAVE_FM_RADIO
+ DEVICE_OUT_FM = 0x800,
+ DEVICE_OUT_FM_SPEAKER = 0x1000,
+ DEVICE_OUT_FM_ALL = (DEVICE_OUT_FM | DEVICE_OUT_FM_SPEAKER),
+#elif defined(OMAP_ENHANCEMENT)
+ DEVICE_OUT_FM_TRANSMIT = 0x800,
+ DEVICE_OUT_LOW_POWER = 0x1000,
+#endif
+ DEVICE_OUT_HDMI = 0x2000,
+ DEVICE_OUT_DEFAULT = 0x8000,
+ DEVICE_OUT_ALL = (DEVICE_OUT_EARPIECE | DEVICE_OUT_SPEAKER | DEVICE_OUT_WIRED_HEADSET |
+#ifdef HAVE_FM_RADIO
+ DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_FM | DEVICE_OUT_FM_SPEAKER | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+#else
+ DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+#endif
+ DEVICE_OUT_BLUETOOTH_SCO_CARKIT | DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+#if defined(OMAP_ENHANCEMENT) && !defined(HAVE_FM_RADIO)
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL | DEVICE_OUT_LOW_POWER |
+ DEVICE_OUT_FM_TRANSMIT | DEVICE_OUT_DEFAULT),
+#else
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL | DEVICE_OUT_HDMI | DEVICE_OUT_DEFAULT),
+#endif
+ DEVICE_OUT_ALL_A2DP = (DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+
+ // input devices
+ DEVICE_IN_COMMUNICATION = 0x10000,
+ DEVICE_IN_AMBIENT = 0x20000,
+ DEVICE_IN_BUILTIN_MIC = 0x40000,
+ DEVICE_IN_BLUETOOTH_SCO_HEADSET = 0x80000,
+ DEVICE_IN_WIRED_HEADSET = 0x100000,
+ DEVICE_IN_AUX_DIGITAL = 0x200000,
+ DEVICE_IN_VOICE_CALL = 0x400000,
+ DEVICE_IN_BACK_MIC = 0x800000,
+#ifdef HAVE_FM_RADIO
+ DEVICE_IN_FM_RX = 0x1000000,
+ DEVICE_IN_FM_RX_A2DP = 0x2000000,
+#endif
+#ifdef OMAP_ENHANCEMENT
+ DEVICE_IN_FM_ANALOG = 0x1000000,
+#endif
+ DEVICE_IN_DEFAULT = 0x80000000,
+
+ DEVICE_IN_ALL = (DEVICE_IN_COMMUNICATION | DEVICE_IN_AMBIENT | DEVICE_IN_BUILTIN_MIC |
+ DEVICE_IN_BLUETOOTH_SCO_HEADSET | DEVICE_IN_WIRED_HEADSET | DEVICE_IN_AUX_DIGITAL |
+#ifdef HAVE_FM_RADIO
+ DEVICE_IN_VOICE_CALL | DEVICE_IN_BACK_MIC | DEVICE_IN_FM_RX | DEVICE_IN_FM_RX_A2DP | DEVICE_IN_DEFAULT)
+#elif OMAP_ENHANCEMENT
+ DEVICE_IN_VOICE_CALL | DEVICE_IN_BACK_MIC | DEVICE_IN_FM_ANALOG | DEVICE_IN_DEFAULT)
+#else
+ DEVICE_IN_VOICE_CALL | DEVICE_IN_BACK_MIC | DEVICE_IN_DEFAULT)
+#endif
+
+ };
+
+ // device connection states used for setDeviceConnectionState()
+ enum device_connection_state {
+ DEVICE_STATE_UNAVAILABLE,
+ DEVICE_STATE_AVAILABLE,
+ NUM_DEVICE_STATES
+ };
+
+    // request to open a direct output with getOutput() (as opposed to sharing an output with other AudioTracks)
+ enum output_flags {
+ OUTPUT_FLAG_INDIRECT = 0x0,
+ OUTPUT_FLAG_DIRECT = 0x1
+ };
+
+ // device categories used for setForceUse()
+ enum forced_config {
+ FORCE_NONE,
+ FORCE_SPEAKER,
+ FORCE_HEADPHONES,
+ FORCE_BT_SCO,
+ FORCE_BT_A2DP,
+ FORCE_WIRED_ACCESSORY,
+ FORCE_BT_CAR_DOCK,
+ FORCE_BT_DESK_DOCK,
+ NUM_FORCE_CONFIG,
+ FORCE_DEFAULT = FORCE_NONE
+ };
+
+ // usages used for setForceUse()
+ enum force_use {
+ FOR_COMMUNICATION,
+ FOR_MEDIA,
+ FOR_RECORD,
+ FOR_DOCK,
+ NUM_FORCE_USE
+ };
+
+ // types of io configuration change events received with ioConfigChanged()
+ enum io_config_event {
+ OUTPUT_OPENED,
+ OUTPUT_CLOSED,
+ OUTPUT_CONFIG_CHANGED,
+ INPUT_OPENED,
+ INPUT_CLOSED,
+ INPUT_CONFIG_CHANGED,
+ STREAM_CONFIG_CHANGED,
+ NUM_CONFIG_EVENTS
+ };
+
+    // audio output descriptor used to cache output configurations in the client process to avoid frequent calls
+ // through IAudioFlinger
+ class OutputDescriptor {
+ public:
+ OutputDescriptor()
+ : samplingRate(0), format(0), channels(0), frameCount(0), latency(0) {}
+
+ uint32_t samplingRate;
+ int32_t format;
+ int32_t channels;
+ size_t frameCount;
+ uint32_t latency;
+ };
+
+ //
+ // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
+ //
+ static status_t setDeviceConnectionState(audio_devices device, device_connection_state state, const char *device_address);
+ static device_connection_state getDeviceConnectionState(audio_devices device, const char *device_address);
+ static status_t setPhoneState(int state);
+ static status_t setRingerMode(uint32_t mode, uint32_t mask);
+ static status_t setForceUse(force_use usage, forced_config config);
+ static forced_config getForceUse(force_use usage);
+ static audio_io_handle_t getOutput(stream_type stream,
+ uint32_t samplingRate = 0,
+ uint32_t format = FORMAT_DEFAULT,
+ uint32_t channels = CHANNEL_OUT_STEREO,
+ output_flags flags = OUTPUT_FLAG_INDIRECT);
+ static status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
+ static status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
+ static void releaseOutput(audio_io_handle_t output);
+ static audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate = 0,
+ uint32_t format = FORMAT_DEFAULT,
+ uint32_t channels = CHANNEL_IN_MONO,
+ audio_in_acoustics acoustics = (audio_in_acoustics)0);
+ static status_t startInput(audio_io_handle_t input);
+ static status_t stopInput(audio_io_handle_t input);
+ static void releaseInput(audio_io_handle_t input);
+ static status_t initStreamVolume(stream_type stream,
+ int indexMin,
+ int indexMax);
+ static status_t setStreamVolumeIndex(stream_type stream, int index);
+ static status_t getStreamVolumeIndex(stream_type stream, int *index);
+
+ static uint32_t getStrategyForStream(stream_type stream);
+
+ static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
+ static status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id);
+ static status_t unregisterEffect(int id);
+
+ static const sp<IAudioPolicyService>& get_audio_policy_service();
+
+ // ----------------------------------------------------------------------------
+
+ static uint32_t popCount(uint32_t u);
+ static bool isOutputDevice(audio_devices device);
+ static bool isInputDevice(audio_devices device);
+ static bool isA2dpDevice(audio_devices device);
+#ifdef HAVE_FM_RADIO
+ static bool isFmDevice(audio_devices device);
+#endif
+ static bool isBluetoothScoDevice(audio_devices device);
+ static bool isLowVisibility(stream_type stream);
+ static bool isOutputChannel(uint32_t channel);
+ static bool isInputChannel(uint32_t channel);
+ static bool isValidFormat(uint32_t format);
+ static bool isLinearPCM(uint32_t format);
+
+private:
+
+ class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
+ {
+ public:
+ AudioFlingerClient() {
+ }
+
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+        // values for output/input parameters up to date in the client process
+ virtual void ioConfigChanged(int event, int ioHandle, void *param2);
+ };
+
+ class AudioPolicyServiceClient: public IBinder::DeathRecipient
+ {
+ public:
+ AudioPolicyServiceClient() {
+ }
+
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+ };
+
+ static sp<AudioFlingerClient> gAudioFlingerClient;
+ static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
+ friend class AudioFlingerClient;
+ friend class AudioPolicyServiceClient;
+
+ static Mutex gLock;
+ static sp<IAudioFlinger> gAudioFlinger;
+ static audio_error_callback gAudioErrorCallback;
+
+ static size_t gInBuffSize;
+ // previous parameters for recording buffer size queries
+ static uint32_t gPrevInSamplingRate;
+ static int gPrevInFormat;
+ static int gPrevInChannelCount;
+
+ static sp<IAudioPolicyService> gAudioPolicyService;
+
+ // mapping between stream types and outputs
+ static DefaultKeyedVector<int, audio_io_handle_t> gStreamOutputMap;
+    // list of output descriptors containing cached parameters (sampling rate, frame count, channel count...)
+ static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
+};
+
+class AudioParameter {
+
+public:
+ AudioParameter() {}
+ AudioParameter(const String8& keyValuePairs);
+ virtual ~AudioParameter();
+
+ // reserved parameter keys for changing standard parameters with setParameters() function.
+ // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
+ // configuration changes and act accordingly.
+ // keyRouting: to change audio routing, value is an int in AudioSystem::audio_devices
+    // keySamplingRate: to change the sampling rate, value is an int
+ // keyFormat: to change audio format, value is an int in AudioSystem::audio_format
+ // keyChannels: to change audio channel configuration, value is an int in AudioSystem::audio_channels
+ // keyFrameCount: to change audio output frame count, value is an int
+ // keyInputSource: to change audio input source, value is an int in audio_source
+ // (defined in media/mediarecorder.h)
+ static const char *keyRouting;
+ static const char *keySamplingRate;
+ static const char *keyFormat;
+ static const char *keyChannels;
+ static const char *keyFrameCount;
+#ifdef HAVE_FM_RADIO
+ static const char *keyFmOn;
+ static const char *keyFmOff;
+#endif
+ static const char *keyInputSource;
+
+ String8 toString();
+
+ status_t add(const String8& key, const String8& value);
+ status_t addInt(const String8& key, const int value);
+ status_t addFloat(const String8& key, const float value);
+
+ status_t remove(const String8& key);
+
+ status_t get(const String8& key, String8& value);
+ status_t getInt(const String8& key, int& value);
+ status_t getFloat(const String8& key, float& value);
+ status_t getAt(size_t index, String8& key, String8& value);
+
+ size_t size() { return mParameters.size(); }
+
+private:
+ String8 mKeyValuePairs;
+ KeyedVector <String8, String8> mParameters;
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIOSYSTEM_H_*/
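A hypothetical usage sketch of the AudioParameter helper declared above, building the key/value string passed to AudioSystem::setParameters() when rerouting an output (ioHandle stands for an already opened output handle; the include path is an assumption and may differ from the install location of this header):

    #include <utils/String8.h>
    #include "AudioSystem.h"   // the header above

    using namespace android;

    // route the given output to the wired headset by sending a keyRouting parameter
    static void routeToHeadset(audio_io_handle_t ioHandle)
    {
        AudioParameter param;
        param.addInt(String8(AudioParameter::keyRouting),
                     (int)AudioSystem::DEVICE_OUT_WIRED_HEADSET);
        AudioSystem::setParameters(ioHandle, param.toString());
    }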
diff --git a/audio/audio_policy_hal.cpp b/audio/audio_policy_hal.cpp
new file mode 100755
index 0000000..110a919
--- /dev/null
+++ b/audio/audio_policy_hal.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "legacy_audio_policy_hal"
+#define LOG_NDEBUG 0
+
+#include <stdint.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+
+#include <hardware_legacy/AudioPolicyInterface.h>
+#include <hardware_legacy/AudioSystemLegacy.h>
+
+#include "AudioPolicyCompatClient.h"
+
+namespace android_audio_legacy {
+
+extern "C" {
+
+struct legacy_ap_module {
+ struct audio_policy_module module;
+};
+
+struct legacy_ap_device {
+ struct audio_policy_device device;
+};
+
+struct legacy_audio_policy {
+ struct audio_policy policy;
+
+ void *service;
+ struct audio_policy_service_ops *aps_ops;
+ AudioPolicyCompatClient *service_client;
+ AudioPolicyInterface *apm;
+};
+
+static inline struct legacy_audio_policy * to_lap(struct audio_policy *pol)
+{
+ return reinterpret_cast<struct legacy_audio_policy *>(pol);
+}
+
+static inline const struct legacy_audio_policy * to_clap(const struct audio_policy *pol)
+{
+ return reinterpret_cast<const struct legacy_audio_policy *>(pol);
+}
+
+
+static int ap_set_device_connection_state(struct audio_policy *pol,
+ audio_devices_t device,
+ audio_policy_dev_state_t state,
+ const char *device_address)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->setDeviceConnectionState(
+ (AudioSystem::audio_devices)device,
+ (AudioSystem::device_connection_state)state,
+ device_address);
+}
+
+static audio_policy_dev_state_t ap_get_device_connection_state(
+ const struct audio_policy *pol,
+ audio_devices_t device,
+ const char *device_address)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return (audio_policy_dev_state_t)lap->apm->getDeviceConnectionState(
+ (AudioSystem::audio_devices)device,
+ device_address);
+}
+
+static void ap_set_phone_state(struct audio_policy *pol, int state)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->setPhoneState(state);
+}
+
+ /* indicate a change in ringer mode */
+static void ap_set_ringer_mode(struct audio_policy *pol, uint32_t mode,
+ uint32_t mask)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->setRingerMode(mode, mask);
+}
+
+ /* force using a specific device category for the specified usage */
+static void ap_set_force_use(struct audio_policy *pol,
+ audio_policy_force_use_t usage,
+ audio_policy_forced_cfg_t config)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->setForceUse((AudioSystem::force_use)usage,
+ (AudioSystem::forced_config)config);
+}
+
+ /* retrieve current device category forced for a given usage */
+static audio_policy_forced_cfg_t ap_get_force_use(
+ const struct audio_policy *pol,
+ audio_policy_force_use_t usage)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return (audio_policy_forced_cfg_t)lap->apm->getForceUse(
+ (AudioSystem::force_use)usage);
+}
+
+/* if can_mute is true, then audio streams that are marked ENFORCED_AUDIBLE
+ * can still be muted. */
+static void ap_set_can_mute_enforced_audible(struct audio_policy *pol,
+ bool can_mute)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->setSystemProperty("ro.camera.sound.forced", can_mute ? "0" : "1");
+}
+
+static int ap_init_check(const struct audio_policy *pol)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return lap->apm->initCheck();
+}
+
+static audio_io_handle_t ap_get_output(struct audio_policy *pol,
+ audio_stream_type_t stream,
+ uint32_t sampling_rate,
+ uint32_t format,
+ uint32_t channels,
+ audio_policy_output_flags_t flags)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+
+ LOGV("%s: tid %d", __func__, gettid());
+ return lap->apm->getOutput((AudioSystem::stream_type)stream,
+ sampling_rate, format, channels,
+ (AudioSystem::output_flags)flags);
+}
+
+static int ap_start_output(struct audio_policy *pol, audio_io_handle_t output,
+ audio_stream_type_t stream, int session)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->startOutput(output, (AudioSystem::stream_type)stream,
+ session);
+}
+
+static int ap_stop_output(struct audio_policy *pol, audio_io_handle_t output,
+ audio_stream_type_t stream, int session)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->stopOutput(output, (AudioSystem::stream_type)stream,
+ session);
+}
+
+static void ap_release_output(struct audio_policy *pol,
+ audio_io_handle_t output)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->releaseOutput(output);
+}
+
+static audio_io_handle_t ap_get_input(struct audio_policy *pol, int inputSource,
+ uint32_t sampling_rate,
+ uint32_t format,
+ uint32_t channels,
+ audio_in_acoustics_t acoustics)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->getInput(inputSource, sampling_rate, format, channels,
+ (AudioSystem::audio_in_acoustics)acoustics);
+}
+
+static int ap_start_input(struct audio_policy *pol, audio_io_handle_t input)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->startInput(input);
+}
+
+static int ap_stop_input(struct audio_policy *pol, audio_io_handle_t input)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->stopInput(input);
+}
+
+static void ap_release_input(struct audio_policy *pol, audio_io_handle_t input)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->releaseInput(input);
+}
+
+static void ap_init_stream_volume(struct audio_policy *pol,
+ audio_stream_type_t stream, int index_min,
+ int index_max)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ lap->apm->initStreamVolume((AudioSystem::stream_type)stream, index_min,
+ index_max);
+}
+
+static int ap_set_stream_volume_index(struct audio_policy *pol,
+ audio_stream_type_t stream,
+ int index)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->setStreamVolumeIndex((AudioSystem::stream_type)stream,
+ index);
+}
+
+static int ap_get_stream_volume_index(const struct audio_policy *pol,
+ audio_stream_type_t stream,
+ int *index)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return lap->apm->getStreamVolumeIndex((AudioSystem::stream_type)stream,
+ index);
+}
+
+static uint32_t ap_get_strategy_for_stream(const struct audio_policy *pol,
+ audio_stream_type_t stream)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return lap->apm->getStrategyForStream((AudioSystem::stream_type)stream);
+}
+
+static uint32_t ap_get_devices_for_stream(const struct audio_policy *pol,
+ audio_stream_type_t stream)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return lap->apm->getDevicesForStream((AudioSystem::stream_type)stream);
+}
+
+static audio_io_handle_t ap_get_output_for_effect(struct audio_policy *pol,
+ struct effect_descriptor_s *desc)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->getOutputForEffect(desc);
+}
+
+static int ap_register_effect(struct audio_policy *pol,
+ struct effect_descriptor_s *desc,
+ audio_io_handle_t io,
+ uint32_t strategy,
+ int session,
+ int id)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->registerEffect(desc, io, strategy, session, id);
+}
+
+static int ap_unregister_effect(struct audio_policy *pol, int id)
+{
+ struct legacy_audio_policy *lap = to_lap(pol);
+ return lap->apm->unregisterEffect(id);
+}
+
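+/* note: the enabled state of an effect is acknowledged here but not forwarded to the
+ * policy manager, so the effect CPU/memory bookkeeping in AudioPolicyManagerBase is
+ * not updated. */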
+static int ap_set_effect_enabled(struct audio_policy *pol, int id, bool enabled)
+{
+ return NO_ERROR;
+}
+
+static bool ap_is_stream_active(const struct audio_policy *pol, int stream,
+ uint32_t in_past_ms)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return lap->apm->isStreamActive(stream, in_past_ms);
+}
+
+static int ap_dump(const struct audio_policy *pol, int fd)
+{
+ const struct legacy_audio_policy *lap = to_clap(pol);
+ return lap->apm->dump(fd);
+}
+
+static int create_legacy_ap(const struct audio_policy_device *device,
+ struct audio_policy_service_ops *aps_ops,
+ void *service,
+ struct audio_policy **ap)
+{
+ struct legacy_audio_policy *lap;
+ int ret;
+
+ if (!service || !aps_ops)
+ return -EINVAL;
+
+ lap = (struct legacy_audio_policy *)calloc(1, sizeof(*lap));
+ if (!lap)
+ return -ENOMEM;
+
+ lap->policy.set_device_connection_state = ap_set_device_connection_state;
+ lap->policy.get_device_connection_state = ap_get_device_connection_state;
+ lap->policy.set_phone_state = ap_set_phone_state;
+ lap->policy.set_ringer_mode = ap_set_ringer_mode;
+ lap->policy.set_force_use = ap_set_force_use;
+ lap->policy.get_force_use = ap_get_force_use;
+ lap->policy.set_can_mute_enforced_audible =
+ ap_set_can_mute_enforced_audible;
+ lap->policy.init_check = ap_init_check;
+ lap->policy.get_output = ap_get_output;
+ lap->policy.start_output = ap_start_output;
+ lap->policy.stop_output = ap_stop_output;
+ lap->policy.release_output = ap_release_output;
+ lap->policy.get_input = ap_get_input;
+ lap->policy.start_input = ap_start_input;
+ lap->policy.stop_input = ap_stop_input;
+ lap->policy.release_input = ap_release_input;
+ lap->policy.init_stream_volume = ap_init_stream_volume;
+ lap->policy.set_stream_volume_index = ap_set_stream_volume_index;
+ lap->policy.get_stream_volume_index = ap_get_stream_volume_index;
+ lap->policy.get_strategy_for_stream = ap_get_strategy_for_stream;
+ lap->policy.get_devices_for_stream = ap_get_devices_for_stream;
+ lap->policy.get_output_for_effect = ap_get_output_for_effect;
+ lap->policy.register_effect = ap_register_effect;
+ lap->policy.unregister_effect = ap_unregister_effect;
+ lap->policy.set_effect_enabled = ap_set_effect_enabled;
+ lap->policy.is_stream_active = ap_is_stream_active;
+ lap->policy.dump = ap_dump;
+
+ lap->service = service;
+ lap->aps_ops = aps_ops;
+ lap->service_client =
+ new AudioPolicyCompatClient(aps_ops, service);
+ if (!lap->service_client) {
+ ret = -ENOMEM;
+ goto err_new_compat_client;
+ }
+
+ lap->apm = createAudioPolicyManager(lap->service_client);
+ if (!lap->apm) {
+ ret = -ENOMEM;
+ goto err_create_apm;
+ }
+
+ *ap = &lap->policy;
+ return 0;
+
+err_create_apm:
+ delete lap->service_client;
+err_new_compat_client:
+ free(lap);
+ *ap = NULL;
+ return ret;
+}
+
+static int destroy_legacy_ap(const struct audio_policy_device *ap_dev,
+ struct audio_policy *ap)
+{
+ struct legacy_audio_policy *lap = to_lap(ap);
+
+ if (!lap)
+ return 0;
+
+ if (lap->apm)
+ destroyAudioPolicyManager(lap->apm);
+ if (lap->service_client)
+ delete lap->service_client;
+ free(lap);
+ return 0;
+}
+
+static int legacy_ap_dev_close(hw_device_t* device)
+{
+ if (device)
+ free(device);
+ return 0;
+}
+
+static int legacy_ap_dev_open(const hw_module_t* module, const char* name,
+ hw_device_t** device)
+{
+ struct legacy_ap_device *dev;
+
+ if (strcmp(name, AUDIO_POLICY_INTERFACE) != 0)
+ return -EINVAL;
+
+ dev = (struct legacy_ap_device *)calloc(1, sizeof(*dev));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->device.common.tag = HARDWARE_DEVICE_TAG;
+ dev->device.common.version = 0;
+ dev->device.common.module = const_cast<hw_module_t*>(module);
+ dev->device.common.close = legacy_ap_dev_close;
+ dev->device.create_audio_policy = create_legacy_ap;
+ dev->device.destroy_audio_policy = destroy_legacy_ap;
+
+ *device = &dev->device.common;
+
+ return 0;
+}
+
+static struct hw_module_methods_t legacy_ap_module_methods = {
+ open: legacy_ap_dev_open
+};
+
+struct legacy_ap_module HAL_MODULE_INFO_SYM = {
+ module: {
+ common: {
+ tag: HARDWARE_MODULE_TAG,
+ version_major: 1,
+ version_minor: 0,
+ id: AUDIO_POLICY_HARDWARE_MODULE_ID,
+ name: "LEGACY Audio Policy HAL",
+ author: "The Android Open Source Project",
+ methods: &legacy_ap_module_methods,
+ dso : NULL,
+ reserved : {0},
+ },
+ },
+};
+
+}; // extern "C"
+
+}; // namespace android_audio_legacy
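A hypothetical sketch of how this module is loaded (assuming the hw_get_module(), audio_policy_dev_open() and audio_policy_dev_close() helpers from libhardware; AudioPolicyService performs the equivalent steps when it loads the audio_policy.<platform>.so built above):

    #include <hardware/hardware.h>
    #include <hardware/audio_policy.h>

    static struct audio_policy *open_policy(void *service,
                                            struct audio_policy_service_ops *ops)
    {
        const hw_module_t *module;
        struct audio_policy_device *dev;
        struct audio_policy *policy = NULL;

        if (hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module) != 0)
            return NULL;                      // module not present on this build
        if (audio_policy_dev_open(module, &dev) != 0)
            return NULL;                      // legacy_ap_dev_open() failed
        if (dev->create_audio_policy(dev, ops, service, &policy) != 0) {
            audio_policy_dev_close(dev);      // create_legacy_ap() failed
            return NULL;
        }
        return policy;                        // later released with destroy_audio_policy()
    }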