author     Eric Laurent <elaurent@google.com>    2009-07-17 12:17:14 -0700
committer  Eric Laurent <elaurent@google.com>    2009-07-23 06:03:39 -0700
commit     a553c25b33c99b345cf1c8688f8df0ed8df14e5a (patch)
tree       025c461b13e66ad0ceac8d0f8d9b13fd88ae168a
parent     ebd7bc54028949619bbf3fa5ed6c1188f588c230 (diff)
Fix issue 1795088 Improve audio routing code
Initial commit for review.
Integrated comments after patch set 1 review.
Fixed lockup in AudioFlinger::ThreadBase::exit().
Fixed lockup when playing tone with AudioPolicyService startTone().
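The core of the change is that per-stream routing decisions now go through a new AudioPolicyService and a generic key=value parameter interface instead of AudioSystem::setRouting()/setParameter(). A minimal client-side sketch of the output lifecycle this patch introduces (the entry points come from the new include/media/AudioSystem.h hunk below; the sample rate and error handling are illustrative assumptions, not part of the patch):

    // Sketch only: how a playback client is expected to use the new
    // AudioPolicyService entry points declared in AudioSystem.h by this patch.
    #include <media/AudioSystem.h>
    #include <utils/Errors.h>

    using namespace android;

    status_t playMusicViaPolicy()
    {
        // Ask the policy manager which output an AudioSystem::MUSIC stream should use.
        audio_io_handle_t output = AudioSystem::getOutput(AudioSystem::MUSIC,
                                                          44100,  // assumed rate for the sketch
                                                          AudioSystem::PCM_16_BIT,
                                                          AudioSystem::CHANNEL_OUT_STEREO);
        if (output == NULL) {
            return NO_INIT;
        }
        // Let the policy manager apply routing and volume for the starting stream.
        status_t status = AudioSystem::startOutput(output, AudioSystem::MUSIC);
        if (status == NO_ERROR) {
            // ... write PCM data to the track attached to this output ...
            AudioSystem::stopOutput(output, AudioSystem::MUSIC);
        }
        AudioSystem::releaseOutput(output);
        return status;
    }

The server-side counterparts live in the new libs/audioflinger/AudioPolicyService.cpp and AudioPolicyManagerGeneric.cpp files listed in the diffstat below.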
-rw-r--r--  api/current.xml | 433
-rw-r--r--  camera/libcameraservice/CameraService.cpp | 9
-rw-r--r--  cmds/servicemanager/service_manager.c | 1
-rw-r--r--  cmds/system_server/library/system_init.cpp | 4
-rw-r--r--  core/java/android/server/BluetoothA2dpService.java | 15
-rw-r--r--  core/jni/android_media_AudioRecord.cpp | 26
-rw-r--r--  core/jni/android_media_AudioSystem.cpp | 166
-rw-r--r--  core/jni/android_media_AudioTrack.cpp | 29
-rw-r--r--  include/media/AudioRecord.h | 57
-rw-r--r--  include/media/AudioSystem.h | 398
-rw-r--r--  include/media/AudioTrack.h | 25
-rw-r--r--  include/media/IAudioFlinger.h | 56
-rw-r--r--  include/media/IAudioFlingerClient.h | 6
-rw-r--r--  include/media/IAudioPolicyService.h | 90
-rw-r--r--  include/private/media/AudioTrackShared.h | 13
-rw-r--r--  include/tts/TtsEngine.h | 4
-rw-r--r--  libs/audioflinger/A2dpAudioInterface.cpp | 224
-rw-r--r--  libs/audioflinger/A2dpAudioInterface.h | 50
-rw-r--r--  libs/audioflinger/Android.mk | 64
-rw-r--r--  libs/audioflinger/AudioDumpInterface.cpp | 382
-rw-r--r--  libs/audioflinger/AudioDumpInterface.h | 116
-rw-r--r--  libs/audioflinger/AudioFlinger.cpp | 3236
-rw-r--r--  libs/audioflinger/AudioFlinger.h | 525
-rw-r--r--  libs/audioflinger/AudioHardwareGeneric.cpp | 161
-rw-r--r--  libs/audioflinger/AudioHardwareGeneric.h | 49
-rw-r--r--  libs/audioflinger/AudioHardwareInterface.cpp | 109
-rw-r--r--  libs/audioflinger/AudioHardwareStub.cpp | 52
-rw-r--r--  libs/audioflinger/AudioHardwareStub.h | 37
-rw-r--r--  libs/audioflinger/AudioMixer.cpp | 1
-rw-r--r--  libs/audioflinger/AudioMixer.h | 3
-rw-r--r--  libs/audioflinger/AudioPolicyManagerGeneric.cpp | 764
-rw-r--r--  libs/audioflinger/AudioPolicyManagerGeneric.h | 189
-rw-r--r--  libs/audioflinger/AudioPolicyService.cpp | 677
-rw-r--r--  libs/audioflinger/AudioPolicyService.h | 201
-rw-r--r--  media/java/android/media/AudioFormat.java | 55
-rw-r--r--  media/java/android/media/AudioManager.java | 203
-rw-r--r--  media/java/android/media/AudioRecord.java | 36
-rw-r--r--  media/java/android/media/AudioService.java | 701
-rw-r--r--  media/java/android/media/AudioSystem.java | 157
-rw-r--r--  media/java/android/media/AudioTrack.java | 35
-rw-r--r--  media/java/android/media/IAudioService.aidl | 16
-rw-r--r--  media/java/android/media/JetPlayer.java | 2
-rw-r--r--  media/java/android/media/ToneGenerator.java | 4
-rw-r--r--  media/jni/soundpool/SoundPool.cpp | 5
-rw-r--r--  media/libmedia/Android.mk | 3
-rw-r--r--  media/libmedia/AudioRecord.cpp | 90
-rw-r--r--  media/libmedia/AudioSystem.cpp | 742
-rw-r--r--  media/libmedia/AudioTrack.cpp | 189
-rw-r--r--  media/libmedia/IAudioFlinger.cpp | 382
-rw-r--r--  media/libmedia/IAudioFlingerClient.cpp | 45
-rw-r--r--  media/libmedia/IAudioPolicyService.cpp | 423
-rw-r--r--  media/libmedia/JetPlayer.cpp | 2
-rw-r--r--  media/libmedia/ToneGenerator.cpp | 2
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 16
-rw-r--r--  media/mediaserver/main_mediaserver.cpp | 2
-rw-r--r--  media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java | 96
-rw-r--r--  packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java | 26
-rw-r--r--  packages/TtsService/jni/android_tts_SynthProxy.cpp | 10
-rw-r--r--  services/java/com/android/server/HeadsetObserver.java | 48
59 files changed, 8533 insertions, 2929 deletions
diff --git a/api/current.xml b/api/current.xml
index 9248ccb..fcebc73 100644
--- a/api/current.xml
+++ b/api/current.xml
@@ -73186,7 +73186,7 @@
value="1"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -73197,7 +73197,7 @@
value="0"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -73208,7 +73208,7 @@
value="2"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -73219,6 +73219,358 @@
value="3"
static="true"
final="true"
+ deprecated="deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_INVALID"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="-1"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_BACK"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="524288"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_BACK_PROCESSED"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="8388608"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_DEFAULT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="0"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_FRONT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="262144"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_FRONT_PROCESSED"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="4194304"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_LEFT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="65536"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_LEFT_PROCESSED"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="1048576"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_MONO"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="262144"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_PRESSURE"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="16777216"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_RIGHT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="131072"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_RIGHT_PROCESSED"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="2097152"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_STEREO"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="196608"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_X_AXIS"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="33554432"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_Y_AXIS"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="67108864"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_IN_Z_AXIS"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="134217728"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_5POINT1"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="63"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_7POINT1"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="255"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_BACK_CENTER"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="256"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_BACK_LEFT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="16"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_BACK_RIGHT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="32"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_DEFAULT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="0"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_FRONT_CENTER"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="4"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_FRONT_LEFT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="1"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_FRONT_LEFT_OF_CENTER"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="64"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_FRONT_RIGHT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="2"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_FRONT_RIGHT_OF_CENTER"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="128"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_LOW_FREQUENCY"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="8"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_MONO"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="1"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_QUAD"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="51"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_STEREO"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="3"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="CHANNEL_OUT_SURROUND"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="263"
+ static="true"
+ final="true"
deprecated="not deprecated"
visibility="public"
>
@@ -73336,6 +73688,19 @@
visibility="public"
>
</method>
+<method name="getParameters"
+ return="java.lang.String"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="keys" type="java.lang.String">
+</parameter>
+</method>
<method name="getRingerMode"
return="int"
abstract="false"
@@ -73454,6 +73819,17 @@
visibility="public"
>
</method>
+<method name="isWiredHeadsetOn"
+ return="boolean"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="loadSoundEffects"
return="void"
abstract="false"
@@ -73500,7 +73876,7 @@
synchronized="false"
static="false"
final="false"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
<parameter name="on" type="boolean">
@@ -73545,6 +73921,19 @@
<parameter name="mode" type="int">
</parameter>
</method>
+<method name="setParameters"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="keyValuePairs" type="java.lang.String">
+</parameter>
+</method>
<method name="setRingerMode"
return="void"
abstract="false"
@@ -73650,6 +74039,19 @@
<parameter name="vibrateSetting" type="int">
</parameter>
</method>
+<method name="setWiredHeadsetOn"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="deprecated"
+ visibility="public"
+>
+<parameter name="on" type="boolean">
+</parameter>
+</method>
<method name="shouldVibrate"
return="boolean"
abstract="false"
@@ -74022,7 +74424,7 @@
value="-1"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -74044,7 +74446,7 @@
value="16"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -74055,7 +74457,7 @@
value="4"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -74066,7 +74468,7 @@
value="1"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -74077,7 +74479,7 @@
value="8"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -74088,7 +74490,7 @@
value="2"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -74103,6 +74505,17 @@
visibility="public"
>
</field>
+<field name="STREAM_DTMF"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="8"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
<field name="STREAM_MUSIC"
type="int"
transient="false"
diff --git a/camera/libcameraservice/CameraService.cpp b/camera/libcameraservice/CameraService.cpp
index 97b43a4..e7c1fbb 100644
--- a/camera/libcameraservice/CameraService.cpp
+++ b/camera/libcameraservice/CameraService.cpp
@@ -33,7 +33,6 @@
#include "CameraService.h"
#include <cutils/atomic.h>
-#include <cutils/properties.h>
namespace android {
@@ -199,13 +198,7 @@ static sp<MediaPlayer> newMediaPlayer(const char *file)
{
sp<MediaPlayer> mp = new MediaPlayer();
if (mp->setDataSource(file) == NO_ERROR) {
- char value[PROPERTY_VALUE_MAX];
- property_get("ro.camera.sound.forced", value, "0");
- if (atoi(value)) {
- mp->setAudioStreamType(AudioSystem::ENFORCED_AUDIBLE);
- } else {
- mp->setAudioStreamType(AudioSystem::SYSTEM);
- }
+ mp->setAudioStreamType(AudioSystem::ENFORCED_AUDIBLE);
mp->prepare();
} else {
mp.clear();
diff --git a/cmds/servicemanager/service_manager.c b/cmds/servicemanager/service_manager.c
index e4aa8b5..f3a4713 100644
--- a/cmds/servicemanager/service_manager.c
+++ b/cmds/servicemanager/service_manager.c
@@ -30,6 +30,7 @@ static struct {
{ AID_MEDIA, "media.audio_flinger" },
{ AID_MEDIA, "media.player" },
{ AID_MEDIA, "media.camera" },
+ { AID_MEDIA, "media.audio_policy" },
{ AID_RADIO, "radio.phone" },
{ AID_RADIO, "radio.sms" },
{ AID_RADIO, "radio.phonesubinfo" },
diff --git a/cmds/system_server/library/system_init.cpp b/cmds/system_server/library/system_init.cpp
index ea78461..1d57fdc 100644
--- a/cmds/system_server/library/system_init.cpp
+++ b/cmds/system_server/library/system_init.cpp
@@ -17,6 +17,7 @@
#include <SurfaceFlinger.h>
#include <AudioFlinger.h>
#include <CameraService.h>
+#include <AudioPolicyService.h>
#include <MediaPlayerService.h>
#include <android_runtime/AndroidRuntime.h>
@@ -80,6 +81,9 @@ extern "C" status_t system_init()
// Start the camera service
CameraService::instantiate();
+
+ // Start the audio policy service
+ AudioPolicyService::instantiate();
}
// And now start the Android runtime. We have to do this bit
diff --git a/core/java/android/server/BluetoothA2dpService.java b/core/java/android/server/BluetoothA2dpService.java
index 722a7cc..fb436e5 100644
--- a/core/java/android/server/BluetoothA2dpService.java
+++ b/core/java/android/server/BluetoothA2dpService.java
@@ -54,7 +54,6 @@ public class BluetoothA2dpService extends IBluetoothA2dp.Stub {
private static final String BLUETOOTH_ADMIN_PERM = android.Manifest.permission.BLUETOOTH_ADMIN;
private static final String BLUETOOTH_PERM = android.Manifest.permission.BLUETOOTH;
- private static final String A2DP_SINK_ADDRESS = "a2dp_sink_address";
private static final String BLUETOOTH_ENABLED = "bluetooth_enabled";
private static final int MESSAGE_CONNECT_TO = 1;
@@ -238,7 +237,7 @@ public class BluetoothA2dpService extends IBluetoothA2dp.Stub {
}
}
}
- mAudioManager.setParameter(BLUETOOTH_ENABLED, "true");
+ mAudioManager.setParameters(BLUETOOTH_ENABLED+"=true");
}
private synchronized void onBluetoothDisable() {
@@ -262,8 +261,8 @@ public class BluetoothA2dpService extends IBluetoothA2dp.Stub {
}
mAudioDevices.clear();
}
- mAudioManager.setBluetoothA2dpOn(false);
- mAudioManager.setParameter(BLUETOOTH_ENABLED, "false");
+
+ mAudioManager.setParameters(BLUETOOTH_ENABLED+"=false");
}
public synchronized int connectSink(String address) {
@@ -403,8 +402,7 @@ public class BluetoothA2dpService extends IBluetoothA2dp.Stub {
Intent intent = new Intent(AudioManager.ACTION_AUDIO_BECOMING_NOISY);
mContext.sendBroadcast(intent);
}
- if (--mSinkCount == 0)
- mAudioManager.setBluetoothA2dpOn(false);
+ mSinkCount--;
} else if (state == BluetoothA2dp.STATE_CONNECTED) {
mSinkCount ++;
}
@@ -417,11 +415,6 @@ public class BluetoothA2dpService extends IBluetoothA2dp.Stub {
mContext.sendBroadcast(intent, BLUETOOTH_PERM);
if (DBG) log("A2DP state : address: " + address + " State:" + prevState + "->" + state);
-
- if (state == BluetoothA2dp.STATE_CONNECTED) {
- mAudioManager.setParameter(A2DP_SINK_ADDRESS, address);
- mAudioManager.setBluetoothA2dpOn(true);
- }
}
}
diff --git a/core/jni/android_media_AudioRecord.cpp b/core/jni/android_media_AudioRecord.cpp
index 44a9e8c..0be996d 100644
--- a/core/jni/android_media_AudioRecord.cpp
+++ b/core/jni/android_media_AudioRecord.cpp
@@ -28,8 +28,8 @@
#include "android_runtime/AndroidRuntime.h"
#include "utils/Log.h"
-#include "media/AudioSystem.h"
#include "media/AudioRecord.h"
+#include "media/mediarecorder.h"
// ----------------------------------------------------------------------------
@@ -62,7 +62,7 @@ struct audiorecord_callback_cookie {
#define AUDIORECORD_ERROR_BAD_VALUE -2
#define AUDIORECORD_ERROR_INVALID_OPERATION -3
#define AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT -16
-#define AUDIORECORD_ERROR_SETUP_INVALIDCHANNELCOUNT -17
+#define AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK -17
#define AUDIORECORD_ERROR_SETUP_INVALIDFORMAT -18
#define AUDIORECORD_ERROR_SETUP_INVALIDSOURCE -19
#define AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED -20
@@ -122,17 +122,18 @@ static void recorderCallback(int event, void* user, void *info) {
// ----------------------------------------------------------------------------
static int
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
- jint source, jint sampleRateInHertz, jint nbChannels,
+ jint source, jint sampleRateInHertz, jint channels,
jint audioFormat, jint buffSizeInBytes)
{
//LOGV(">> Entering android_media_AudioRecord_setup");
- //LOGV("sampleRate=%d, audioFormat=%d, nbChannels=%d, buffSizeInBytes=%d",
- // sampleRateInHertz, audioFormat, nbChannels, buffSizeInBytes);
+ //LOGV("sampleRate=%d, audioFormat=%d, channels=%x, buffSizeInBytes=%d",
+ // sampleRateInHertz, audioFormat, channels, buffSizeInBytes);
- if ((nbChannels == 0) || (nbChannels > 2)) {
+ if (!AudioSystem::isInputChannel(channels)) {
LOGE("Error creating AudioRecord: channel count is not 1 or 2.");
- return AUDIORECORD_ERROR_SETUP_INVALIDCHANNELCOUNT;
+ return AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
}
+ uint32_t nbChannels = AudioSystem::popCount(channels);
// compare the format against the Java constants
if ((audioFormat != javaAudioRecordFields.PCM16)
@@ -152,12 +153,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
int frameSize = nbChannels * bytesPerSample;
size_t frameCount = buffSizeInBytes / frameSize;
- // convert and check input source value
- // input_source values defined in AudioRecord.h are equal to
- // JAVA MediaRecord.AudioSource values minus 1.
- AudioRecord::input_source arSource = (AudioRecord::input_source)(source - 1);
- if (arSource < AudioRecord::DEFAULT_INPUT ||
- arSource >= AudioRecord::NUM_INPUT_SOURCES) {
+ if (source >= AUDIO_SOURCE_LIST_END) {
LOGE("Error creating AudioRecord: unknown source.");
return AUDIORECORD_ERROR_SETUP_INVALIDSOURCE;
}
@@ -184,10 +180,10 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
// we use a weak reference so the AudioRecord object can be garbage collected.
lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
- lpRecorder->set(arSource,
+ lpRecorder->set(source,
sampleRateInHertz,
format, // word length, PCM
- nbChannels,
+ channels,
frameCount,
0, // flags
recorderCallback,// callback_t
diff --git a/core/jni/android_media_AudioSystem.cpp b/core/jni/android_media_AudioSystem.cpp
index 692610e..3d8d296 100644
--- a/core/jni/android_media_AudioSystem.cpp
+++ b/core/jni/android_media_AudioSystem.cpp
@@ -50,25 +50,6 @@ static int check_AudioSystem_Command(status_t status)
}
static int
-android_media_AudioSystem_setVolume(JNIEnv *env, jobject clazz, jint type, jint volume)
-{
- LOGV("setVolume(%d)", int(volume));
-
- return check_AudioSystem_Command(AudioSystem::setStreamVolume(type, AudioSystem::linearToLog(volume)));
-}
-
-static int
-android_media_AudioSystem_getVolume(JNIEnv *env, jobject clazz, jint type)
-{
- float v;
- int v_int = -1;
- if (AudioSystem::getStreamVolume(int(type), &v) == NO_ERROR) {
- v_int = AudioSystem::logToLinear(v);
- }
- return v_int;
-}
-
-static int
android_media_AudioSystem_muteMicrophone(JNIEnv *env, jobject thiz, jboolean on)
{
return check_AudioSystem_Command(AudioSystem::muteMicrophone(on));
@@ -82,34 +63,6 @@ android_media_AudioSystem_isMicrophoneMuted(JNIEnv *env, jobject thiz)
return state;
}
-static int
-android_media_AudioSystem_setRouting(JNIEnv *env, jobject clazz, jint mode, jint routes, jint mask)
-{
- return check_AudioSystem_Command(AudioSystem::setRouting(mode, uint32_t(routes), uint32_t(mask)));
-}
-
-static jint
-android_media_AudioSystem_getRouting(JNIEnv *env, jobject clazz, jint mode)
-{
- uint32_t routes = -1;
- AudioSystem::getRouting(mode, &routes);
- return jint(routes);
-}
-
-static int
-android_media_AudioSystem_setMode(JNIEnv *env, jobject clazz, jint mode)
-{
- return check_AudioSystem_Command(AudioSystem::setMode(mode));
-}
-
-static jint
-android_media_AudioSystem_getMode(JNIEnv *env, jobject clazz)
-{
- int mode = AudioSystem::MODE_INVALID;
- AudioSystem::getMode(&mode);
- return jint(mode);
-}
-
static jboolean
android_media_AudioSystem_isMusicActive(JNIEnv *env, jobject thiz)
{
@@ -118,16 +71,29 @@ android_media_AudioSystem_isMusicActive(JNIEnv *env, jobject thiz)
return state;
}
-// Temporary interface, do not use
-// TODO: Replace with a more generic key:value get/set mechanism
-static void
-android_media_AudioSystem_setParameter(JNIEnv *env, jobject thiz, jstring key, jstring value)
+static int
+android_media_AudioSystem_setParameters(JNIEnv *env, jobject thiz, jstring keyValuePairs)
{
- const char *c_key = env->GetStringUTFChars(key, NULL);
- const char *c_value = env->GetStringUTFChars(value, NULL);
- AudioSystem::setParameter(c_key, c_value);
- env->ReleaseStringUTFChars(key, c_key);
- env->ReleaseStringUTFChars(value, c_value);
+ const jchar* c_keyValuePairs = env->GetStringCritical(keyValuePairs, 0);
+ String8 c_keyValuePairs8;
+ if (keyValuePairs) {
+ c_keyValuePairs8 = String8(c_keyValuePairs, env->GetStringLength(keyValuePairs));
+ env->ReleaseStringCritical(keyValuePairs, c_keyValuePairs);
+ }
+ int status = check_AudioSystem_Command(AudioSystem::setParameters(0, c_keyValuePairs8));
+ return status;
+}
+
+static jstring
+android_media_AudioSystem_getParameters(JNIEnv *env, jobject thiz, jstring keys)
+{
+ const jchar* c_keys = env->GetStringCritical(keys, 0);
+ String8 c_keys8;
+ if (keys) {
+ c_keys8 = String8(c_keys, env->GetStringLength(keys));
+ env->ReleaseStringCritical(keys, c_keys);
+ }
+ return env->NewStringUTF(AudioSystem::getParameters(0, c_keys8).string());
}
void android_media_AudioSystem_error_callback(status_t err)
@@ -152,19 +118,93 @@ void android_media_AudioSystem_error_callback(status_t err)
env->CallStaticVoidMethod(clazz, env->GetStaticMethodID(clazz, "errorCallbackFromNative","(I)V"), error);
}
+static int
+android_media_AudioSystem_setDeviceConnectionState(JNIEnv *env, jobject thiz, jint device, jint state, jstring device_address)
+{
+ const char *c_address = env->GetStringUTFChars(device_address, NULL);
+ int status = check_AudioSystem_Command(AudioSystem::setDeviceConnectionState(static_cast <AudioSystem::audio_devices>(device),
+ static_cast <AudioSystem::device_connection_state>(state),
+ c_address));
+ env->ReleaseStringUTFChars(device_address, c_address);
+ return status;
+}
+
+static int
+android_media_AudioSystem_getDeviceConnectionState(JNIEnv *env, jobject thiz, jint device, jstring device_address)
+{
+ const char *c_address = env->GetStringUTFChars(device_address, NULL);
+ int state = static_cast <int>(AudioSystem::getDeviceConnectionState(static_cast <AudioSystem::audio_devices>(device),
+ c_address));
+ env->ReleaseStringUTFChars(device_address, c_address);
+ return state;
+}
+
+static int
+android_media_AudioSystem_setPhoneState(JNIEnv *env, jobject thiz, jint state)
+{
+ return check_AudioSystem_Command(AudioSystem::setPhoneState(state));
+}
+
+static int
+android_media_AudioSystem_setRingerMode(JNIEnv *env, jobject thiz, jint mode, jint mask)
+{
+ return check_AudioSystem_Command(AudioSystem::setRingerMode(mode, mask));
+}
+
+static int
+android_media_AudioSystem_setForceUse(JNIEnv *env, jobject thiz, jint usage, jint config)
+{
+ return check_AudioSystem_Command(AudioSystem::setForceUse(static_cast <AudioSystem::force_use>(usage),
+ static_cast <AudioSystem::forced_config>(config)));
+}
+
+static int
+android_media_AudioSystem_getForceUse(JNIEnv *env, jobject thiz, jint usage)
+{
+ return static_cast <int>(AudioSystem::getForceUse(static_cast <AudioSystem::force_use>(usage)));
+}
+
+static int
+android_media_AudioSystem_initStreamVolume(JNIEnv *env, jobject thiz, jint stream, jint indexMin, jint indexMax)
+{
+ return check_AudioSystem_Command(AudioSystem::initStreamVolume(static_cast <AudioSystem::stream_type>(stream),
+ indexMin,
+ indexMax));
+}
+
+static int
+android_media_AudioSystem_setStreamVolumeIndex(JNIEnv *env, jobject thiz, jint stream, jint index)
+{
+ return check_AudioSystem_Command(AudioSystem::setStreamVolumeIndex(static_cast <AudioSystem::stream_type>(stream), index));
+}
+
+static int
+android_media_AudioSystem_getStreamVolumeIndex(JNIEnv *env, jobject thiz, jint stream)
+{
+ int index;
+ if (AudioSystem::getStreamVolumeIndex(static_cast <AudioSystem::stream_type>(stream), &index) != NO_ERROR) {
+ index = -1;
+ }
+ return index;
+}
+
// ----------------------------------------------------------------------------
static JNINativeMethod gMethods[] = {
- {"setVolume", "(II)I", (void *)android_media_AudioSystem_setVolume},
- {"getVolume", "(I)I", (void *)android_media_AudioSystem_getVolume},
- {"setParameter", "(Ljava/lang/String;Ljava/lang/String;)V", (void *)android_media_AudioSystem_setParameter},
+ {"setParameters", "(Ljava/lang/String;)I", (void *)android_media_AudioSystem_setParameters},
+ {"getParameters", "(Ljava/lang/String;)Ljava/lang/String;", (void *)android_media_AudioSystem_getParameters},
{"muteMicrophone", "(Z)I", (void *)android_media_AudioSystem_muteMicrophone},
{"isMicrophoneMuted", "()Z", (void *)android_media_AudioSystem_isMicrophoneMuted},
- {"setRouting", "(III)I", (void *)android_media_AudioSystem_setRouting},
- {"getRouting", "(I)I", (void *)android_media_AudioSystem_getRouting},
- {"setMode", "(I)I", (void *)android_media_AudioSystem_setMode},
- {"getMode", "()I", (void *)android_media_AudioSystem_getMode},
{"isMusicActive", "()Z", (void *)android_media_AudioSystem_isMusicActive},
+ {"setDeviceConnectionState", "(IILjava/lang/String;)I", (void *)android_media_AudioSystem_setDeviceConnectionState},
+ {"getDeviceConnectionState", "(ILjava/lang/String;)I", (void *)android_media_AudioSystem_getDeviceConnectionState},
+ {"setPhoneState", "(I)I", (void *)android_media_AudioSystem_setPhoneState},
+ {"setRingerMode", "(II)I", (void *)android_media_AudioSystem_setRingerMode},
+ {"setForceUse", "(II)I", (void *)android_media_AudioSystem_setForceUse},
+ {"getForceUse", "(I)I", (void *)android_media_AudioSystem_getForceUse},
+ {"initStreamVolume", "(III)I", (void *)android_media_AudioSystem_initStreamVolume},
+ {"setStreamVolumeIndex","(II)I", (void *)android_media_AudioSystem_setStreamVolumeIndex},
+ {"getStreamVolumeIndex","(I)I", (void *)android_media_AudioSystem_getStreamVolumeIndex}
};
const char* const kClassPathName = "android/media/AudioSystem";
diff --git a/core/jni/android_media_AudioTrack.cpp b/core/jni/android_media_AudioTrack.cpp
index bc7f3f5..6449147 100644
--- a/core/jni/android_media_AudioTrack.cpp
+++ b/core/jni/android_media_AudioTrack.cpp
@@ -103,7 +103,7 @@ class AudioTrackJniStorage {
#define AUDIOTRACK_ERROR_BAD_VALUE -2
#define AUDIOTRACK_ERROR_INVALID_OPERATION -3
#define AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM -16
-#define AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELCOUNT -17
+#define AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK -17
#define AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT -18
#define AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE -19
#define AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED -20
@@ -164,11 +164,11 @@ static void audioCallback(int event, void* user, void *info) {
// ----------------------------------------------------------------------------
static int
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
- jint streamType, jint sampleRateInHertz, jint nbChannels,
+ jint streamType, jint sampleRateInHertz, jint channels,
jint audioFormat, jint buffSizeInBytes, jint memoryMode)
{
- LOGV("sampleRate=%d, audioFormat(from Java)=%d, nbChannels=%d, buffSize=%d",
- sampleRateInHertz, audioFormat, nbChannels, buffSizeInBytes);
+ LOGV("sampleRate=%d, audioFormat(from Java)=%d, channels=%x, buffSize=%d",
+ sampleRateInHertz, audioFormat, channels, buffSizeInBytes);
int afSampleRate;
int afFrameCount;
@@ -181,10 +181,11 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
- if ((nbChannels == 0) || (nbChannels > 2)) {
- LOGE("Error creating AudioTrack: channel count is not 1 or 2.");
- return AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELCOUNT;
+ if (!AudioSystem::isOutputChannel(channels)) {
+ LOGE("Error creating AudioTrack: invalid channel mask.");
+ return AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
}
+ int nbChannels = AudioSystem::popCount(channels);
// check the stream type
AudioSystem::stream_type atStreamType;
@@ -231,15 +232,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
int bytesPerSample = audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1;
int format = audioFormat == javaAudioTrackFields.PCM16 ?
AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT;
- int frameCount;
- if (buffSizeInBytes == -1) {
- // compute the frame count based on the system's output frame count
- // and the native sample rate
- frameCount = (sampleRateInHertz*afFrameCount)/afSampleRate;
- } else {
- // compute the frame count based on the specified buffer size
- frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
- }
+ int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
@@ -271,7 +264,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
atStreamType,// stream type
sampleRateInHertz,
format,// word length, PCM
- nbChannels,
+ channels,
frameCount,
0,// flags
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
@@ -291,7 +284,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
atStreamType,// stream type
sampleRateInHertz,
format,// word length, PCM
- nbChannels,
+ channels,
frameCount,
0,// flags
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 83ff508..503cb31 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -39,21 +39,10 @@ class AudioRecord
{
public:
- // input sources values must always be defined in the range
- // [AudioRecord::DEFAULT_INPUT, AudioRecord::NUM_INPUT_SOURCES[
- enum input_source {
- DEFAULT_INPUT =-1,
- MIC_INPUT = 0,
- VOICE_UPLINK_INPUT = 1,
- VOICE_DOWNLINK_INPUT = 2,
- VOICE_CALL_INPUT = 3,
- NUM_INPUT_SOURCES
- };
-
static const int DEFAULT_SAMPLE_RATE = 8000;
/* Events used by AudioRecord callback function (callback_t).
- *
+ *
* to keep in sync with frameworks/base/media/java/android/media/AudioRecord.java
*/
enum event_type {
@@ -61,7 +50,7 @@ public:
EVENT_OVERRUN = 1, // PCM buffer overrun occured.
EVENT_MARKER = 2, // Record head is at the specified marker position
// (See setMarkerPosition()).
- EVENT_NEW_POS = 3, // Record head is at a new position
+ EVENT_NEW_POS = 3, // Record head is at a new position
// (See setPositionUpdatePeriod()).
};
@@ -123,11 +112,11 @@ public:
*
* Parameters:
*
- * inputSource: Select the audio input to record to (e.g. AudioRecord::MIC_INPUT).
+ * inputSource: Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT).
* sampleRate: Track sampling rate in Hz.
- * format: PCM sample format (e.g AudioSystem::PCM_16_BIT for signed
+ * format: Audio format (e.g AudioSystem::PCM_16_BIT for signed
* 16 bits per sample).
- * channelCount: Number of PCM channels (e.g 2 for stereo).
+ * channels: Channel mask: see AudioSystem::audio_channels.
* frameCount: Total size of track PCM buffer in frames. This defines the
* latency of the track.
* flags: A bitmask of acoustic values from enum record_flags. It enables
@@ -148,7 +137,7 @@ public:
AudioRecord(int inputSource,
uint32_t sampleRate = 0,
int format = 0,
- int channelCount = 0,
+ uint32_t channels = AudioSystem::CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -166,14 +155,14 @@ public:
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful intialization
* - INVALID_OPERATION: AudioRecord is already intitialized or record device is already in use
- * - BAD_VALUE: invalid parameter (channelCount, format, sampleRate...)
+ * - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
* - PERMISSION_DENIED: recording is not allowed for the requesting process
* */
status_t set(int inputSource = 0,
uint32_t sampleRate = 0,
int format = 0,
- int channelCount = 0,
+ uint32_t channels = AudioSystem::CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -199,6 +188,7 @@ public:
int format() const;
int channelCount() const;
+ int channels() const;
uint32_t frameCount() const;
int frameSize() const;
int inputSource() const;
@@ -222,8 +212,8 @@ public:
/* Sets marker position. When record reaches the number of frames specified,
* a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
- * with marker == 0 cancels marker notification callback.
- * If the AudioRecord has been opened with no callback function associated,
+ * with marker == 0 cancels marker notification callback.
+ * If the AudioRecord has been opened with no callback function associated,
* the operation will fail.
*
* Parameters:
@@ -238,10 +228,10 @@ public:
status_t getMarkerPosition(uint32_t *marker);
- /* Sets position update period. Every time the number of frames specified has been recorded,
- * a callback with event type EVENT_NEW_POS is called.
- * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
- * callback.
+ /* Sets position update period. Every time the number of frames specified has been recorded,
+ * a callback with event type EVENT_NEW_POS is called.
+ * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
+ * callback.
* If the AudioRecord has been opened with no callback function associated,
* the operation will fail.
*
@@ -257,8 +247,8 @@ public:
status_t getPositionUpdatePeriod(uint32_t *updatePeriod);
- /* Gets record head position. The position is the total number of frames
- * recorded since record start.
+ /* Gets record head position. The position is the total number of frames
+ * recorded since record start.
*
* Parameters:
*
@@ -270,8 +260,16 @@ public:
*/
status_t getPosition(uint32_t *position);
-
-
+ /* returns a handle on the audio input used by this AudioRecord.
+ *
+ * Parameters:
+ * none.
+ *
+ * Returned value:
+ * handle on audio hardware input
+ */
+ audio_io_handle_t getInput() { return mInput; }
+
/* obtains a buffer of "frameCount" frames. The buffer must be
* filled entirely. If the track is stopped, obtainBuffer() returns
* STOPPED instead of NO_ERROR as long as there are buffers availlable,
@@ -342,6 +340,7 @@ private:
bool mMarkerReached;
uint32_t mNewPosition;
uint32_t mUpdatePeriod;
+ audio_io_handle_t mInput;
};
}; // namespace android
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 3a3a714..0ea04a4 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -24,36 +24,130 @@
namespace android {
typedef void (*audio_error_callback)(status_t err);
+typedef void * audio_io_handle_t;
+
+class IAudioPolicyService;
+class String8;
class AudioSystem
{
public:
enum stream_type {
- DEFAULT =-1,
- VOICE_CALL = 0,
- SYSTEM = 1,
- RING = 2,
- MUSIC = 3,
- ALARM = 4,
- NOTIFICATION = 5,
- BLUETOOTH_SCO = 6,
+ DEFAULT =-1,
+ VOICE_CALL = 0,
+ SYSTEM = 1,
+ RING = 2,
+ MUSIC = 3,
+ ALARM = 4,
+ NOTIFICATION = 5,
+ BLUETOOTH_SCO = 6,
ENFORCED_AUDIBLE = 7, // Sounds that cannot be muted by user and must be routed to speaker
+ DTMF = 8,
+ TTS = 9,
NUM_STREAM_TYPES
};
- enum audio_output_type {
- AUDIO_OUTPUT_DEFAULT =-1,
- AUDIO_OUTPUT_HARDWARE = 0,
- AUDIO_OUTPUT_A2DP = 1,
- NUM_AUDIO_OUTPUT_TYPES
+ // Audio sub formats (see AudioSystem::audio_format).
+ enum pcm_sub_format {
+ PCM_SUB_16_BIT = 0x1, // must be 1 for backward compatibility
+ PCM_SUB_8_BIT = 0x2, // must be 2 for backward compatibility
+ };
+
+ // MP3 sub format field definition : can use 11 LSBs in the same way as MP3 frame header to specify
+ // bit rate, stereo mode, version...
+ enum mp3_sub_format {
+ //TODO
+ };
+
+ // AMR NB/WB sub format field definition: specify frame block interleaving, bandwidth efficient or octet aligned,
+ // encoding mode for recording...
+ enum amr_sub_format {
+ //TODO
+ };
+
+ // AAC sub format field definition: specify profile or bitrate for recording...
+ enum aac_sub_format {
+ //TODO
};
+ // VORBIS sub format field definition: specify quality for recording...
+ enum vorbis_sub_format {
+ //TODO
+ };
+
+ // Audio format consists in a main format field (upper 8 bits) and a sub format field (lower 24 bits).
+ // The main format indicates the main codec type. The sub format field indicates options and parameters
+ // for each format. The sub format is mainly used for record to indicate for instance the requested bitrate
+ // or profile. It can also be used for certain formats to give informations not present in the encoded
+ // audio stream (e.g. octet alignement for AMR).
enum audio_format {
- FORMAT_DEFAULT = 0,
- PCM_16_BIT,
- PCM_8_BIT,
- INVALID_FORMAT
+ INVALID_FORMAT = -1,
+ FORMAT_DEFAULT = 0,
+ PCM = 0x00000000, // must be 0 for backward compatibility
+ MP3 = 0x01000000,
+ AMR_NB = 0x02000000,
+ AMR_WB = 0x03000000,
+ AAC = 0x04000000,
+ HE_AAC_V1 = 0x05000000,
+ HE_AAC_V2 = 0x06000000,
+ VORBIS = 0x07000000,
+ MAIN_FORMAT_MASK = 0xFF000000,
+ SUB_FORMAT_MASK = 0x00FFFFFF,
+ // Aliases
+ PCM_16_BIT = (PCM|PCM_SUB_16_BIT),
+ PCM_8_BIT = (PCM|PCM_SUB_8_BIT)
+ };
+
+
+ // Channel mask definitions must be kept in sync with JAVA values in /media/java/android/media/AudioFormat.java
+ enum audio_channels {
+ // output channels
+ CHANNEL_OUT_FRONT_LEFT = 0x1,
+ CHANNEL_OUT_FRONT_RIGHT = 0x2,
+ CHANNEL_OUT_FRONT_CENTER = 0x4,
+ CHANNEL_OUT_LOW_FREQUENCY = 0x8,
+ CHANNEL_OUT_BACK_LEFT = 0x10,
+ CHANNEL_OUT_BACK_RIGHT = 0x20,
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40,
+ CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80,
+ CHANNEL_OUT_BACK_CENTER = 0x100,
+ CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT,
+ CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT),
+ CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
+ CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER),
+ CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
+ CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER),
+ CHANNEL_OUT_ALL = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER | CHANNEL_OUT_BACK_CENTER),
+
+ // input channels
+ CHANNEL_IN_LEFT = 0x10000,
+ CHANNEL_IN_RIGHT = 0x20000,
+ CHANNEL_IN_FRONT = 0x40000,
+ CHANNEL_IN_BACK = 0x80000,
+ CHANNEL_IN_LEFT_PROCESSED = 0x100000,
+ CHANNEL_IN_RIGHT_PROCESSED = 0x200000,
+ CHANNEL_IN_FRONT_PROCESSED = 0x400000,
+ CHANNEL_IN_BACK_PROCESSED = 0x800000,
+ CHANNEL_IN_PRESSURE = 0x1000000,
+ CHANNEL_IN_X_AXIS = 0x2000000,
+ CHANNEL_IN_Y_AXIS = 0x4000000,
+ CHANNEL_IN_Z_AXIS = 0x8000000,
+ CHANNEL_IN_VOICE_UPLINK = 0x10000000,
+ CHANNEL_IN_VOICE_DNLINK = 0x20000000,
+ CHANNEL_IN_MONO = CHANNEL_IN_FRONT,
+ CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT),
+ CHANNEL_IN_ALL = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT | CHANNEL_IN_FRONT | CHANNEL_IN_BACK|
+ CHANNEL_IN_LEFT_PROCESSED | CHANNEL_IN_RIGHT_PROCESSED | CHANNEL_IN_FRONT_PROCESSED | CHANNEL_IN_BACK_PROCESSED|
+ CHANNEL_IN_PRESSURE | CHANNEL_IN_X_AXIS | CHANNEL_IN_Y_AXIS | CHANNEL_IN_Z_AXIS |
+ CHANNEL_IN_VOICE_UPLINK | CHANNEL_IN_VOICE_DNLINK)
};
enum audio_mode {
@@ -65,15 +159,6 @@ public:
NUM_MODES // not a valid entry, denotes end-of-list
};
- enum audio_routes {
- ROUTE_EARPIECE = (1 << 0),
- ROUTE_SPEAKER = (1 << 1),
- ROUTE_BLUETOOTH_SCO = (1 << 2),
- ROUTE_HEADSET = (1 << 3),
- ROUTE_BLUETOOTH_A2DP = (1 << 4),
- ROUTE_ALL = -1UL,
- };
-
enum audio_in_acoustics {
AGC_ENABLE = 0x0001,
AGC_DISABLE = 0,
@@ -87,36 +172,37 @@ public:
* only privileged processes can have access to them
*/
- // routing helper functions
- static status_t speakerphone(bool state);
- static status_t isSpeakerphoneOn(bool* state);
- static status_t bluetoothSco(bool state);
- static status_t isBluetoothScoOn(bool* state);
+ // mute/unmute microphone
static status_t muteMicrophone(bool state);
static status_t isMicrophoneMuted(bool *state);
+ // set/get master volume
static status_t setMasterVolume(float value);
- static status_t setMasterMute(bool mute);
static status_t getMasterVolume(float* volume);
+ // mute/unmute audio outputs
+ static status_t setMasterMute(bool mute);
static status_t getMasterMute(bool* mute);
- static status_t setStreamVolume(int stream, float value);
+ // set/get stream volume on specified output
+ static status_t setStreamVolume(int stream, float value, void *output);
+ static status_t getStreamVolume(int stream, float* volume, void *output);
+
+ // mute/unmute stream
static status_t setStreamMute(int stream, bool mute);
- static status_t getStreamVolume(int stream, float* volume);
static status_t getStreamMute(int stream, bool* mute);
+ // set audio mode in audio hardware (see AudioSystem::audio_mode)
static status_t setMode(int mode);
- static status_t getMode(int* mode);
-
- static status_t setRouting(int mode, uint32_t routes, uint32_t mask);
- static status_t getRouting(int mode, uint32_t* routes);
+ // returns true if tracks are active on AudioSystem::MUSIC stream
static status_t isMusicActive(bool *state);
- // Temporary interface, do not use
- // TODO: Replace with a more generic key:value get/set mechanism
- static status_t setParameter(const char* key, const char* value);
-
+ // set/get audio hardware parameters. The function accepts a list of parameters
+ // key value pairs in the form: key1=value1;key2=value2;...
+ // Some keys are reserved for standard parameters (See AudioParameter class).
+ static status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
+ static String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+
static void setErrorCallback(audio_error_callback cb);
// helper function to obtain AudioFlinger service handle
@@ -130,47 +216,247 @@ public:
static status_t getOutputLatency(uint32_t* latency, int stream = DEFAULT);
static bool routedToA2dpOutput(int streamType);
-
- static status_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
+
+ static status_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
size_t* buffSize);
+
+ //
+ // AudioPolicyService interface
+ //
+
+ enum audio_devices {
+ // output devices
+ DEVICE_OUT_EARPIECE = 0x1,
+ DEVICE_OUT_SPEAKER = 0x2,
+ DEVICE_OUT_WIRED_HEADSET = 0x4,
+ DEVICE_OUT_WIRED_HEADPHONE = 0x8,
+ DEVICE_OUT_BLUETOOTH_SCO = 0x10,
+ DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20,
+ DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40,
+ DEVICE_OUT_BLUETOOTH_A2DP = 0x80,
+ DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
+ DEVICE_OUT_AUX_DIGITAL = 0x400,
+ DEVICE_OUT_FM_HEADPHONE = 0x800,
+ DEVICE_OUT_FM_SPEAKER = 0x1000,
+ DEVICE_OUT_TTY = 0x2000,
+ DEVICE_OUT_DEFAULT = 0x8000,
+ DEVICE_OUT_ALL = (DEVICE_OUT_EARPIECE | DEVICE_OUT_SPEAKER | DEVICE_OUT_WIRED_HEADSET |
+ DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+ DEVICE_OUT_BLUETOOTH_SCO_CARKIT | DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL | DEVICE_OUT_FM_HEADPHONE |
+ DEVICE_OUT_FM_SPEAKER | DEVICE_OUT_TTY | DEVICE_OUT_DEFAULT),
+
+ // input devices
+ DEVICE_IN_COMMUNICATION = 0x10000,
+ DEVICE_IN_AMBIENT = 0x20000,
+ DEVICE_IN_BUILTIN_MIC = 0x40000,
+ DEVICE_IN_BLUETOOTH_SCO_HEADSET = 0x80000,
+ DEVICE_IN_WIRED_HEADSET = 0x100000,
+ DEVICE_IN_AUX_DIGITAL = 0x200000,
+ DEVICE_IN_VOICE_CALL = 0x400000,
+ DEVICE_IN_DEFAULT = 0x80000000,
+
+ DEVICE_IN_ALL = (DEVICE_IN_COMMUNICATION | DEVICE_IN_AMBIENT | DEVICE_IN_BUILTIN_MIC |
+ DEVICE_IN_BLUETOOTH_SCO_HEADSET | DEVICE_IN_WIRED_HEADSET | DEVICE_IN_AUX_DIGITAL |
+ DEVICE_IN_VOICE_CALL| DEVICE_IN_DEFAULT)
+ };
+
+ // device connection states used for setDeviceConnectionState()
+ enum device_connection_state {
+ DEVICE_STATE_UNAVAILABLE,
+ DEVICE_STATE_AVAILABLE,
+ NUM_DEVICE_STATES
+ };
+
+ // request to open a direct output with getOutput() (by opposition to sharing an output with other AudioTracks)
+ enum output_flags {
+ OUTPUT_FLAG_INDIRECT = 0x0,
+ OUTPUT_FLAG_DIRECT = 0x1
+ };
+
+ // device categories used for setForceUse()
+ enum forced_config {
+ FORCE_NONE,
+ FORCE_SPEAKER,
+ FORCE_HEADPHONES,
+ FORCE_BT_SCO,
+ FORCE_BT_A2DP,
+ FORCE_WIRED_ACCESSORY,
+ NUM_FORCE_CONFIG,
+ FORCE_DEFAULT = FORCE_NONE
+ };
+
+ // usages used for setForceUse()
+ enum force_use {
+ FOR_COMMUNICATION,
+ FOR_MEDIA,
+ FOR_RECORD,
+ NUM_FORCE_USE
+ };
+
+ // types of io configuration change events received with ioConfigChanged()
+ enum io_config_event {
+ OUTPUT_OPENED,
+ OUTPUT_CLOSED,
+ OUTPUT_CONFIG_CHANGED,
+ INPUT_OPENED,
+ INPUT_CLOSED,
+ INPUT_CONFIG_CHANGED,
+ STREAM_CONFIG_CHANGED,
+ NUM_CONFIG_EVENTS
+ };
+
+ // audio output descritor used to cache output configurations in client process to avoid frequent calls
+ // through IAudioFlinger
+ class OutputDescriptor {
+ public:
+ OutputDescriptor()
+ : samplingRate(0), format(0), channels(0), frameCount(0), latency(0) {}
+
+ uint32_t samplingRate;
+ int32_t format;
+ int32_t channels;
+ size_t frameCount;
+ uint32_t latency;
+ };
+
+ //
+ // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
+ //
+ static status_t setDeviceConnectionState(audio_devices device, device_connection_state state, const char *device_address);
+ static device_connection_state getDeviceConnectionState(audio_devices device, const char *device_address);
+ static status_t setPhoneState(int state);
+ static status_t setRingerMode(uint32_t mode, uint32_t mask);
+ static status_t setForceUse(force_use usage, forced_config config);
+ static forced_config getForceUse(force_use usage);
+ static audio_io_handle_t getOutput(stream_type stream,
+ uint32_t samplingRate = 0,
+ uint32_t format = FORMAT_DEFAULT,
+ uint32_t channels = CHANNEL_OUT_STEREO,
+ output_flags flags = OUTPUT_FLAG_INDIRECT);
+ static status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ static status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ static void releaseOutput(audio_io_handle_t output);
+ static audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate = 0,
+ uint32_t format = FORMAT_DEFAULT,
+ uint32_t channels = CHANNEL_IN_MONO,
+ audio_in_acoustics acoustics = (audio_in_acoustics)0);
+ static status_t startInput(audio_io_handle_t input);
+ static status_t stopInput(audio_io_handle_t input);
+ static void releaseInput(audio_io_handle_t input);
+ static status_t initStreamVolume(stream_type stream,
+ int indexMin,
+ int indexMax);
+ static status_t setStreamVolumeIndex(stream_type stream, int index);
+ static status_t getStreamVolumeIndex(stream_type stream, int *index);
+
+ static const sp<IAudioPolicyService>& get_audio_policy_service();
+
// ----------------------------------------------------------------------------
+ static uint32_t popCount(uint32_t u);
+ static bool isOutputDevice(audio_devices device);
+ static bool isInputDevice(audio_devices device);
+ static bool isA2dpDevice(audio_devices device);
+ static bool isBluetoothScoDevice(audio_devices device);
+ static bool isLowVisibility(stream_type stream);
+ static bool isOutputChannel(uint32_t channel);
+ static bool isInputChannel(uint32_t channel);
+ static bool isValidFormat(uint32_t format);
+ static bool isLinearPCM(uint32_t format);
+
private:
class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
{
public:
- AudioFlingerClient() {
+ AudioFlingerClient() {
}
-
+
// DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
-
+
// IAudioFlingerClient
- virtual void a2dpEnabledChanged(bool enabled);
-
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters upto date in client process
+ virtual void ioConfigChanged(int event, void *param1, void *param2);
};
- static int getOutput(int streamType);
- static sp<AudioFlingerClient> gAudioFlingerClient;
+ class AudioPolicyServiceClient: public IBinder::DeathRecipient
+ {
+ public:
+ AudioPolicyServiceClient() {
+ }
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+ };
+
+ static sp<AudioFlingerClient> gAudioFlingerClient;
+ static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
friend class AudioFlingerClient;
+ friend class AudioPolicyServiceClient;
static Mutex gLock;
static sp<IAudioFlinger> gAudioFlinger;
static audio_error_callback gAudioErrorCallback;
- static int gOutSamplingRate[NUM_AUDIO_OUTPUT_TYPES];
- static int gOutFrameCount[NUM_AUDIO_OUTPUT_TYPES];
- static uint32_t gOutLatency[NUM_AUDIO_OUTPUT_TYPES];
- static bool gA2dpEnabled;
-
+
static size_t gInBuffSize;
// previous parameters for recording buffer size queries
static uint32_t gPrevInSamplingRate;
static int gPrevInFormat;
static int gPrevInChannelCount;
+ static sp<IAudioPolicyService> gAudioPolicyService;
+
+ // mapping between stream types and outputs
+ static DefaultKeyedVector<int, audio_io_handle_t> gStreamOutputMap;
+ // list of output descritor containing cached parameters (sampling rate, framecount, channel count...)
+ static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
+};
+
+class AudioParameter {
+
+public:
+ AudioParameter() {}
+ AudioParameter(const String8& keyValuePairs);
+ virtual ~AudioParameter();
+
+ // reserved parameter keys for changeing standard parameters with setParameters() function.
+ // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
+ // configuration changes and act accordingly.
+ // keyRouting: to change audio routing, value is an int in AudioSystem::audio_devices
+ // keySamplingRate: to change sampling rate routing, value is an int
+ // keyFormat: to change audio format, value is an int in AudioSystem::audio_format
+ // keyChannels: to change audio channel configuration, value is an int in AudioSystem::audio_channels
+ // keyFrameCount: to change audio output frame count, value is an int
+ static const char *keyRouting;
+ static const char *keySamplingRate;
+ static const char *keyFormat;
+ static const char *keyChannels;
+ static const char *keyFrameCount;
+
+ String8 toString();
+
+ status_t add(const String8& key, const String8& value);
+ status_t addInt(const String8& key, const int value);
+ status_t addFloat(const String8& key, const float value);
+
+ status_t remove(const String8& key);
+
+ status_t get(const String8& key, String8& value);
+ status_t getInt(const String8& key, int& value);
+ status_t getFloat(const String8& key, float& value);
+
+ size_t size() { return mParameters.size(); }
+
+private:
+ String8 mKeyValuePairs;
+ KeyedVector <String8, String8> mParameters;
};
}; // namespace android
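The AudioParameter helper and AudioSystem::setParameters() declared above replace the removed setRouting()/setParameter(key, value) calls. A hypothetical client-side use, assuming an output handle previously obtained from getOutput(); the choice of DEVICE_OUT_SPEAKER is purely for illustration:

    // Illustrative only: request a routing change for one output through the
    // generic key=value parameter mechanism added by this patch.
    #include <media/AudioSystem.h>
    #include <utils/String8.h>

    using namespace android;

    void forceOutputToSpeaker(audio_io_handle_t output)
    {
        AudioParameter param;
        // keyRouting is one of the reserved keys documented in AudioSystem.h above;
        // the value is an AudioSystem::audio_devices bit (DEVICE_OUT_SPEAKER == 0x2).
        param.addInt(String8(AudioParameter::keyRouting),
                     (int)AudioSystem::DEVICE_OUT_SPEAKER);
        // Serialized as key=value pairs and forwarded by AudioFlinger to the
        // hardware interface behind the given output.
        AudioSystem::setParameters(output, param.toString());
    }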
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 2e1fbda..981c2f6 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -117,9 +117,9 @@ public:
* streamType: Select the type of audio stream this track is attached to
* (e.g. AudioSystem::MUSIC).
* sampleRate: Track sampling rate in Hz.
- * format: PCM sample format (e.g AudioSystem::PCM_16_BIT for signed
+ * format: Audio format (e.g AudioSystem::PCM_16_BIT for signed
* 16 bits per sample).
- * channelCount: Number of PCM channels (e.g 2 for stereo).
+ * channels: Channel mask: see AudioSystem::audio_channels.
* frameCount: Total size of track PCM buffer in frames. This defines the
* latency of the track.
* flags: Reserved for future use.
@@ -133,7 +133,7 @@ public:
AudioTrack( int streamType,
uint32_t sampleRate = 0,
int format = 0,
- int channelCount = 0,
+ int channels = 0,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -152,7 +152,7 @@ public:
AudioTrack( int streamType,
uint32_t sampleRate = 0,
int format = 0,
- int channelCount = 0,
+ int channels = 0,
const sp<IMemory>& sharedBuffer = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -169,13 +169,13 @@ public:
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful intialization
* - INVALID_OPERATION: AudioTrack is already intitialized
- * - BAD_VALUE: invalid parameter (channelCount, format, sampleRate...)
+ * - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
* */
status_t set(int streamType =-1,
uint32_t sampleRate = 0,
int format = 0,
- int channelCount = 0,
+ int channels = 0,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -330,6 +330,16 @@ public:
*/
status_t reload();
+ /* returns a handle on the audio output used by this AudioTrack.
+ *
+ * Parameters:
+ * none.
+ *
+ * Returned value:
+ * handle on audio hardware output
+ */
+ audio_io_handle_t getOutput();
+
/* obtains a buffer of "frameCount" frames. The buffer must be
* filled entirely. If the track is stopped, obtainBuffer() returns
* STOPPED instead of NO_ERROR as long as there are buffers availlable,
@@ -387,7 +397,6 @@ private:
sp<AudioTrackThread> mAudioTrackThread;
float mVolume[2];
- uint32_t mSampleRate;
uint32_t mFrameCount;
audio_track_cblk_t* mCblk;
@@ -395,6 +404,7 @@ private:
uint8_t mFormat;
uint8_t mChannelCount;
uint8_t mMuted;
+ uint32_t mChannels;
status_t mStatus;
uint32_t mLatency;
@@ -410,6 +420,7 @@ private:
bool mMarkerReached;
uint32_t mNewPosition;
uint32_t mUpdatePeriod;
+ uint32_t mFlags;
};
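Note: with this change the fourth constructor/set() argument is a channel mask rather than a channel count, and getOutput() exposes the I/O handle selected for the track. A hedged usage sketch (error handling omitted; initCheck() is assumed to be available as before):

    AudioTrack track(AudioSystem::MUSIC,
                     44100,                              // sampling rate in Hz
                     AudioSystem::PCM_16_BIT,            // audio format
                     AudioSystem::CHANNEL_OUT_STEREO,    // channel mask, no longer "2"
                     0);                                 // default frame count
    if (track.initCheck() == NO_ERROR) {
        audio_io_handle_t output = track.getOutput();    // output chosen by the audio policy
    }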
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index bac3d29..26e6972 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -27,7 +27,7 @@
#include <media/IAudioTrack.h>
#include <media/IAudioRecord.h>
#include <media/IAudioFlingerClient.h>
-
+#include <utils/String8.h>
namespace android {
@@ -50,11 +50,12 @@ public:
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
+ void *output,
status_t *status) = 0;
virtual sp<IAudioRecord> openRecord(
pid_t pid,
- int inputSource,
+ void *input,
uint32_t sampleRate,
int format,
int channelCount,
@@ -65,11 +66,11 @@ public:
/* query the audio hardware state. This state never changes,
* and therefore can be cached.
*/
- virtual uint32_t sampleRate(int output) const = 0;
- virtual int channelCount(int output) const = 0;
- virtual int format(int output) const = 0;
- virtual size_t frameCount(int output) const = 0;
- virtual uint32_t latency(int output) const = 0;
+ virtual uint32_t sampleRate(void *output) const = 0;
+ virtual int channelCount(void *output) const = 0;
+ virtual int format(void *output) const = 0;
+ virtual size_t frameCount(void *output) const = 0;
+ virtual uint32_t latency(void *output) const = 0;
/* set/get the audio hardware state. This will probably be used by
* the preference panel, mostly.
@@ -83,19 +84,14 @@ public:
/* set/get stream type state. This will probably be used by
* the preference panel, mostly.
*/
- virtual status_t setStreamVolume(int stream, float value) = 0;
+ virtual status_t setStreamVolume(int stream, float value, void *output) = 0;
virtual status_t setStreamMute(int stream, bool muted) = 0;
- virtual float streamVolume(int stream) const = 0;
+ virtual float streamVolume(int stream, void *output) const = 0;
virtual bool streamMute(int stream) const = 0;
- // set/get audio routing
- virtual status_t setRouting(int mode, uint32_t routes, uint32_t mask) = 0;
- virtual uint32_t getRouting(int mode) const = 0;
-
- // set/get audio mode
+ // set audio mode
virtual status_t setMode(int mode) = 0;
- virtual int getMode() const = 0;
// mic mute/state
virtual status_t setMicMute(bool state) = 0;
@@ -104,22 +100,34 @@ public:
// is a music stream active?
virtual bool isMusicActive() const = 0;
- // pass a generic configuration parameter to libaudio
- // Temporary interface, do not use
- // TODO: Replace with a more generic key:value get/set mechanism
- virtual status_t setParameter(const char* key, const char* value) = 0;
+ virtual status_t setParameters(void *ioHandle, const String8& keyValuePairs) = 0;
+ virtual String8 getParameters(void *ioHandle, const String8& keys) = 0;
// register a current process for audio output change notifications
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
// retrieve the audio recording buffer size
virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount) = 0;
-
- // force AudioFlinger thread out of standby
- virtual void wakeUp() = 0;
- // is A2DP output enabled
- virtual bool isA2dpEnabled() const = 0;
+ virtual void *openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ uint32_t flags) = 0;
+ virtual void *openDuplicateOutput(void *output1, void *output2) = 0;
+ virtual status_t closeOutput(void *output) = 0;
+ virtual status_t suspendOutput(void *output) = 0;
+ virtual status_t restoreOutput(void *output) = 0;
+
+ virtual void *openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics) = 0;
+ virtual status_t closeInput(void *input) = 0;
+
+ virtual status_t setStreamOutput(uint32_t stream, void *output) = 0;
};
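Note: outputs and inputs are now opened explicitly and identified by opaque handles instead of fixed output indices. The sketch below shows the intended calling pattern from a policy manager; the device constant and the audioFlinger proxy are assumptions, and the pointer arguments are in/out (requested configuration in, granted configuration out):

    uint32_t device   = AudioSystem::DEVICE_OUT_SPEAKER;   // assumed device constant
    uint32_t rate     = 44100;
    uint32_t format   = AudioSystem::PCM_16_BIT;
    uint32_t channels = AudioSystem::CHANNEL_OUT_STEREO;
    uint32_t latency  = 0;

    void *output = audioFlinger->openOutput(&device, &rate, &format,
                                            &channels, &latency, 0 /* flags */);
    if (output != NULL) {
        // the in/out arguments now hold the configuration actually granted
        audioFlinger->setStreamOutput(AudioSystem::MUSIC, output);
        // ... playback ...
        audioFlinger->closeOutput(output);
    }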
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
index 383ec0c..78142ce 100644
--- a/include/media/IAudioFlingerClient.h
+++ b/include/media/IAudioFlingerClient.h
@@ -20,7 +20,7 @@
#include <utils/RefBase.h>
#include <binder/IInterface.h>
-
+#include <utils/KeyedVector.h>
namespace android {
@@ -31,8 +31,8 @@ class IAudioFlingerClient : public IInterface
public:
DECLARE_META_INTERFACE(AudioFlingerClient);
- // Notifies a change of audio output from/to hardware to/from A2DP.
- virtual void a2dpEnabledChanged(bool enabled) = 0;
+ // Notifies a change of audio input/output configuration.
+ virtual void ioConfigChanged(int event, void *param1, void *param2) = 0;
};
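Note: clients that previously implemented a2dpEnabledChanged() now receive generic I/O configuration events. A hypothetical client implementation; the meaning of param1/param2 is defined by AudioFlinger per event type:

    class MyAudioFlingerClient : public BnAudioFlingerClient {
    public:
        virtual void ioConfigChanged(int event, void *param1, void *param2) {
            // param1 typically identifies the affected input/output handle
            LOGV("ioConfigChanged() event %d io %p", event, param1);
        }
    };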
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
new file mode 100644
index 0000000..4804bbd
--- /dev/null
+++ b/include/media/IAudioPolicyService.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IAUDIOPOLICYSERVICE_H
+#define ANDROID_IAUDIOPOLICYSERVICE_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+#include <media/AudioSystem.h>
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class IAudioPolicyService : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(AudioPolicyService);
+
+ //
+ // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
+ //
+ virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address) = 0;
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address) = 0;
+ virtual status_t setPhoneState(int state) = 0;
+ virtual status_t setRingerMode(uint32_t mode, uint32_t mask) = 0;
+ virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config) = 0;
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage) = 0;
+ virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT) = 0;
+ virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) = 0;
+ virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) = 0;
+ virtual void releaseOutput(audio_io_handle_t output) = 0;
+ virtual audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0) = 0;
+ virtual status_t startInput(audio_io_handle_t input) = 0;
+ virtual status_t stopInput(audio_io_handle_t input) = 0;
+ virtual void releaseInput(audio_io_handle_t input) = 0;
+ virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax) = 0;
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index) = 0;
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index) = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+
+class BnAudioPolicyService : public BnInterface<IAudioPolicyService>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_IAUDIOPOLICYSERVICE_H
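Note: the output-side methods above are meant to be used as a get/start/stop/release sequence around playback. An illustrative sequence (the audioPolicy proxy is an assumption; all arguments beyond the stream type have defaults):

    audio_io_handle_t output = audioPolicy->getOutput(AudioSystem::MUSIC,
                                                      44100,
                                                      AudioSystem::PCM_16_BIT,
                                                      AudioSystem::CHANNEL_OUT_STEREO);
    if (output != 0) {
        audioPolicy->startOutput(output, AudioSystem::MUSIC);   // apply routing, start the stream
        // ... play audio ...
        audioPolicy->stopOutput(output, AudioSystem::MUSIC);
        audioPolicy->releaseOutput(output);                     // policy manager may close the output
    }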
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 496a739..8e2db20 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -55,17 +55,18 @@ struct audio_track_cblk_t
uint32_t volumeLR;
};
uint32_t sampleRate;
+ // NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
+ // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
+ // 16 bit because data is converted to 16 bit before being stored in buffer
+ uint32_t frameSize;
uint8_t channels;
 uint8_t flowControlFlag; // underrun (out) or overrun (in) indication
uint8_t out; // out equals 1 for AudioTrack and 0 for AudioRecord
- uint8_t forceReady;
+ uint8_t forceReady;
uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
uint16_t waitTimeMs; // Cumulated wait time
- // Padding ensuring that data buffer starts on a cache line boundary (32 bytes).
- // See AudioFlinger::TrackBase constructor
- int32_t Padding[1];
- // Cache line boundary
-
+ // Cache line boundary (32 bytes)
+
audio_track_cblk_t();
uint32_t stepUser(uint32_t frameCount);
bool stepServer(uint32_t frameCount);
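Note: the frameSize comment added above is easiest to see with a worked example. The sizes below are hypothetical and assume a mono 8-bit PCM track whose data is widened to 16 bit before being stored in the shared buffer:

    size_t trackFrameSize = 1 * sizeof(uint8_t);   // AudioTrack::frameSize(): 1 byte per frame
    size_t cblkFrameSize  = 1 * sizeof(int16_t);   // audio_track_cblk_t::frameSize: 2 bytes
    // offsets inside the shared buffer must be computed with cblkFrameSize,
    // not with the client-visible frame size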
diff --git a/include/tts/TtsEngine.h b/include/tts/TtsEngine.h
index ed084ca..28b0d2f 100644
--- a/include/tts/TtsEngine.h
+++ b/include/tts/TtsEngine.h
@@ -43,7 +43,7 @@ enum tts_callback_status {
// @param [inout] void *& - The userdata pointer set in the original
// synth call
// @param [in] uint32_t - Track sampling rate in Hz
-// @param [in] audio_format - The AudioSystem::audio_format enum
+// @param [in] uint32_t - The audio format
// @param [in] int - The number of channels
// @param [inout] int8_t *& - A buffer of audio data only valid during the
// execution of the callback
@@ -54,7 +54,7 @@ enum tts_callback_status {
// TTS_CALLBACK_CONTINUE to indicate the synthesis must continue if
// there is more data to produce.
typedef tts_callback_status (synthDoneCB_t)(void *&, uint32_t,
- AudioSystem::audio_format, int, int8_t *&, size_t&, tts_synth_status);
+ uint32_t, int, int8_t *&, size_t&, tts_synth_status);
class TtsEngine;
extern "C" TtsEngine* getTtsEngine();
diff --git a/libs/audioflinger/A2dpAudioInterface.cpp b/libs/audioflinger/A2dpAudioInterface.cpp
index 16a4f2d..6f9e934 100644
--- a/libs/audioflinger/A2dpAudioInterface.cpp
+++ b/libs/audioflinger/A2dpAudioInterface.cpp
@@ -29,25 +29,41 @@ namespace android {
// ----------------------------------------------------------------------------
-A2dpAudioInterface::A2dpAudioInterface() :
- mOutput(0)
+//AudioHardwareInterface* A2dpAudioInterface::createA2dpInterface()
+//{
+// AudioHardwareInterface* hw = 0;
+//
+// hw = AudioHardwareInterface::create();
+// LOGD("new A2dpAudioInterface(hw: %p)", hw);
+// hw = new A2dpAudioInterface(hw);
+// return hw;
+//}
+
+A2dpAudioInterface::A2dpAudioInterface(AudioHardwareInterface* hw) :
+ mOutput(0), mHardwareInterface(hw), mBluetoothEnabled(true)
{
}
A2dpAudioInterface::~A2dpAudioInterface()
{
- delete mOutput;
+ closeOutputStream((AudioStreamOut *)mOutput);
+ delete mHardwareInterface;
}
status_t A2dpAudioInterface::initCheck()
{
- return 0;
+ if (mHardwareInterface == 0) return NO_INIT;
+ return mHardwareInterface->initCheck();
}
AudioStreamOut* A2dpAudioInterface::openOutputStream(
- int format, int channelCount, uint32_t sampleRate, status_t *status)
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
{
- LOGD("A2dpAudioInterface::openOutputStream %d, %d, %d\n", format, channelCount, sampleRate);
+ if (!AudioSystem::isA2dpDevice((AudioSystem::audio_devices)devices)) {
+ LOGV("A2dpAudioInterface::openOutputStream() open HW device: %x", devices);
+ return mHardwareInterface->openOutputStream(devices, format, channels, sampleRate, status);
+ }
+
status_t err = 0;
// only one output stream allowed
@@ -59,71 +75,127 @@ AudioStreamOut* A2dpAudioInterface::openOutputStream(
// create new output stream
A2dpAudioStreamOut* out = new A2dpAudioStreamOut();
- if ((err = out->set(format, channelCount, sampleRate)) == NO_ERROR) {
+ if ((err = out->set(devices, format, channels, sampleRate)) == NO_ERROR) {
mOutput = out;
+ mOutput->setBluetoothEnabled(mBluetoothEnabled);
} else {
delete out;
}
-
+
if (status)
*status = err;
return mOutput;
}
+void A2dpAudioInterface::closeOutputStream(AudioStreamOut* out) {
+ if (mOutput == 0 || mOutput != out) {
+ LOGW("Attempt to close invalid output stream");
+ }
+ else {
+ delete mOutput;
+ mOutput = 0;
+ }
+}
+
+
AudioStreamIn* A2dpAudioInterface::openInputStream(
- int inputSource, int format, int channelCount, uint32_t sampleRate,
- status_t *status, AudioSystem::audio_in_acoustics acoustics)
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status,
+ AudioSystem::audio_in_acoustics acoustics)
{
- if (status)
- *status = -1;
- return NULL;
+ return mHardwareInterface->openInputStream(devices, format, channels, sampleRate, status, acoustics);
+}
+
+void A2dpAudioInterface::closeInputStream(AudioStreamIn* in)
+{
+ return mHardwareInterface->closeInputStream(in);
+}
+
+status_t A2dpAudioInterface::setMode(int mode)
+{
+ return mHardwareInterface->setMode(mode);
}
status_t A2dpAudioInterface::setMicMute(bool state)
{
- return 0;
+ return mHardwareInterface->setMicMute(state);
}
status_t A2dpAudioInterface::getMicMute(bool* state)
{
- return 0;
+ return mHardwareInterface->getMicMute(state);
}
-status_t A2dpAudioInterface::setParameter(const char *key, const char *value)
+status_t A2dpAudioInterface::setParameters(const String8& keyValuePairs)
{
- LOGD("setParameter %s,%s\n", key, value);
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 value;
+ String8 key;
+ status_t status = NO_ERROR;
+
+ LOGV("setParameters() %s", keyValuePairs.string());
+
+ key = "bluetooth_enabled";
+ if (param.get(key, value) == NO_ERROR) {
+ mBluetoothEnabled = (value == "true");
+ if (mOutput) {
+ mOutput->setBluetoothEnabled(mBluetoothEnabled);
+ }
+ param.remove(key);
+ }
- if (!key || !value)
- return -EINVAL;
+ if (param.size()) {
+ status_t hwStatus = mHardwareInterface->setParameters(param.toString());
+ if (status == NO_ERROR) {
+ status = hwStatus;
+ }
+ }
- if (strcmp(key, "a2dp_sink_address") == 0) {
- return mOutput->setAddress(value);
+ return status;
+}
+
+String8 A2dpAudioInterface::getParameters(const String8& keys)
+{
+ AudioParameter param = AudioParameter(keys);
+ AudioParameter a2dpParam = AudioParameter();
+ String8 value;
+ String8 key;
+
+ key = "bluetooth_enabled";
+ if (param.get(key, value) == NO_ERROR) {
+ value = mBluetoothEnabled ? "true" : "false";
+ a2dpParam.add(key, value);
+ param.remove(key);
}
- if (strcmp(key, "bluetooth_enabled") == 0) {
- mOutput->setBluetoothEnabled(strcmp(value, "true") == 0);
+
+ String8 keyValuePairs = a2dpParam.toString();
+
+ if (param.size()) {
+ keyValuePairs += ";";
+ keyValuePairs += mHardwareInterface->getParameters(param.toString());
}
- return 0;
+ LOGV("getParameters() %s", keyValuePairs.string());
+ return keyValuePairs;
}
-status_t A2dpAudioInterface::setVoiceVolume(float v)
+size_t A2dpAudioInterface::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
{
- return 0;
+ return mHardwareInterface->getInputBufferSize(sampleRate, format, channelCount);
}
-status_t A2dpAudioInterface::setMasterVolume(float v)
+status_t A2dpAudioInterface::setVoiceVolume(float v)
{
- return 0;
+ return mHardwareInterface->setVoiceVolume(v);
}
-status_t A2dpAudioInterface::doRouting()
+status_t A2dpAudioInterface::setMasterVolume(float v)
{
- return 0;
+ return mHardwareInterface->setMasterVolume(v);
}
status_t A2dpAudioInterface::dump(int fd, const Vector<String16>& args)
{
- return 0;
+ return mHardwareInterface->dumpState(fd, args);
}
// ----------------------------------------------------------------------------
@@ -132,7 +204,7 @@ A2dpAudioInterface::A2dpAudioStreamOut::A2dpAudioStreamOut() :
mFd(-1), mStandby(true), mStartCount(0), mRetryCount(0), mData(NULL),
 // assume BT enabled to start, this is safe because it's only the
 // enabled->disabled transition we are worried about
- mBluetoothEnabled(true)
+ mBluetoothEnabled(true), mDevice(0)
{
// use any address by default
strcpy(mA2dpAddress, "00:00:00:00:00:00");
@@ -140,27 +212,43 @@ A2dpAudioInterface::A2dpAudioStreamOut::A2dpAudioStreamOut() :
}
status_t A2dpAudioInterface::A2dpAudioStreamOut::set(
- int format, int channels, uint32_t rate)
+ uint32_t device, int *pFormat, uint32_t *pChannels, uint32_t *pRate)
{
- LOGD("A2dpAudioStreamOut::set %d, %d, %d\n", format, channels, rate);
+ int lFormat = pFormat ? *pFormat : 0;
+ uint32_t lChannels = pChannels ? *pChannels : 0;
+ uint32_t lRate = pRate ? *pRate : 0;
+
+ LOGD("A2dpAudioStreamOut::set %x, %d, %d, %d\n", device, lFormat, lChannels, lRate);
// fix up defaults
- if (format == 0) format = AudioSystem::PCM_16_BIT;
- if (channels == 0) channels = channelCount();
- if (rate == 0) rate = sampleRate();
+ if (lFormat == 0) lFormat = format();
+ if (lChannels == 0) lChannels = channels();
+ if (lRate == 0) lRate = sampleRate();
// check values
- if ((format != AudioSystem::PCM_16_BIT) ||
- (channels != channelCount()) ||
- (rate != sampleRate()))
+ if ((lFormat != format()) ||
+ (lChannels != channels()) ||
+ (lRate != sampleRate())){
+ if (pFormat) *pFormat = format();
+ if (pChannels) *pChannels = channels();
+ if (pRate) *pRate = sampleRate();
return BAD_VALUE;
+ }
+ if (pFormat) *pFormat = lFormat;
+ if (pChannels) *pChannels = lChannels;
+ if (pRate) *pRate = lRate;
+
+ mDevice = device;
return NO_ERROR;
}
A2dpAudioInterface::A2dpAudioStreamOut::~A2dpAudioStreamOut()
{
+ LOGV("A2dpAudioStreamOut destructor");
+ standby();
close();
+ LOGV("A2dpAudioStreamOut destructor returning from close()");
}
ssize_t A2dpAudioInterface::A2dpAudioStreamOut::write(const void* buffer, size_t bytes)
@@ -230,6 +318,59 @@ status_t A2dpAudioInterface::A2dpAudioStreamOut::standby()
return result;
}
+status_t A2dpAudioInterface::A2dpAudioStreamOut::setParameters(const String8& keyValuePairs)
+{
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 value;
+ String8 key = String8("a2dp_sink_address");
+ status_t status = NO_ERROR;
+ int device;
+ LOGV("A2dpAudioStreamOut::setParameters() %s", keyValuePairs.string());
+
+ if (param.get(key, value) == NO_ERROR) {
+ if (value.length() != strlen("00:00:00:00:00:00")) {
+ status = BAD_VALUE;
+ } else {
+ setAddress(value.string());
+ }
+ param.remove(key);
+ }
+ key = AudioParameter::keyRouting;
+ if (param.getInt(key, device) == NO_ERROR) {
+ if (AudioSystem::isA2dpDevice((AudioSystem::audio_devices)device)) {
+ mDevice = device;
+ status = NO_ERROR;
+ } else {
+ status = BAD_VALUE;
+ }
+ param.remove(key);
+ }
+
+ if (param.size()) {
+ status = BAD_VALUE;
+ }
+ return status;
+}
+
+String8 A2dpAudioInterface::A2dpAudioStreamOut::getParameters(const String8& keys)
+{
+ AudioParameter param = AudioParameter(keys);
+ String8 value;
+ String8 key = String8("a2dp_sink_address");
+
+ if (param.get(key, value) == NO_ERROR) {
+ value = mA2dpAddress;
+ param.add(key, value);
+ }
+ key = AudioParameter::keyRouting;
+ if (param.get(key, value) == NO_ERROR) {
+ param.addInt(key, (int)mDevice);
+ }
+
+ LOGV("A2dpAudioStreamOut::getParameters() %s", param.toString().string());
+ return param.toString();
+}
+
status_t A2dpAudioInterface::A2dpAudioStreamOut::setAddress(const char* address)
{
Mutex::Autolock lock(mLock);
@@ -260,12 +401,14 @@ status_t A2dpAudioInterface::A2dpAudioStreamOut::setBluetoothEnabled(bool enable
status_t A2dpAudioInterface::A2dpAudioStreamOut::close()
{
Mutex::Autolock lock(mLock);
+ LOGV("A2dpAudioStreamOut::close() calling close_l()");
return close_l();
}
status_t A2dpAudioInterface::A2dpAudioStreamOut::close_l()
{
if (mData) {
+ LOGV("A2dpAudioStreamOut::close_l() calling a2dp_cleanup(mData)");
a2dp_cleanup(mData);
mData = NULL;
}
@@ -277,5 +420,4 @@ status_t A2dpAudioInterface::A2dpAudioStreamOut::dump(int fd, const Vector<Strin
return NO_ERROR;
}
-
}; // namespace android
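Note: the key/value strings handled above split across two levels: the interface consumes "bluetooth_enabled" and forwards anything else to the wrapped hardware, while the A2DP output stream consumes the sink address and routing keys. A sketch, assuming hw and out are existing handles on the interface and on its A2DP output stream:

    hw->setParameters(String8("bluetooth_enabled=false"));                // handled by the interface
    out->setParameters(String8("a2dp_sink_address=00:11:22:33:44:55"));   // handled by the output stream
    String8 kv = out->getParameters(String8(AudioParameter::keyRouting)); // current A2DP routing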
diff --git a/libs/audioflinger/A2dpAudioInterface.h b/libs/audioflinger/A2dpAudioInterface.h
index 091e775..d6709e2 100644
--- a/libs/audioflinger/A2dpAudioInterface.h
+++ b/libs/audioflinger/A2dpAudioInterface.h
@@ -32,38 +32,44 @@ class A2dpAudioInterface : public AudioHardwareBase
class A2dpAudioStreamOut;
public:
- A2dpAudioInterface();
+ A2dpAudioInterface(AudioHardwareInterface* hw);
virtual ~A2dpAudioInterface();
virtual status_t initCheck();
virtual status_t setVoiceVolume(float volume);
virtual status_t setMasterVolume(float volume);
+ virtual status_t setMode(int mode);
+
// mic mute
virtual status_t setMicMute(bool state);
virtual status_t getMicMute(bool* state);
- // Temporary interface, do not use
- // TODO: Replace with a more generic key:value get/set mechanism
- virtual status_t setParameter(const char *key, const char *value);
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
+
+ virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount);
// create I/O streams
virtual AudioStreamOut* openOutputStream(
- int format=0,
- int channelCount=0,
- uint32_t sampleRate=0,
+ uint32_t devices,
+ int *format=0,
+ uint32_t *channels=0,
+ uint32_t *sampleRate=0,
status_t *status=0);
+ virtual void closeOutputStream(AudioStreamOut* out);
virtual AudioStreamIn* openInputStream(
- int inputSource,
- int format,
- int channelCount,
- uint32_t sampleRate,
+ uint32_t devices,
+ int *format,
+ uint32_t *channels,
+ uint32_t *sampleRate,
status_t *status,
AudioSystem::audio_in_acoustics acoustics);
+ virtual void closeInputStream(AudioStreamIn* in);
+// static AudioHardwareInterface* createA2dpInterface();
protected:
- virtual status_t doRouting();
virtual status_t dump(int fd, const Vector<String16>& args);
private:
@@ -71,19 +77,22 @@ private:
public:
A2dpAudioStreamOut();
virtual ~A2dpAudioStreamOut();
- status_t set(int format,
- int channelCount,
- uint32_t sampleRate);
+ status_t set(uint32_t device,
+ int *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pRate);
virtual uint32_t sampleRate() const { return 44100; }
// SBC codec wants a multiple of 512
virtual size_t bufferSize() const { return 512 * 20; }
- virtual int channelCount() const { return 2; }
+ virtual uint32_t channels() const { return AudioSystem::CHANNEL_OUT_STEREO; }
virtual int format() const { return AudioSystem::PCM_16_BIT; }
virtual uint32_t latency() const { return ((1000*bufferSize())/frameSize())/sampleRate() + 200; }
- virtual status_t setVolume(float volume) { return INVALID_OPERATION; }
+ virtual status_t setVolume(float left, float right) { return INVALID_OPERATION; }
virtual ssize_t write(const void* buffer, size_t bytes);
status_t standby();
virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
private:
friend class A2dpAudioInterface;
@@ -102,11 +111,18 @@ private:
void* mData;
Mutex mLock;
bool mBluetoothEnabled;
+ uint32_t mDevice;
};
+ friend class A2dpAudioStreamOut;
+
A2dpAudioStreamOut* mOutput;
+ AudioHardwareInterface *mHardwareInterface;
+ char mA2dpAddress[20];
+ bool mBluetoothEnabled;
};
+
// ----------------------------------------------------------------------------
}; // namespace android
diff --git a/libs/audioflinger/Android.mk b/libs/audioflinger/Android.mk
index bb224be..7ed6a5f 100644
--- a/libs/audioflinger/Android.mk
+++ b/libs/audioflinger/Android.mk
@@ -1,13 +1,26 @@
LOCAL_PATH:= $(call my-dir)
+#AUDIO_POLICY_TEST := true
+#ENABLE_AUDIO_DUMP := true
+
include $(CLEAR_VARS)
+
+ifeq ($(AUDIO_POLICY_TEST),true)
+ ENABLE_AUDIO_DUMP := true
+endif
+
+
LOCAL_SRC_FILES:= \
AudioHardwareGeneric.cpp \
AudioHardwareStub.cpp \
- AudioDumpInterface.cpp \
AudioHardwareInterface.cpp
+ifeq ($(ENABLE_AUDIO_DUMP),true)
+ LOCAL_SRC_FILES += AudioDumpInterface.cpp
+ LOCAL_CFLAGS += -DENABLE_AUDIO_DUMP
+endif
+
LOCAL_SHARED_LIBRARIES := \
libcutils \
libutils \
@@ -21,8 +34,40 @@ endif
LOCAL_MODULE:= libaudiointerface
+ifeq ($(BOARD_HAVE_BLUETOOTH),true)
+ LOCAL_SRC_FILES += A2dpAudioInterface.cpp
+ LOCAL_SHARED_LIBRARIES += liba2dp
+ LOCAL_CFLAGS += -DWITH_BLUETOOTH -DWITH_A2DP
+ LOCAL_C_INCLUDES += $(call include-path-for, bluez)
+endif
+
include $(BUILD_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ AudioPolicyManagerGeneric.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libutils \
+ libmedia
+
+LOCAL_MODULE:= libaudiopolicygeneric
+
+ifeq ($(BOARD_HAVE_BLUETOOTH),true)
+ LOCAL_CFLAGS += -DWITH_A2DP
+endif
+
+ifeq ($(AUDIO_POLICY_TEST),true)
+ LOCAL_CFLAGS += -DAUDIO_POLICY_TEST
+endif
+
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_SHARED_LIBRARY)
+
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
@@ -30,7 +75,8 @@ LOCAL_SRC_FILES:= \
AudioMixer.cpp.arm \
AudioResampler.cpp.arm \
AudioResamplerSinc.cpp.arm \
- AudioResamplerCubic.cpp.arm
+ AudioResamplerCubic.cpp.arm \
+ AudioPolicyService.cpp
LOCAL_SHARED_LIBRARIES := \
libcutils \
@@ -41,17 +87,25 @@ LOCAL_SHARED_LIBRARIES := \
ifeq ($(strip $(BOARD_USES_GENERIC_AUDIO)),true)
LOCAL_STATIC_LIBRARIES += libaudiointerface
+ LOCAL_CFLAGS += -DGENERIC_AUDIO
else
LOCAL_SHARED_LIBRARIES += libaudio
endif
+ifeq ($(TARGET_SIMULATOR),true)
+ LOCAL_LDLIBS += -ldl
+else
+ LOCAL_SHARED_LIBRARIES += libdl
+endif
+
LOCAL_MODULE:= libaudioflinger
ifeq ($(BOARD_HAVE_BLUETOOTH),true)
- LOCAL_SRC_FILES += A2dpAudioInterface.cpp
- LOCAL_SHARED_LIBRARIES += liba2dp
LOCAL_CFLAGS += -DWITH_BLUETOOTH -DWITH_A2DP
- LOCAL_C_INCLUDES += $(call include-path-for, bluez)
+endif
+
+ifeq ($(AUDIO_POLICY_TEST),true)
+ LOCAL_CFLAGS += -DAUDIO_POLICY_TEST
endif
ifeq ($(TARGET_SIMULATOR),true)
diff --git a/libs/audioflinger/AudioDumpInterface.cpp b/libs/audioflinger/AudioDumpInterface.cpp
index b4940cb..87bb014 100644
--- a/libs/audioflinger/AudioDumpInterface.cpp
+++ b/libs/audioflinger/AudioDumpInterface.cpp
@@ -16,6 +16,7 @@
*/
#define LOG_TAG "AudioFlingerDump"
+//#define LOG_NDEBUG 0
#include <stdint.h>
#include <sys/types.h>
@@ -28,68 +29,209 @@
namespace android {
-bool gFirst = true; // true if first write after a standby
-
// ----------------------------------------------------------------------------
AudioDumpInterface::AudioDumpInterface(AudioHardwareInterface* hw)
+ : mFirstHwOutput(true), mPolicyCommands(String8("")), mFileName(String8(""))
{
if(hw == 0) {
LOGE("Dump construct hw = 0");
}
mFinalInterface = hw;
- mStreamOut = 0;
+ LOGV("Constructor %p, mFinalInterface %p", this, mFinalInterface);
}
AudioDumpInterface::~AudioDumpInterface()
{
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ closeOutputStream((AudioStreamOut *)mOutputs[i]);
+ }
if(mFinalInterface) delete mFinalInterface;
- if(mStreamOut) delete mStreamOut;
}
AudioStreamOut* AudioDumpInterface::openOutputStream(
- int format, int channelCount, uint32_t sampleRate, status_t *status)
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
+{
+ AudioStreamOut* outFinal = NULL;
+ int lFormat = AudioSystem::PCM_16_BIT;
+ uint32_t lChannels = AudioSystem::CHANNEL_OUT_STEREO;
+ uint32_t lRate = 44100;
+
+
+ if (AudioSystem::isA2dpDevice((AudioSystem::audio_devices)devices) || mFirstHwOutput) {
+ outFinal = mFinalInterface->openOutputStream(devices, format, channels, sampleRate, status);
+ if (outFinal != 0) {
+ lFormat = outFinal->format();
+ lChannels = outFinal->channels();
+ lRate = outFinal->sampleRate();
+ if (!AudioSystem::isA2dpDevice((AudioSystem::audio_devices)devices)) {
+ mFirstHwOutput = false;
+ }
+ }
+ } else {
+ if (format != 0 && *format != 0) lFormat = *format;
+ if (channels != 0 && *channels != 0) lChannels = *channels;
+ if (sampleRate != 0 && *sampleRate != 0) lRate = *sampleRate;
+ if (status) *status = NO_ERROR;
+ }
+ LOGV("openOutputStream(), outFinal %p", outFinal);
+
+ AudioStreamOutDump *dumOutput = new AudioStreamOutDump(this, mOutputs.size(), outFinal,
+ devices, lFormat, lChannels, lRate);
+ mOutputs.add(dumOutput);
+
+ return dumOutput;
+}
+
+void AudioDumpInterface::closeOutputStream(AudioStreamOut* out)
+{
+ AudioStreamOutDump *dumpOut = (AudioStreamOutDump *)out;
+
+ if (mOutputs.indexOf(dumpOut) < 0) {
+ LOGW("Attempt to close invalid output stream");
+ return;
+ }
+ dumpOut->standby();
+ if (dumpOut->finalStream() != NULL) {
+ mFinalInterface->closeOutputStream(dumpOut->finalStream());
+ }
+
+ mOutputs.remove(dumpOut);
+ delete dumpOut;
+}
+
+AudioStreamIn* AudioDumpInterface::openInputStream(uint32_t devices, int *format, uint32_t *channels,
+ uint32_t *sampleRate, status_t *status, AudioSystem::audio_in_acoustics acoustics)
{
- AudioStreamOut* outFinal = mFinalInterface->openOutputStream(format, channelCount, sampleRate, status);
+ AudioStreamIn* inFinal = NULL;
+ int lFormat = AudioSystem::PCM_16_BIT;
+ uint32_t lChannels = AudioSystem::CHANNEL_IN_MONO;
+ uint32_t lRate = 8000;
+
- if(outFinal) {
- mStreamOut = new AudioStreamOutDump(outFinal);
- return mStreamOut;
+ if (mInputs.size() == 0) {
+ inFinal = mFinalInterface->openInputStream(devices, format, channels, sampleRate, status, acoustics);
+ if (inFinal == 0) return 0;
+
+ lFormat = inFinal->format();
+ lChannels = inFinal->channels();
+ lRate = inFinal->sampleRate();
} else {
- LOGE("Dump outFinal=0");
- return 0;
+ if (format != 0 && *format != 0) lFormat = *format;
+ if (channels != 0 && *channels != 0) lChannels = *channels;
+ if (sampleRate != 0 && *sampleRate != 0) lRate = *sampleRate;
+ if (status) *status = NO_ERROR;
+ }
+ LOGV("openInputStream(), inFinal %p", inFinal);
+
+ AudioStreamInDump *dumInput = new AudioStreamInDump(this, mInputs.size(), inFinal,
+ devices, lFormat, lChannels, lRate);
+ mInputs.add(dumInput);
+
+ return dumInput;
+}
+void AudioDumpInterface::closeInputStream(AudioStreamIn* in)
+{
+ AudioStreamInDump *dumpIn = (AudioStreamInDump *)in;
+
+ if (mInputs.indexOf(dumpIn) < 0) {
+ LOGW("Attempt to close invalid input stream");
+ return;
+ }
+ dumpIn->standby();
+ if (dumpIn->finalStream() != NULL) {
+ mFinalInterface->closeInputStream(dumpIn->finalStream());
}
+
+ mInputs.remove(dumpIn);
+ delete dumpIn;
}
+
+status_t AudioDumpInterface::setParameters(const String8& keyValuePairs)
+{
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 value;
+ int valueInt;
+ LOGV("setParameters %s", keyValuePairs.string());
+
+ if (param.get(String8("test_cmd_file_name"), value) == NO_ERROR) {
+ mFileName = value;
+ return NO_ERROR;
+ }
+ if (param.get(String8("test_cmd_policy"), value) == NO_ERROR) {
+ Mutex::Autolock _l(mLock);
+ param.remove(String8("test_cmd_policy"));
+ mPolicyCommands = param.toString();
+ LOGV("test_cmd_policy command %s written", mPolicyCommands.string());
+ return NO_ERROR;
+ }
+
+ if (mFinalInterface != 0 ) return mFinalInterface->setParameters(keyValuePairs);
+ return NO_ERROR;
+}
+
+String8 AudioDumpInterface::getParameters(const String8& keys)
+{
+ AudioParameter param = AudioParameter(keys);
+ String8 value;
+
+// LOGV("getParameters %s", keys.string());
+
+ if (param.get(String8("test_cmd_file_name"), value) == NO_ERROR) {
+ return mFileName;
+ }
+ if (param.get(String8("test_cmd_policy"), value) == NO_ERROR) {
+ Mutex::Autolock _l(mLock);
+// LOGV("test_cmd_policy command %s read", mPolicyCommands.string());
+ return mPolicyCommands;
+ }
+
+ if (mFinalInterface != 0 ) return mFinalInterface->getParameters(keys);
+ return String8("");
+}
+
+
// ----------------------------------------------------------------------------
-AudioStreamOutDump::AudioStreamOutDump( AudioStreamOut* finalStream)
+AudioStreamOutDump::AudioStreamOutDump(AudioDumpInterface *interface,
+ int id,
+ AudioStreamOut* finalStream,
+ uint32_t devices,
+ int format,
+ uint32_t channels,
+ uint32_t sampleRate)
+ : mInterface(interface), mId(id),
+ mSampleRate(sampleRate), mFormat(format), mChannels(channels), mLatency(0), mDevice(devices),
+ mBufferSize(1024), mFinalStream(finalStream), mOutFile(0), mFileCount(0)
{
- mFinalStream = finalStream;
- mOutFile = 0;
+ LOGV("AudioStreamOutDump Constructor %p, mInterface %p, mFinalStream %p", this, mInterface, mFinalStream);
}
AudioStreamOutDump::~AudioStreamOutDump()
{
Close();
- delete mFinalStream;
}
ssize_t AudioStreamOutDump::write(const void* buffer, size_t bytes)
{
ssize_t ret;
- ret = mFinalStream->write(buffer, bytes);
- if(!mOutFile && gFirst) {
- gFirst = false;
- // check if dump file exist
- mOutFile = fopen(FLINGER_DUMP_NAME, "r");
- if(mOutFile) {
- fclose(mOutFile);
- mOutFile = fopen(FLINGER_DUMP_NAME, "ab");
+ if (mFinalStream) {
+ ret = mFinalStream->write(buffer, bytes);
+ } else {
+ usleep((bytes * 1000000) / frameSize() / sampleRate());
+ ret = bytes;
+ }
+ if(!mOutFile) {
+ if (mInterface->fileName() != "") {
+ char name[255];
+ sprintf(name, "%s_%d_%d.pcm", mInterface->fileName().string(), mId, ++mFileCount);
+ mOutFile = fopen(name, "wb");
+ LOGV("Opening dump file %s, fh %p", name, mOutFile);
}
}
if (mOutFile) {
@@ -100,13 +242,65 @@ ssize_t AudioStreamOutDump::write(const void* buffer, size_t bytes)
status_t AudioStreamOutDump::standby()
{
+ LOGV("AudioStreamOutDump standby(), mOutFile %p, mFinalStream %p", mOutFile, mFinalStream);
+
Close();
- gFirst = true;
- return mFinalStream->standby();
+ if (mFinalStream != 0 ) return mFinalStream->standby();
+ return NO_ERROR;
+}
+
+uint32_t AudioStreamOutDump::sampleRate() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->sampleRate();
+ return mSampleRate;
+}
+
+size_t AudioStreamOutDump::bufferSize() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->bufferSize();
+ return mBufferSize;
+}
+
+uint32_t AudioStreamOutDump::channels() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->channels();
+ return mChannels;
+}
+int AudioStreamOutDump::format() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->format();
+ return mFormat;
+}
+uint32_t AudioStreamOutDump::latency() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->latency();
+ return 0;
+}
+status_t AudioStreamOutDump::setVolume(float left, float right)
+{
+ if (mFinalStream != 0 ) return mFinalStream->setVolume(left, right);
+ return NO_ERROR;
+}
+status_t AudioStreamOutDump::setParameters(const String8& keyValuePairs)
+{
+ LOGV("AudioStreamOutDump::setParameters()");
+ if (mFinalStream != 0 ) return mFinalStream->setParameters(keyValuePairs);
+ return NO_ERROR;
+}
+String8 AudioStreamOutDump::getParameters(const String8& keys)
+{
+ String8 result = String8("");
+ if (mFinalStream != 0 ) result = mFinalStream->getParameters(keys);
+ return result;
}
+status_t AudioStreamOutDump::dump(int fd, const Vector<String16>& args)
+{
+ if (mFinalStream != 0 ) return mFinalStream->dump(fd, args);
+ return NO_ERROR;
+}
-void AudioStreamOutDump::Close(void)
+void AudioStreamOutDump::Close()
{
if(mOutFile) {
fclose(mOutFile);
@@ -114,4 +308,140 @@ void AudioStreamOutDump::Close(void)
}
}
+// ----------------------------------------------------------------------------
+
+AudioStreamInDump::AudioStreamInDump(AudioDumpInterface *interface,
+ int id,
+ AudioStreamIn* finalStream,
+ uint32_t devices,
+ int format,
+ uint32_t channels,
+ uint32_t sampleRate)
+ : mInterface(interface), mId(id),
+ mSampleRate(sampleRate), mFormat(format), mChannels(channels), mDevice(devices),
+ mBufferSize(1024), mFinalStream(finalStream), mInFile(0)
+{
+ LOGV("AudioStreamInDump Constructor %p, mInterface %p, mFinalStream %p", this, mInterface, mFinalStream);
+}
+
+
+AudioStreamInDump::~AudioStreamInDump()
+{
+ Close();
+}
+
+ssize_t AudioStreamInDump::read(void* buffer, ssize_t bytes)
+{
+ if (mFinalStream) {
+ return mFinalStream->read(buffer, bytes);
+ }
+
+ usleep((bytes * 1000000) / frameSize() / sampleRate());
+
+ if(!mInFile) {
+ char name[255];
+ strcpy(name, "/sdcard/music/sine440");
+ if (channels() == AudioSystem::CHANNEL_IN_MONO) {
+ strcat(name, "_mo");
+ } else {
+ strcat(name, "_st");
+ }
+ if (format() == AudioSystem::PCM_16_BIT) {
+ strcat(name, "_16b");
+ } else {
+ strcat(name, "_8b");
+ }
+ if (sampleRate() < 16000) {
+ strcat(name, "_8k");
+ } else if (sampleRate() < 32000) {
+ strcat(name, "_22k");
+ } else if (sampleRate() < 48000) {
+ strcat(name, "_44k");
+ } else {
+ strcat(name, "_48k");
+ }
+ strcat(name, ".wav");
+ mInFile = fopen(name, "rb");
+ LOGV("Opening dump file %s, fh %p", name, mInFile);
+ if (mInFile) {
+ fseek(mInFile, AUDIO_DUMP_WAVE_HDR_SIZE, SEEK_SET);
+ }
+
+ }
+ if (mInFile) {
+ ssize_t bytesRead = fread(buffer, bytes, 1, mInFile);
+ if (bytesRead != bytes) {
+ fseek(mInFile, AUDIO_DUMP_WAVE_HDR_SIZE, SEEK_SET);
+ fread((uint8_t *)buffer+bytesRead, bytes-bytesRead, 1, mInFile);
+ }
+ }
+ return bytes;
+}
+
+status_t AudioStreamInDump::standby()
+{
+ LOGV("AudioStreamInDump standby(), mInFile %p, mFinalStream %p", mInFile, mFinalStream);
+
+ Close();
+ if (mFinalStream != 0 ) return mFinalStream->standby();
+ return NO_ERROR;
+}
+
+status_t AudioStreamInDump::setGain(float gain)
+{
+ if (mFinalStream != 0 ) return mFinalStream->setGain(gain);
+ return NO_ERROR;
+}
+
+uint32_t AudioStreamInDump::sampleRate() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->sampleRate();
+ return mSampleRate;
+}
+
+size_t AudioStreamInDump::bufferSize() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->bufferSize();
+ return mBufferSize;
+}
+
+uint32_t AudioStreamInDump::channels() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->channels();
+ return mChannels;
+}
+
+int AudioStreamInDump::format() const
+{
+ if (mFinalStream != 0 ) return mFinalStream->format();
+ return mFormat;
+}
+
+status_t AudioStreamInDump::setParameters(const String8& keyValuePairs)
+{
+ LOGV("AudioStreamInDump::setParameters()");
+ if (mFinalStream != 0 ) return mFinalStream->setParameters(keyValuePairs);
+ return NO_ERROR;
+}
+
+String8 AudioStreamInDump::getParameters(const String8& keys)
+{
+ String8 result = String8("");
+ if (mFinalStream != 0 ) result = mFinalStream->getParameters(keys);
+ return result;
+}
+
+status_t AudioStreamInDump::dump(int fd, const Vector<String16>& args)
+{
+ if (mFinalStream != 0 ) return mFinalStream->dump(fd, args);
+ return NO_ERROR;
+}
+
+void AudioStreamInDump::Close()
+{
+ if(mInFile) {
+ fclose(mInFile);
+ mInFile = 0;
+ }
+}
}; // namespace android
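Note: with ENABLE_AUDIO_DUMP set, PCM written to an output is mirrored to "<file name>_<output id>_<file count>.pcm" once a file name prefix has been supplied, and test code can exchange policy commands through the same key/value channel. A sketch, where dumpInterface and the command pairs are assumptions:

    dumpInterface->setParameters(String8("test_cmd_file_name=/sdcard/flinger"));
    // queue a policy test command: the test_cmd_policy key is stripped and the
    // remaining pairs are stored until the test reads them back
    dumpInterface->setParameters(String8("test_cmd_policy=1;some_command=some_value"));
    String8 cmd = dumpInterface->getParameters(String8("test_cmd_policy"));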
diff --git a/libs/audioflinger/AudioDumpInterface.h b/libs/audioflinger/AudioDumpInterface.h
index b72c94e..4de4a16 100644
--- a/libs/audioflinger/AudioDumpInterface.h
+++ b/libs/audioflinger/AudioDumpInterface.h
@@ -20,35 +20,94 @@
#include <stdint.h>
#include <sys/types.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
#include <hardware_legacy/AudioHardwareBase.h>
namespace android {
-#define FLINGER_DUMP_NAME "/data/FlingerOut.pcm" // name of file used for dump
+#define AUDIO_DUMP_WAVE_HDR_SIZE 44
+
+class AudioDumpInterface;
class AudioStreamOutDump : public AudioStreamOut {
public:
- AudioStreamOutDump( AudioStreamOut* FinalStream);
+ AudioStreamOutDump(AudioDumpInterface *interface,
+ int id,
+ AudioStreamOut* finalStream,
+ uint32_t devices,
+ int format,
+ uint32_t channels,
+ uint32_t sampleRate);
~AudioStreamOutDump();
- virtual ssize_t write(const void* buffer, size_t bytes);
-
- virtual uint32_t sampleRate() const { return mFinalStream->sampleRate(); }
- virtual size_t bufferSize() const { return mFinalStream->bufferSize(); }
- virtual int channelCount() const { return mFinalStream->channelCount(); }
- virtual int format() const { return mFinalStream->format(); }
- virtual uint32_t latency() const { return mFinalStream->latency(); }
- virtual status_t setVolume(float volume)
- { return mFinalStream->setVolume(volume); }
+
+ virtual ssize_t write(const void* buffer, size_t bytes);
+ virtual uint32_t sampleRate() const;
+ virtual size_t bufferSize() const;
+ virtual uint32_t channels() const;
+ virtual int format() const;
+ virtual uint32_t latency() const;
+ virtual status_t setVolume(float left, float right);
virtual status_t standby();
- virtual status_t dump(int fd, const Vector<String16>& args) { return mFinalStream->dump(fd, args); }
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
+ virtual status_t dump(int fd, const Vector<String16>& args);
void Close(void);
+ AudioStreamOut* finalStream() { return mFinalStream; }
+ uint32_t device() { return mDevice; }
private:
+ AudioDumpInterface *mInterface;
+ int mId;
+ uint32_t mSampleRate; //
+ uint32_t mFormat; //
+ uint32_t mChannels; // output configuration
+ uint32_t mLatency; //
+ uint32_t mDevice; // current device this output is routed to
+ size_t mBufferSize;
AudioStreamOut *mFinalStream;
- FILE *mOutFile; // output file
+ FILE *mOutFile; // output file
+ int mFileCount;
};
+class AudioStreamInDump : public AudioStreamIn {
+public:
+ AudioStreamInDump(AudioDumpInterface *interface,
+ int id,
+ AudioStreamIn* finalStream,
+ uint32_t devices,
+ int format,
+ uint32_t channels,
+ uint32_t sampleRate);
+ ~AudioStreamInDump();
+
+ virtual uint32_t sampleRate() const;
+ virtual size_t bufferSize() const;
+ virtual uint32_t channels() const;
+ virtual int format() const;
+
+ virtual status_t setGain(float gain);
+ virtual ssize_t read(void* buffer, ssize_t bytes);
+ virtual status_t standby();
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
+ virtual status_t dump(int fd, const Vector<String16>& args);
+ void Close(void);
+ AudioStreamIn* finalStream() { return mFinalStream; }
+ uint32_t device() { return mDevice; }
+
+private:
+ AudioDumpInterface *mInterface;
+ int mId;
+ uint32_t mSampleRate; //
+ uint32_t mFormat; //
+ uint32_t mChannels; // input configuration
+ uint32_t mDevice; // current device this input is routed to
+ size_t mBufferSize;
+ AudioStreamIn *mFinalStream;
+ FILE *mInFile; // input file
+};
class AudioDumpInterface : public AudioHardwareBase
{
@@ -56,10 +115,13 @@ class AudioDumpInterface : public AudioHardwareBase
public:
AudioDumpInterface(AudioHardwareInterface* hw);
virtual AudioStreamOut* openOutputStream(
- int format=0,
- int channelCount=0,
- uint32_t sampleRate=0,
+ uint32_t devices,
+ int *format=0,
+ uint32_t *channels=0,
+ uint32_t *sampleRate=0,
status_t *status=0);
+ virtual void closeOutputStream(AudioStreamOut* out);
+
virtual ~AudioDumpInterface();
virtual status_t initCheck()
@@ -75,21 +137,25 @@ public:
virtual status_t getMicMute(bool* state)
{return mFinalInterface->getMicMute(state);}
- virtual status_t setParameter(const char* key, const char* value)
- {return mFinalInterface->setParameter(key, value);}
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
- virtual AudioStreamIn* openInputStream(int inputSource, int format, int channelCount,
- uint32_t sampleRate, status_t *status, AudioSystem::audio_in_acoustics acoustics)
- { return mFinalInterface->openInputStream(inputSource, format, channelCount, sampleRate, status, acoustics); }
+ virtual AudioStreamIn* openInputStream(uint32_t devices, int *format, uint32_t *channels,
+ uint32_t *sampleRate, status_t *status, AudioSystem::audio_in_acoustics acoustics);
+ virtual void closeInputStream(AudioStreamIn* in);
virtual status_t dump(int fd, const Vector<String16>& args) { return mFinalInterface->dumpState(fd, args); }
+ String8 fileName() const { return mFileName; }
protected:
- virtual status_t doRouting() {return mFinalInterface->setRouting(mMode, mRoutes[mMode]);}
-
- AudioHardwareInterface *mFinalInterface;
- AudioStreamOutDump *mStreamOut;
+ AudioHardwareInterface *mFinalInterface;
+ SortedVector<AudioStreamOutDump *> mOutputs;
+ bool mFirstHwOutput;
+ SortedVector<AudioStreamInDump *> mInputs;
+ Mutex mLock;
+ String8 mPolicyCommands;
+ String8 mFileName;
};
}; // namespace android
diff --git a/libs/audioflinger/AudioFlinger.cpp b/libs/audioflinger/AudioFlinger.cpp
index ffc0278..c05ab77 100644
--- a/libs/audioflinger/AudioFlinger.cpp
+++ b/libs/audioflinger/AudioFlinger.cpp
@@ -71,15 +71,9 @@ static const float MAX_GAIN = 4096.0f;
static const int8_t kMaxTrackRetries = 50;
static const int8_t kMaxTrackStartupRetries = 50;
-static const int kStartSleepTime = 30000;
-static const int kStopSleepTime = 30000;
-
static const int kDumpLockRetries = 50;
static const int kDumpLockSleep = 20000;
-// Maximum number of pending buffers allocated by OutputTrack::write()
-static const uint8_t kMaxOutputTrackBuffers = 5;
-
#define AUDIOFLINGER_SECURITY_ENABLED 1
@@ -121,132 +115,32 @@ static bool settingsAllowed() {
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
- mAudioHardware(0), mA2dpAudioInterface(0), mA2dpEnabled(false), mNotifyA2dpChange(false),
- mForcedSpeakerCount(0), mA2dpDisableCount(0), mA2dpSuppressed(false), mForcedRoute(0),
- mRouteRestoreTime(0), mMusicMuteSaved(false)
+ mAudioHardware(0), mMasterVolume(1.0f), mMasterMute(false)
{
mHardwareStatus = AUDIO_HW_IDLE;
+
mAudioHardware = AudioHardwareInterface::create();
+
mHardwareStatus = AUDIO_HW_INIT;
if (mAudioHardware->initCheck() == NO_ERROR) {
// open 16-bit output stream for s/w mixer
- mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
- status_t status;
- AudioStreamOut *hwOutput = mAudioHardware->openOutputStream(AudioSystem::PCM_16_BIT, 0, 0, &status);
- mHardwareStatus = AUDIO_HW_IDLE;
- if (hwOutput) {
- mHardwareMixerThread = new MixerThread(this, hwOutput, AudioSystem::AUDIO_OUTPUT_HARDWARE);
- } else {
- LOGE("Failed to initialize hardware output stream, status: %d", status);
- }
-
-#ifdef WITH_A2DP
- // Create A2DP interface
- mA2dpAudioInterface = new A2dpAudioInterface();
- AudioStreamOut *a2dpOutput = mA2dpAudioInterface->openOutputStream(AudioSystem::PCM_16_BIT, 0, 0, &status);
- if (a2dpOutput) {
- mA2dpMixerThread = new MixerThread(this, a2dpOutput, AudioSystem::AUDIO_OUTPUT_A2DP);
- if (hwOutput) {
- uint32_t frameCount = ((a2dpOutput->bufferSize()/a2dpOutput->frameSize()) * hwOutput->sampleRate()) / a2dpOutput->sampleRate();
- MixerThread::OutputTrack *a2dpOutTrack = new MixerThread::OutputTrack(mA2dpMixerThread,
- hwOutput->sampleRate(),
- AudioSystem::PCM_16_BIT,
- hwOutput->channelCount(),
- frameCount);
- mHardwareMixerThread->setOuputTrack(a2dpOutTrack);
- }
- } else {
- LOGE("Failed to initialize A2DP output stream, status: %d", status);
- }
-#endif
-
- // FIXME - this should come from settings
- setRouting(AudioSystem::MODE_NORMAL, AudioSystem::ROUTE_SPEAKER, AudioSystem::ROUTE_ALL);
- setRouting(AudioSystem::MODE_RINGTONE, AudioSystem::ROUTE_SPEAKER, AudioSystem::ROUTE_ALL);
- setRouting(AudioSystem::MODE_IN_CALL, AudioSystem::ROUTE_EARPIECE, AudioSystem::ROUTE_ALL);
+
setMode(AudioSystem::MODE_NORMAL);
setMasterVolume(1.0f);
setMasterMute(false);
-
- // Start record thread
- mAudioRecordThread = new AudioRecordThread(mAudioHardware, this);
- if (mAudioRecordThread != 0) {
- mAudioRecordThread->run("AudioRecordThread", PRIORITY_URGENT_AUDIO);
- }
- } else {
+ } else {
LOGE("Couldn't even initialize the stubbed audio hardware!");
}
}
AudioFlinger::~AudioFlinger()
{
- if (mAudioRecordThread != 0) {
- mAudioRecordThread->exit();
- mAudioRecordThread.clear();
- }
- mHardwareMixerThread.clear();
- delete mAudioHardware;
- // deleting mA2dpAudioInterface also deletes mA2dpOutput;
-#ifdef WITH_A2DP
- mA2dpMixerThread.clear();
- delete mA2dpAudioInterface;
-#endif
+ mRecordThreads.clear();
+ mPlaybackThreads.clear();
}
-#ifdef WITH_A2DP
-// setA2dpEnabled_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::setA2dpEnabled_l(bool enable)
-{
- SortedVector < sp<MixerThread::Track> > tracks;
- SortedVector < wp<MixerThread::Track> > activeTracks;
-
- LOGV_IF(enable, "set output to A2DP\n");
- LOGV_IF(!enable, "set output to hardware audio\n");
-
- // Transfer tracks playing on MUSIC stream from one mixer to the other
- if (enable) {
- mHardwareMixerThread->getTracks_l(tracks, activeTracks);
- mA2dpMixerThread->putTracks_l(tracks, activeTracks);
- } else {
- mA2dpMixerThread->getTracks_l(tracks, activeTracks);
- mHardwareMixerThread->putTracks_l(tracks, activeTracks);
- mA2dpMixerThread->mOutput->standby();
- }
- mA2dpEnabled = enable;
- mNotifyA2dpChange = true;
- mWaitWorkCV.broadcast();
-}
-
-// checkA2dpEnabledChange_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::checkA2dpEnabledChange_l()
-{
- if (mNotifyA2dpChange) {
- // Notify AudioSystem of the A2DP activation/deactivation
- size_t size = mNotificationClients.size();
- for (size_t i = 0; i < size; i++) {
- sp<IBinder> binder = mNotificationClients.itemAt(i).promote();
- if (binder != NULL) {
- LOGV("Notifying output change to client %p", binder.get());
- sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient> (binder);
- client->a2dpEnabledChanged(mA2dpEnabled);
- }
- }
- mNotifyA2dpChange = false;
- }
-}
-#endif // WITH_A2DP
-
-bool AudioFlinger::streamForcedToSpeaker(int streamType)
-{
- // NOTE that streams listed here must not be routed to A2DP by default:
- // AudioSystem::routedToA2dpOutput(streamType) == false
- return (streamType == AudioSystem::RING ||
- streamType == AudioSystem::ALARM ||
- streamType == AudioSystem::NOTIFICATION ||
- streamType == AudioSystem::ENFORCED_AUDIBLE);
-}
status_t AudioFlinger::dumpClients(int fd, const Vector<String16>& args)
{
@@ -276,10 +170,7 @@ status_t AudioFlinger::dumpInternals(int fd, const Vector<String16>& args)
char buffer[SIZE];
String8 result;
int hardwareStatus = mHardwareStatus;
-
- if (hardwareStatus == AUDIO_HW_IDLE && mHardwareMixerThread->mStandby) {
- hardwareStatus = AUDIO_HW_STANDBY;
- }
+
snprintf(buffer, SIZE, "Hardware status: %d\n", hardwareStatus);
result.append(buffer);
write(fd, result.string(), result.size());
@@ -337,13 +228,16 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
dumpClients(fd, args);
dumpInternals(fd, args);
- mHardwareMixerThread->dump(fd, args);
-#ifdef WITH_A2DP
- mA2dpMixerThread->dump(fd, args);
-#endif
- // dump record client
- if (mAudioRecordThread != 0) mAudioRecordThread->dump(fd, args);
+ // dump playback threads
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ mPlaybackThreads[i]->dump(fd, args);
+ }
+
+ // dump record threads
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ mRecordThreads[i]->dump(fd, args);
+ }
if (mAudioHardware) {
mAudioHardware->dumpState(fd, args);
@@ -353,6 +247,7 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
return NO_ERROR;
}
+
// IAudioFlinger interface
@@ -365,9 +260,10 @@ sp<IAudioTrack> AudioFlinger::createTrack(
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
+ void *output,
status_t *status)
{
- sp<MixerThread::Track> track;
+ sp<PlaybackThread::Track> track;
sp<TrackHandle> trackHandle;
sp<Client> client;
wp<Client> wclient;
@@ -381,6 +277,12 @@ sp<IAudioTrack> AudioFlinger::createTrack(
{
Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ LOGE("unknown output thread");
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
wclient = mClients.valueFor(pid);
@@ -390,16 +292,8 @@ sp<IAudioTrack> AudioFlinger::createTrack(
client = new Client(this, pid);
mClients.add(pid, client);
}
-#ifdef WITH_A2DP
- if (isA2dpEnabled() && AudioSystem::routedToA2dpOutput(streamType)) {
- track = mA2dpMixerThread->createTrack_l(client, streamType, sampleRate, format,
- channelCount, frameCount, sharedBuffer, &lStatus);
- } else
-#endif
- {
- track = mHardwareMixerThread->createTrack_l(client, streamType, sampleRate, format,
- channelCount, frameCount, sharedBuffer, &lStatus);
- }
+ track = thread->createTrack_l(client, streamType, sampleRate, format,
+ channelCount, frameCount, sharedBuffer, &lStatus);
}
if (lStatus == NO_ERROR) {
trackHandle = new TrackHandle(track);
@@ -414,54 +308,59 @@ Exit:
return trackHandle;
}
-uint32_t AudioFlinger::sampleRate(int output) const
+uint32_t AudioFlinger::sampleRate(void *output) const
{
-#ifdef WITH_A2DP
- if (output == AudioSystem::AUDIO_OUTPUT_A2DP) {
- return mA2dpMixerThread->sampleRate();
- }
-#endif
- return mHardwareMixerThread->sampleRate();
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ LOGW("sampleRate() unknown thread %p", output);
+ return 0;
+ }
+ return thread->sampleRate();
}
-int AudioFlinger::channelCount(int output) const
+int AudioFlinger::channelCount(void *output) const
{
-#ifdef WITH_A2DP
- if (output == AudioSystem::AUDIO_OUTPUT_A2DP) {
- return mA2dpMixerThread->channelCount();
- }
-#endif
- return mHardwareMixerThread->channelCount();
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ LOGW("channelCount() unknown thread %p", output);
+ return 0;
+ }
+ return thread->channelCount();
}
-int AudioFlinger::format(int output) const
+int AudioFlinger::format(void *output) const
{
-#ifdef WITH_A2DP
- if (output == AudioSystem::AUDIO_OUTPUT_A2DP) {
- return mA2dpMixerThread->format();
- }
-#endif
- return mHardwareMixerThread->format();
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ LOGW("format() unknown thread %p", output);
+ return 0;
+ }
+ return thread->format();
}
-size_t AudioFlinger::frameCount(int output) const
+size_t AudioFlinger::frameCount(void *output) const
{
-#ifdef WITH_A2DP
- if (output == AudioSystem::AUDIO_OUTPUT_A2DP) {
- return mA2dpMixerThread->frameCount();
- }
-#endif
- return mHardwareMixerThread->frameCount();
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ LOGW("frameCount() unknown thread %p", output);
+ return 0;
+ }
+ return thread->frameCount();
}
-uint32_t AudioFlinger::latency(int output) const
+uint32_t AudioFlinger::latency(void *output) const
{
-#ifdef WITH_A2DP
- if (output == AudioSystem::AUDIO_OUTPUT_A2DP) {
- return mA2dpMixerThread->latency();
- }
-#endif
- return mHardwareMixerThread->latency();
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ LOGW("latency() unknown thread %p", output);
+ return 0;
+ }
+ return thread->latency();
}
status_t AudioFlinger::setMasterVolume(float value)
@@ -478,94 +377,12 @@ status_t AudioFlinger::setMasterVolume(float value)
value = 1.0f;
}
mHardwareStatus = AUDIO_HW_IDLE;
- mHardwareMixerThread->setMasterVolume(value);
-#ifdef WITH_A2DP
- mA2dpMixerThread->setMasterVolume(value);
-#endif
-
- return NO_ERROR;
-}
-
-status_t AudioFlinger::setRouting(int mode, uint32_t routes, uint32_t mask)
-{
- status_t err = NO_ERROR;
-
- // check calling permissions
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if ((mode < AudioSystem::MODE_CURRENT) || (mode >= AudioSystem::NUM_MODES)) {
- LOGW("Illegal value: setRouting(%d, %u, %u)", mode, routes, mask);
- return BAD_VALUE;
- }
-
-#ifdef WITH_A2DP
- LOGV("setRouting %d %d %d, tid %d, calling tid %d\n", mode, routes, mask, gettid(),
- IPCThreadState::self()->getCallingPid());
- if (mode == AudioSystem::MODE_NORMAL &&
- (mask & AudioSystem::ROUTE_BLUETOOTH_A2DP)) {
- AutoMutex lock(&mLock);
-
- bool enableA2dp = false;
- if (routes & AudioSystem::ROUTE_BLUETOOTH_A2DP) {
- enableA2dp = true;
- }
- if (mA2dpDisableCount > 0) {
- mA2dpSuppressed = enableA2dp;
- } else {
- setA2dpEnabled_l(enableA2dp);
- }
- LOGV("setOutput done\n");
- }
- // setRouting() is always called at least for mode == AudioSystem::MODE_IN_CALL when
- // SCO is enabled, whatever current mode is so we can safely handle A2DP disabling only
- // in this case to avoid doing it several times.
- if (mode == AudioSystem::MODE_IN_CALL &&
- (mask & AudioSystem::ROUTE_BLUETOOTH_SCO)) {
- AutoMutex lock(&mLock);
- handleRouteDisablesA2dp_l(routes);
- }
-#endif
- // do nothing if only A2DP routing is affected
- mask &= ~AudioSystem::ROUTE_BLUETOOTH_A2DP;
- if (mask) {
- AutoMutex lock(mHardwareLock);
- mHardwareStatus = AUDIO_HW_GET_ROUTING;
- uint32_t r;
- err = mAudioHardware->getRouting(mode, &r);
- if (err == NO_ERROR) {
- r = (r & ~mask) | (routes & mask);
- if (mode == AudioSystem::MODE_NORMAL ||
- (mode == AudioSystem::MODE_CURRENT && getMode() == AudioSystem::MODE_NORMAL)) {
- mSavedRoute = r;
- r |= mForcedRoute;
- LOGV("setRouting mSavedRoute %08x mForcedRoute %08x\n", mSavedRoute, mForcedRoute);
- }
- mHardwareStatus = AUDIO_HW_SET_ROUTING;
- err = mAudioHardware->setRouting(mode, r);
- }
- mHardwareStatus = AUDIO_HW_IDLE;
- }
- return err;
-}
+ mMasterVolume = value;
+ for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
+ mPlaybackThreads[i]->setMasterVolume(value);
-uint32_t AudioFlinger::getRouting(int mode) const
-{
- uint32_t routes = 0;
- if ((mode >= AudioSystem::MODE_CURRENT) && (mode < AudioSystem::NUM_MODES)) {
- if (mode == AudioSystem::MODE_NORMAL ||
- (mode == AudioSystem::MODE_CURRENT && getMode() == AudioSystem::MODE_NORMAL)) {
- routes = mSavedRoute;
- } else {
- mHardwareStatus = AUDIO_HW_GET_ROUTING;
- mAudioHardware->getRouting(mode, &routes);
- mHardwareStatus = AUDIO_HW_IDLE;
- }
- } else {
- LOGW("Illegal value: getRouting(%d)", mode);
- }
- return routes;
+ return NO_ERROR;
}
status_t AudioFlinger::setMode(int mode)
@@ -586,15 +403,6 @@ status_t AudioFlinger::setMode(int mode)
return ret;
}
-int AudioFlinger::getMode() const
-{
- int mode = AudioSystem::MODE_INVALID;
- mHardwareStatus = AUDIO_HW_SET_MODE;
- mAudioHardware->getMode(&mode);
- mHardwareStatus = AUDIO_HW_IDLE;
- return mode;
-}
-
status_t AudioFlinger::setMicMute(bool state)
{
// check calling permissions
@@ -624,37 +432,46 @@ status_t AudioFlinger::setMasterMute(bool muted)
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}
- mHardwareMixerThread->setMasterMute(muted);
-#ifdef WITH_A2DP
- mA2dpMixerThread->setMasterMute(muted);
-#endif
+
+ mMasterMute = muted;
+ for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
+ mPlaybackThreads[i]->setMasterMute(muted);
+
return NO_ERROR;
}
float AudioFlinger::masterVolume() const
{
- return mHardwareMixerThread->masterVolume();
+ return mMasterVolume;
}
bool AudioFlinger::masterMute() const
{
- return mHardwareMixerThread->masterMute();
+ return mMasterMute;
}
-status_t AudioFlinger::setStreamVolume(int stream, float value)
+status_t AudioFlinger::setStreamVolume(int stream, float value, void *output)
{
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}
- if (uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES ||
- uint32_t(stream) == AudioSystem::ENFORCED_AUDIBLE) {
+ if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
return BAD_VALUE;
}
+ AutoMutex lock(mLock);
+ PlaybackThread *thread = NULL;
+ if (output) {
+ thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ return BAD_VALUE;
+ }
+ }
+
status_t ret = NO_ERROR;
-
+
if (stream == AudioSystem::VOICE_CALL ||
stream == AudioSystem::BLUETOOTH_SCO) {
float hwValue;
@@ -671,18 +488,18 @@ status_t AudioFlinger::setStreamVolume(int stream, float value)
mHardwareStatus = AUDIO_SET_VOICE_VOLUME;
ret = mAudioHardware->setVoiceVolume(hwValue);
mHardwareStatus = AUDIO_HW_IDLE;
-
+
}
-
- mHardwareMixerThread->setStreamVolume(stream, value);
-#ifdef WITH_A2DP
- mA2dpMixerThread->setStreamVolume(stream, value);
-#endif
- mHardwareMixerThread->setStreamVolume(stream, value);
-#ifdef WITH_A2DP
- mA2dpMixerThread->setStreamVolume(stream, value);
-#endif
+ mStreamTypes[stream].volume = value;
+
+ if (thread == NULL) {
+ for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
+ mPlaybackThreads[i]->setStreamVolume(stream, value);
+
+ } else {
+ thread->setStreamVolume(stream, value);
+ }
return ret;
}
@@ -694,82 +511,116 @@ status_t AudioFlinger::setStreamMute(int stream, bool muted)
return PERMISSION_DENIED;
}
- if (uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES ||
+ if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES ||
uint32_t(stream) == AudioSystem::ENFORCED_AUDIBLE) {
return BAD_VALUE;
}
-#ifdef WITH_A2DP
- mA2dpMixerThread->setStreamMute(stream, muted);
-#endif
- if (stream == AudioSystem::MUSIC)
- {
- AutoMutex lock(&mHardwareLock);
- if (mForcedRoute != 0)
- mMusicMuteSaved = muted;
- else
- mHardwareMixerThread->setStreamMute(stream, muted);
- } else {
- mHardwareMixerThread->setStreamMute(stream, muted);
- }
+ mStreamTypes[stream].mute = muted;
+ for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
+ mPlaybackThreads[i]->setStreamMute(stream, muted);
return NO_ERROR;
}
-float AudioFlinger::streamVolume(int stream) const
+float AudioFlinger::streamVolume(int stream, void *output) const
{
- if (uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
return 0.0f;
}
-
- float volume = mHardwareMixerThread->streamVolume(stream);
+
+ AutoMutex lock(mLock);
+ float volume;
+ if (output) {
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ return 0.0f;
+ }
+ volume = thread->streamVolume(stream);
+ } else {
+ volume = mStreamTypes[stream].volume;
+ }
+
// remove correction applied by setStreamVolume()
if (stream == AudioSystem::VOICE_CALL) {
volume = (volume - 0.01) / 0.99 ;
}
-
+
return volume;
}
bool AudioFlinger::streamMute(int stream) const
{
- if (uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || stream >= (int)AudioSystem::NUM_STREAM_TYPES) {
return true;
}
-
- if (stream == AudioSystem::MUSIC && mForcedRoute != 0)
- {
- return mMusicMuteSaved;
- }
- return mHardwareMixerThread->streamMute(stream);
+
+ return mStreamTypes[stream].mute;
}
bool AudioFlinger::isMusicActive() const
{
Mutex::Autolock _l(mLock);
- #ifdef WITH_A2DP
- if (isA2dpEnabled()) {
- return mA2dpMixerThread->isMusicActive_l();
- }
- #endif
- return mHardwareMixerThread->isMusicActive_l();
+ for (uint32_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads[i]->isMusicActive()) {
+ return true;
+ }
+ }
+ return false;
}
-status_t AudioFlinger::setParameter(const char* key, const char* value)
+status_t AudioFlinger::setParameters(void *ioHandle, const String8& keyValuePairs)
{
- status_t result, result2;
- AutoMutex lock(mHardwareLock);
- mHardwareStatus = AUDIO_SET_PARAMETER;
-
- LOGV("setParameter() key %s, value %s, tid %d, calling tid %d", key, value, gettid(), IPCThreadState::self()->getCallingPid());
- result = mAudioHardware->setParameter(key, value);
- if (mA2dpAudioInterface) {
- result2 = mA2dpAudioInterface->setParameter(key, value);
- if (result2)
- result = result2;
+ status_t result;
+
+ LOGV("setParameters(): io %p, keyvalue %s, tid %d, calling tid %d",
+ ioHandle, keyValuePairs.string(), gettid(), IPCThreadState::self()->getCallingPid());
+ // check calling permissions
+ if (!settingsAllowed()) {
+ return PERMISSION_DENIED;
}
- mHardwareStatus = AUDIO_HW_IDLE;
- return result;
+
+ // ioHandle == 0 means the parameters are global to the audio hardware interface
+ if (ioHandle == 0) {
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_SET_PARAMETER;
+ result = mAudioHardware->setParameters(keyValuePairs);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ return result;
+ }
+
+ // Check if parameters are for an output
+ PlaybackThread *playbackThread = checkPlaybackThread_l(ioHandle);
+ if (playbackThread != NULL) {
+ return playbackThread->setParameters(keyValuePairs);
+ }
+
+ // Check if parameters are for an input
+ RecordThread *recordThread = checkRecordThread_l(ioHandle);
+ if (recordThread != NULL) {
+ return recordThread->setParameters(keyValuePairs);
+ }
+
+ return BAD_VALUE;
+}
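
// A minimal standalone sketch of the "key1=value1;key2=value2" convention that the
// strings passed to setParameters()/getParameters() follow, assuming ';' and '='
// separators; the helper name and the example keys are illustrative, not framework API.
#include <map>
#include <sstream>
#include <string>

static std::map<std::string, std::string> parseKeyValuePairs(const std::string& kvPairs)
{
    std::map<std::string, std::string> result;
    std::istringstream stream(kvPairs);
    std::string pair;
    while (std::getline(stream, pair, ';')) {        // pairs are ';'-separated
        size_t eq = pair.find('=');
        if (eq == std::string::npos || eq == 0) continue;
        result[pair.substr(0, eq)] = pair.substr(eq + 1);
    }
    return result;
}
// Example: parseKeyValuePairs("sampling_rate=44100;routing=2") yields
// {"sampling_rate": "44100", "routing": "2"}; setParameters() then hands such a
// string either to the hardware interface (ioHandle == 0) or to the thread that
// owns the given output or input handle.
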
+
+String8 AudioFlinger::getParameters(void *ioHandle, const String8& keys)
+{
+// LOGV("getParameters() io %p, keys %s, tid %d, calling tid %d",
+// ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid());
+
+ if (ioHandle == 0) {
+ return mAudioHardware->getParameters(keys);
+ }
+ PlaybackThread *playbackThread = checkPlaybackThread_l(ioHandle);
+ if (playbackThread != NULL) {
+ return playbackThread->getParameters(keys);
+ }
+ RecordThread *recordThread = checkRecordThread_l(ioHandle);
+ if (recordThread != NULL) {
+ return recordThread->getParameters(keys);
+ }
+ return String8("");
}
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
@@ -779,7 +630,7 @@ size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, int format, int cha
void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
{
-
+
LOGV("registerClient() %p, tid %d, calling tid %d", client.get(), gettid(), IPCThreadState::self()->getCallingPid());
Mutex::Autolock _l(mLock);
@@ -788,12 +639,21 @@ void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
LOGV("Adding notification client %p", binder.get());
binder->linkToDeath(this);
mNotificationClients.add(binder);
- client->a2dpEnabledChanged(isA2dpEnabled());
+ }
+
+ // the config change is always sent from playback or record threads to avoid deadlock
+ // with AudioSystem::gLock
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ mPlaybackThreads[i]->sendConfigEvent(AudioSystem::OUTPUT_OPENED);
+ }
+
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ mRecordThreads[i]->sendConfigEvent(AudioSystem::INPUT_OPENED);
}
}
void AudioFlinger::binderDied(const wp<IBinder>& who) {
-
+
LOGV("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(), IPCThreadState::self()->getCallingPid());
Mutex::Autolock _l(mLock);
@@ -808,6 +668,17 @@ void AudioFlinger::binderDied(const wp<IBinder>& who) {
}
}
+void AudioFlinger::audioConfigChanged(int event, void *param1, void *param2) {
+ Mutex::Autolock _l(mLock);
+ size_t size = mNotificationClients.size();
+ for (size_t i = 0; i < size; i++) {
+ sp<IBinder> binder = mNotificationClients.itemAt(i);
+ LOGV("audioConfigChanged() Notifying change to client %p", binder.get());
+ sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient> (binder);
+ client->ioConfigChanged(event, param1, param2);
+ }
+}
+
void AudioFlinger::removeClient(pid_t pid)
{
LOGV("removeClient() pid %d, tid %d, calling tid %d", pid, gettid(), IPCThreadState::self()->getCallingPid());
@@ -815,147 +686,140 @@ void AudioFlinger::removeClient(pid_t pid)
mClients.removeItem(pid);
}
-bool AudioFlinger::isA2dpEnabled() const
+// ----------------------------------------------------------------------------
+
+AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger)
+ : Thread(false),
+ mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mChannelCount(0),
+ mFormat(0), mFrameSize(1), mNewParameters(String8("")), mStandby(false)
{
- return mA2dpEnabled;
}
-void AudioFlinger::handleForcedSpeakerRoute(int command)
+AudioFlinger::ThreadBase::~ThreadBase()
{
- switch(command) {
- case ACTIVE_TRACK_ADDED:
- {
- AutoMutex lock(mHardwareLock);
- if (mForcedSpeakerCount++ == 0) {
- if (mForcedRoute == 0) {
- mMusicMuteSaved = mHardwareMixerThread->streamMute(AudioSystem::MUSIC);
- LOGV("++mForcedSpeakerCount == 0, mMusicMuteSaved = %d, mRouteRestoreTime = %d", mMusicMuteSaved, mRouteRestoreTime);
- if (!(mSavedRoute & AudioSystem::ROUTE_SPEAKER)) {
- LOGV("Route forced to Speaker ON %08x", mSavedRoute | AudioSystem::ROUTE_SPEAKER);
- mHardwareMixerThread->setStreamMute(AudioSystem::MUSIC, true);
- usleep(mHardwareMixerThread->latency()*1000);
- mHardwareStatus = AUDIO_HW_SET_ROUTING;
- mAudioHardware->setRouting(AudioSystem::MODE_NORMAL, mSavedRoute | AudioSystem::ROUTE_SPEAKER);
- mHardwareStatus = AUDIO_HW_IDLE;
- // delay track start so that audio hardware has time to siwtch routes
- usleep(kStartSleepTime);
- }
- }
- mForcedRoute = AudioSystem::ROUTE_SPEAKER;
- mRouteRestoreTime = 0;
- }
- LOGV("mForcedSpeakerCount incremented to %d", mForcedSpeakerCount);
- }
- break;
- case ACTIVE_TRACK_REMOVED:
- {
- AutoMutex lock(mHardwareLock);
- if (mForcedSpeakerCount > 0){
- if (--mForcedSpeakerCount == 0) {
- mRouteRestoreTime = systemTime() + milliseconds(kStopSleepTime/1000);
- }
- LOGV("mForcedSpeakerCount decremented to %d", mForcedSpeakerCount);
- } else {
- LOGE("mForcedSpeakerCount is already zero");
- }
- }
- break;
- case CHECK_ROUTE_RESTORE_TIME:
- case FORCE_ROUTE_RESTORE:
- if (mRouteRestoreTime) {
- AutoMutex lock(mHardwareLock);
- if (mRouteRestoreTime &&
- (systemTime() > mRouteRestoreTime || command == FORCE_ROUTE_RESTORE)) {
- mHardwareMixerThread->setStreamMute(AudioSystem::MUSIC, mMusicMuteSaved);
- mForcedRoute = 0;
- if (!(mSavedRoute & AudioSystem::ROUTE_SPEAKER)) {
- mHardwareStatus = AUDIO_HW_SET_ROUTING;
- mAudioHardware->setRouting(AudioSystem::MODE_NORMAL, mSavedRoute);
- mHardwareStatus = AUDIO_HW_IDLE;
- LOGV("Route forced to Speaker OFF %08x", mSavedRoute);
- }
- mRouteRestoreTime = 0;
- }
- }
- break;
- }
}
-#ifdef WITH_A2DP
-// handleRouteDisablesA2dp_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::handleRouteDisablesA2dp_l(int routes)
-{
- if (routes & AudioSystem::ROUTE_BLUETOOTH_SCO) {
- if (mA2dpDisableCount++ == 0) {
- if (mA2dpEnabled) {
- setA2dpEnabled_l(false);
- mA2dpSuppressed = true;
- }
- }
- LOGV("mA2dpDisableCount incremented to %d", mA2dpDisableCount);
- } else {
- if (mA2dpDisableCount > 0) {
- if (--mA2dpDisableCount == 0) {
- if (mA2dpSuppressed) {
- setA2dpEnabled_l(true);
- mA2dpSuppressed = false;
- }
- }
- LOGV("mA2dpDisableCount decremented to %d", mA2dpDisableCount);
- } else {
- LOGV("mA2dpDisableCount is already zero");
- }
+void AudioFlinger::ThreadBase::exit()
+{
+ // keep a strong ref on ourselves so that we won't get
+ // destroyed in the middle of requestExitAndWait()
+ sp <ThreadBase> strongMe = this;
+
+ LOGV("ThreadBase::exit");
+ {
+ AutoMutex lock(&mLock);
+ requestExit();
+ mWaitWorkCV.signal();
}
+ requestExitAndWait();
}
-#endif
-// ----------------------------------------------------------------------------
+uint32_t AudioFlinger::ThreadBase::sampleRate() const
+{
+ return mSampleRate;
+}
-AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int outputType)
- : Thread(false),
- mAudioFlinger(audioFlinger), mAudioMixer(0), mOutput(output), mOutputType(outputType),
- mSampleRate(0), mFrameCount(0), mChannelCount(0), mFormat(0), mMixBuffer(0),
- mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mStandby(false),
- mInWrite(false)
+int AudioFlinger::ThreadBase::channelCount() const
{
- mSampleRate = output->sampleRate();
- mChannelCount = output->channelCount();
+ return mChannelCount;
+}
- // FIXME - Current mixer implementation only supports stereo output
- if (mChannelCount == 1) {
- LOGE("Invalid audio hardware channel count");
+int AudioFlinger::ThreadBase::format() const
+{
+ return mFormat;
+}
+
+size_t AudioFlinger::ThreadBase::frameCount() const
+{
+ return mFrameCount;
+}
+
+status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
+{
+ status_t result;
+
+ Mutex::Autolock _l(mLock);
+ mNewParameters = keyValuePairs;
+
+ mWaitWorkCV.signal();
+ mParamCond.wait(mLock);
+
+ return mParamStatus;
+}
+
+void AudioFlinger::ThreadBase::sendConfigEvent(int event, int param)
+{
+ Mutex::Autolock _l(mLock);
+ ConfigEvent *configEvent = new ConfigEvent();
+ configEvent->mEvent = event;
+ configEvent->mParam = param;
+ mConfigEvents.add(configEvent);
+ LOGV("sendConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, param);
+ mWaitWorkCV.signal();
+}
+
+void AudioFlinger::ThreadBase::processConfigEvents()
+{
+ mLock.lock();
+ while(!mConfigEvents.isEmpty()) {
+ LOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
+ ConfigEvent *configEvent = mConfigEvents[0];
+ mConfigEvents.removeAt(0);
+ // release mLock because audioConfigChanged() will call
+ // Audioflinger::audioConfigChanged() which locks AudioFlinger mLock thus creating
+ // potential cross deadlock between AudioFlinger::mLock and mLock
+ mLock.unlock();
+ audioConfigChanged(configEvent->mEvent, configEvent->mParam);
+ delete configEvent;
+ mLock.lock();
}
+ mLock.unlock();
+}
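
// A minimal standalone sketch of the lock-dropping pattern processConfigEvents() uses:
// each queued event is removed under the local lock, but the lock is released before
// calling out, so this lock and any lock taken by the callback are never held together.
// The queue type and names are hypothetical.
#include <functional>
#include <mutex>
#include <vector>

struct EventQueue {
    std::mutex lock;
    std::vector<int> events;

    void drain(const std::function<void(int)>& callback) {
        lock.lock();
        while (!events.empty()) {
            int event = events.front();
            events.erase(events.begin());
            lock.unlock();        // never hold this lock while calling out
            callback(event);      // callback may safely take other locks
            lock.lock();
        }
        lock.unlock();
    }
};
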
- mFormat = output->format();
- mFrameCount = output->bufferSize() / output->channelCount() / sizeof(int16_t);
- mAudioMixer = new AudioMixer(mFrameCount, output->sampleRate());
- // FIXME - Current mixer implementation only supports stereo output: Always
- // Allocate a stereo buffer even if HW output is mono.
- mMixBuffer = new int16_t[mFrameCount * 2];
- memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));
+// ----------------------------------------------------------------------------
+
+AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output)
+ : ThreadBase(audioFlinger),
+ mOutput(output),
+ mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0),
+ mInWrite(false), mMixBuffer(0), mSuspended(false), mBytesWritten(0)
+{
+ readOutputParameters();
+
+ mMasterVolume = mAudioFlinger->masterVolume();
+ mMasterMute = mAudioFlinger->masterMute();
+
+ for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ mStreamTypes[stream].volume = mAudioFlinger->streamVolumeInternal(stream);
+ mStreamTypes[stream].mute = mAudioFlinger->streamMute(stream);
+ }
+ // notify client processes that a new output has been opened
+ sendConfigEvent(AudioSystem::OUTPUT_OPENED);
}
-AudioFlinger::MixerThread::~MixerThread()
+AudioFlinger::PlaybackThread::~PlaybackThread()
{
delete [] mMixBuffer;
- delete mAudioMixer;
+ if (mType != DUPLICATING) {
+ mAudioFlinger->mAudioHardware->closeOutputStream(mOutput);
+ }
}
-status_t AudioFlinger::MixerThread::dump(int fd, const Vector<String16>& args)
+status_t AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
{
dumpInternals(fd, args);
dumpTracks(fd, args);
return NO_ERROR;
}
-status_t AudioFlinger::MixerThread::dumpTracks(int fd, const Vector<String16>& args)
+status_t AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
- snprintf(buffer, SIZE, "Output %d mixer thread tracks\n", mOutputType);
+ snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
result.append(buffer);
result.append(" Name Clien Typ Fmt Chn Buf S M F SRate LeftV RighV Serv User\n");
for (size_t i = 0; i < mTracks.size(); ++i) {
@@ -966,7 +830,7 @@ status_t AudioFlinger::MixerThread::dumpTracks(int fd, const Vector<String16>& a
}
}
- snprintf(buffer, SIZE, "Output %d mixer thread active tracks\n", mOutputType);
+ snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
result.append(buffer);
result.append(" Name Clien Typ Fmt Chn Buf S M F SRate LeftV RighV Serv User\n");
for (size_t i = 0; i < mActiveTracks.size(); ++i) {
@@ -983,15 +847,13 @@ status_t AudioFlinger::MixerThread::dumpTracks(int fd, const Vector<String16>& a
return NO_ERROR;
}
-status_t AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
+status_t AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
- snprintf(buffer, SIZE, "Output %d mixer thread internals\n", mOutputType);
- result.append(buffer);
- snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
+ snprintf(buffer, SIZE, "Output thread %p internals\n", this);
result.append(buffer);
snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
result.append(buffer);
@@ -1008,238 +870,354 @@ status_t AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>
}
// Thread virtuals
+status_t AudioFlinger::PlaybackThread::readyToRun()
+{
+ if (mSampleRate == 0) {
+ LOGE("No working audio driver found.");
+ return NO_INIT;
+ }
+ LOGI("AudioFlinger's thread %p ready to run", this);
+ return NO_ERROR;
+}
+
+void AudioFlinger::PlaybackThread::onFirstRef()
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+
+ snprintf(buffer, SIZE, "Playback Thread %p", this);
+
+ run(buffer, ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
+sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ int streamType,
+ uint32_t sampleRate,
+ int format,
+ int channelCount,
+ int frameCount,
+ const sp<IMemory>& sharedBuffer,
+ status_t *status)
+{
+ sp<Track> track;
+ status_t lStatus;
+
+ if (mType == DIRECT) {
+ if (sampleRate != mSampleRate || format != mFormat || channelCount != mChannelCount) {
+ LOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelCount %d for output %p",
+ sampleRate, format, channelCount, mOutput);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ } else {
+ // Resampler implementation limits input sampling rate to 2 x output sampling rate.
+ if (sampleRate > mSampleRate*2) {
+ LOGE("Sample rate out of range: %d mSampleRate %d", sampleRate, mSampleRate);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
+
+ if (mOutput == 0) {
+ LOGE("Audio driver not initialized.");
+ lStatus = NO_INIT;
+ goto Exit;
+ }
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+ track = new Track(this, client, streamType, sampleRate, format,
+ channelCount, frameCount, sharedBuffer);
+ if (track->getCblk() == NULL) {
+ lStatus = NO_MEMORY;
+ goto Exit;
+ }
+ mTracks.add(track);
+ }
+ lStatus = NO_ERROR;
+
+Exit:
+ if(status) {
+ *status = lStatus;
+ }
+ return track;
+}
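
// A minimal restatement of the admission rules createTrack_l() applies, as a
// standalone check (the function name is hypothetical): DIRECT outputs take the
// track data as-is, so everything must match exactly, while mixed outputs resample
// but only accept input rates up to twice the output rate.
static bool trackAccepted(bool directOutput,
                          unsigned trackRate, unsigned outRate,
                          int trackFormat, int outFormat,
                          int trackChannels, int outChannels)
{
    if (directOutput) {
        // exact match required for rate, format and channel count
        return trackRate == outRate && trackFormat == outFormat &&
               trackChannels == outChannels;
    }
    // e.g. a 44100 Hz mixer output accepts tracks up to 88200 Hz
    return trackRate <= 2 * outRate;
}
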
+
+uint32_t AudioFlinger::PlaybackThread::latency() const
+{
+ if (mOutput) {
+ return mOutput->latency();
+ }
+ else {
+ return 0;
+ }
+}
+
+status_t AudioFlinger::PlaybackThread::setMasterVolume(float value)
+{
+ mMasterVolume = value;
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::setMasterMute(bool muted)
+{
+ mMasterMute = muted;
+ return NO_ERROR;
+}
+
+float AudioFlinger::PlaybackThread::masterVolume() const
+{
+ return mMasterVolume;
+}
+
+bool AudioFlinger::PlaybackThread::masterMute() const
+{
+ return mMasterMute;
+}
+
+status_t AudioFlinger::PlaybackThread::setStreamVolume(int stream, float value)
+{
+ mStreamTypes[stream].volume = value;
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::setStreamMute(int stream, bool muted)
+{
+ mStreamTypes[stream].mute = muted;
+ return NO_ERROR;
+}
+
+float AudioFlinger::PlaybackThread::streamVolume(int stream) const
+{
+ return mStreamTypes[stream].volume;
+}
+
+bool AudioFlinger::PlaybackThread::streamMute(int stream) const
+{
+ return mStreamTypes[stream].mute;
+}
+
+bool AudioFlinger::PlaybackThread::isMusicActive() const
+{
+ Mutex::Autolock _l(mLock);
+ size_t count = mActiveTracks.size();
+ for (size_t i = 0 ; i < count ; ++i) {
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t == 0) continue;
+ Track* const track = t.get();
+ if (t->mStreamType == AudioSystem::MUSIC)
+ return true;
+ }
+ return false;
+}
+
+// addTrack_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
+{
+ status_t status = ALREADY_EXISTS;
+
+ // here the track could be either new, or restarted
+ // in both cases "unstop" the track
+ if (track->isPaused()) {
+ track->mState = TrackBase::RESUMING;
+ LOGV("PAUSED => RESUMING (%d)", track->name());
+ } else {
+ track->mState = TrackBase::ACTIVE;
+ LOGV("? => ACTIVE (%d)", track->name());
+ }
+ // set retry count for buffer fill
+ track->mRetryCount = kMaxTrackStartupRetries;
+ if (mActiveTracks.indexOf(track) < 0) {
+ // the track is newly added, make sure it fills up all its
+ // buffers before playing. This is to ensure the client will
+ // effectively get the latency it requested.
+ track->mFillingUpStatus = Track::FS_FILLING;
+ track->mResetDone = false;
+ mActiveTracks.add(track);
+ status = NO_ERROR;
+ }
+
+ LOGV("mWaitWorkCV.broadcast");
+ mWaitWorkCV.broadcast();
+
+ return status;
+}
+
+// destroyTrack_l() must be called with ThreadBase::mLock held
+void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
+{
+ track->mState = TrackBase::TERMINATED;
+ if (mActiveTracks.indexOf(track) < 0) {
+ LOGV("remove track (%d) and delete from mixer", track->name());
+ mTracks.remove(track);
+ deleteTrackName_l(track->name());
+ }
+}
+
+String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
+{
+ return mOutput->getParameters(keys);
+}
+
+void AudioFlinger::PlaybackThread::audioConfigChanged(int event, int param) {
+ AudioSystem::OutputDescriptor desc;
+ void *param2 = 0;
+
+ LOGV("PlaybackThread::audioConfigChanged, thread %p, event %d, param %d", this, event, param);
+
+ switch (event) {
+ case AudioSystem::OUTPUT_OPENED:
+ case AudioSystem::OUTPUT_CONFIG_CHANGED:
+ desc.channels = mChannelCount;
+ desc.samplingRate = mSampleRate;
+ desc.format = mFormat;
+ desc.frameCount = mFrameCount;
+ desc.latency = latency();
+ param2 = &desc;
+ break;
+
+ case AudioSystem::STREAM_CONFIG_CHANGED:
+ param2 = &param;
+ case AudioSystem::OUTPUT_CLOSED:
+ default:
+ break;
+ }
+ mAudioFlinger->audioConfigChanged(event, this, param2);
+}
+
+void AudioFlinger::PlaybackThread::readOutputParameters()
+{
+ mSampleRate = mOutput->sampleRate();
+ mChannelCount = AudioSystem::popCount(mOutput->channels());
+
+ mFormat = mOutput->format();
+ mFrameSize = mOutput->frameSize();
+ mFrameCount = mOutput->bufferSize() / mFrameSize;
+
+ mMinBytesToWrite = (mOutput->latency() * mSampleRate * mFrameSize) / 1000;
+ // FIXME - Current mixer implementation only supports stereo output: Always
+ // Allocate a stereo buffer even if HW output is mono.
+ if (mMixBuffer != NULL) delete[] mMixBuffer;
+ mMixBuffer = new int16_t[mFrameCount * 2];
+ memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));
+}
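
// A worked example of the arithmetic readOutputParameters() performs, using assumed
// hardware values (44100 Hz, 16-bit stereo, a 4096-byte HAL buffer, 50 ms latency);
// the numbers are purely illustrative.
#include <cstdio>

int main()
{
    unsigned sampleRate = 44100;
    unsigned frameSize  = 2 /*channels*/ * sizeof(short);      // 4 bytes per frame
    unsigned bufferSize = 4096;                                 // bytes per HAL buffer
    unsigned latencyMs  = 50;

    unsigned frameCount      = bufferSize / frameSize;                      // 1024 frames
    unsigned minBytesToWrite = (latencyMs * sampleRate * frameSize) / 1000; // 8820 bytes

    printf("frameCount=%u minBytesToWrite=%u\n", frameCount, minBytesToWrite);
    return 0;
}
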
+
+// ----------------------------------------------------------------------------
+
+AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output)
+ : PlaybackThread(audioFlinger, output),
+ mAudioMixer(0)
+{
+ mType = PlaybackThread::MIXER;
+ mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
+
+ // FIXME - Current mixer implementation only supports stereo output
+ if (mChannelCount == 1) {
+ LOGE("Invalid audio hardware channel count");
+ }
+}
+
+AudioFlinger::MixerThread::~MixerThread()
+{
+ delete mAudioMixer;
+}
+
bool AudioFlinger::MixerThread::threadLoop()
{
unsigned long sleepTime = kBufferRecoveryInUsecs;
int16_t* curBuf = mMixBuffer;
Vector< sp<Track> > tracksToRemove;
size_t enabledTracks = 0;
- nsecs_t standbyTime = systemTime();
- size_t mixBufferSize = mFrameCount*mChannelCount*sizeof(int16_t);
+ nsecs_t standbyTime = systemTime();
+ size_t mixBufferSize = mFrameCount * mFrameSize;
nsecs_t maxPeriod = seconds(mFrameCount) / mSampleRate * 2;
-#ifdef WITH_A2DP
- bool outputTrackActive = false;
-#endif
+ while (!exitPending())
+ {
+ processConfigEvents();
- do {
enabledTracks = 0;
- { // scope for the AudioFlinger::mLock
-
- Mutex::Autolock _l(mAudioFlinger->mLock);
+ { // scope for mLock
-#ifdef WITH_A2DP
- if (mOutputTrack != NULL && !mAudioFlinger->isA2dpEnabled()) {
- if (outputTrackActive) {
- mAudioFlinger->mLock.unlock();
- mOutputTrack->stop();
- mAudioFlinger->mLock.lock();
- outputTrackActive = false;
- }
+ Mutex::Autolock _l(mLock);
+
+ if (checkForNewParameters_l()) {
+ mixBufferSize = mFrameCount * mFrameSize;
+ maxPeriod = seconds(mFrameCount) / mSampleRate * 2;
}
- mAudioFlinger->checkA2dpEnabledChange_l();
-#endif
const SortedVector< wp<Track> >& activeTracks = mActiveTracks;
// put audio hardware into standby after short delay
- if UNLIKELY(!activeTracks.size() && systemTime() > standbyTime) {
- // wait until we have something to do...
- LOGV("Audio hardware entering standby, output %d\n", mOutputType);
+ if UNLIKELY((!activeTracks.size() && systemTime() > standbyTime) ||
+ mSuspended) {
if (!mStandby) {
+ LOGV("Audio hardware entering standby, mixer %p, mSuspended %d\n", this, mSuspended);
mOutput->standby();
mStandby = true;
+ mBytesWritten = 0;
}
-
-#ifdef WITH_A2DP
- if (outputTrackActive) {
- mAudioFlinger->mLock.unlock();
- mOutputTrack->stop();
- mAudioFlinger->mLock.lock();
- outputTrackActive = false;
- }
-#endif
- if (mOutputType == AudioSystem::AUDIO_OUTPUT_HARDWARE) {
- mAudioFlinger->handleForcedSpeakerRoute(FORCE_ROUTE_RESTORE);
- }
- // we're about to wait, flush the binder command buffer
- IPCThreadState::self()->flushCommands();
- mAudioFlinger->mWaitWorkCV.wait(mAudioFlinger->mLock);
- LOGV("Audio hardware exiting standby, output %d\n", mOutputType);
-
- if (mMasterMute == false) {
- char value[PROPERTY_VALUE_MAX];
- property_get("ro.audio.silent", value, "0");
- if (atoi(value)) {
- LOGD("Silence is golden");
- setMasterMute(true);
- }
- }
-
- standbyTime = systemTime() + kStandbyTimeInNsecs;
- continue;
- }
- // Forced route to speaker is handled by hardware mixer thread
- if (mOutputType == AudioSystem::AUDIO_OUTPUT_HARDWARE) {
- mAudioFlinger->handleForcedSpeakerRoute(CHECK_ROUTE_RESTORE_TIME);
- }
+ if (!activeTracks.size() && mConfigEvents.isEmpty()) {
+ // we're about to wait, flush the binder command buffer
+ IPCThreadState::self()->flushCommands();
- // find out which tracks need to be processed
- size_t count = activeTracks.size();
- for (size_t i=0 ; i<count ; i++) {
- sp<Track> t = activeTracks[i].promote();
- if (t == 0) continue;
+ if (exitPending()) break;
- Track* const track = t.get();
- audio_track_cblk_t* cblk = track->cblk();
+ // wait until we have something to do...
+ LOGV("MixerThread %p TID %d going to sleep\n", this, gettid());
+ mWaitWorkCV.wait(mLock);
+ LOGV("MixerThread %p TID %d waking up\n", this, gettid());
- // The first time a track is added we wait
- // for all its buffers to be filled before processing it
- mAudioMixer->setActiveTrack(track->name());
- if (cblk->framesReady() && (track->isReady() || track->isStopped()) &&
- !track->isPaused())
- {
- //LOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
-
- // compute volume for this track
- int16_t left, right;
- if (track->isMuted() || mMasterMute || track->isPausing()) {
- left = right = 0;
- if (track->isPausing()) {
- LOGV("paused(%d)", track->name());
- track->setPaused();
+ if (mMasterMute == false) {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("ro.audio.silent", value, "0");
+ if (atoi(value)) {
+ LOGD("Silence is golden");
+ setMasterMute(true);
}
- } else {
- float typeVolume = mStreamTypes[track->type()].volume;
- float v = mMasterVolume * typeVolume;
- float v_clamped = v * cblk->volume[0];
- if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
- left = int16_t(v_clamped);
- v_clamped = v * cblk->volume[1];
- if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
- right = int16_t(v_clamped);
}
- // XXX: these things DON'T need to be done each time
- mAudioMixer->setBufferProvider(track);
- mAudioMixer->enable(AudioMixer::MIXING);
-
- int param;
- if ( track->mFillingUpStatus == Track::FS_FILLED) {
- // no ramp for the first volume setting
- track->mFillingUpStatus = Track::FS_ACTIVE;
- if (track->mState == TrackBase::RESUMING) {
- track->mState = TrackBase::ACTIVE;
- param = AudioMixer::RAMP_VOLUME;
- } else {
- param = AudioMixer::VOLUME;
- }
- } else {
- param = AudioMixer::RAMP_VOLUME;
- }
- mAudioMixer->setParameter(param, AudioMixer::VOLUME0, left);
- mAudioMixer->setParameter(param, AudioMixer::VOLUME1, right);
- mAudioMixer->setParameter(
- AudioMixer::TRACK,
- AudioMixer::FORMAT, track->format());
- mAudioMixer->setParameter(
- AudioMixer::TRACK,
- AudioMixer::CHANNEL_COUNT, track->channelCount());
- mAudioMixer->setParameter(
- AudioMixer::RESAMPLE,
- AudioMixer::SAMPLE_RATE,
- int(cblk->sampleRate));
-
- // reset retry count
- track->mRetryCount = kMaxTrackRetries;
- enabledTracks++;
- } else {
- //LOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
- if (track->isStopped()) {
- track->reset();
- }
- if (track->isTerminated() || track->isStopped() || track->isPaused()) {
- // We have consumed all the buffers of this track.
- // Remove it from the list of active tracks.
- LOGV("remove(%d) from active list", track->name());
- tracksToRemove.add(track);
- } else {
- // No buffers for this track. Give it a few chances to
- // fill a buffer, then remove it from active list.
- if (--(track->mRetryCount) <= 0) {
- LOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
- tracksToRemove.add(track);
- }
- }
- // LOGV("disable(%d)", track->name());
- mAudioMixer->disable(AudioMixer::MIXING);
+ standbyTime = systemTime() + kStandbyTimeInNsecs;
+ continue;
}
}
- // remove all the tracks that need to be...
- count = tracksToRemove.size();
- if (UNLIKELY(count)) {
- for (size_t i=0 ; i<count ; i++) {
- const sp<Track>& track = tracksToRemove[i];
- removeActiveTrack_l(track);
- if (track->isTerminated()) {
- mTracks.remove(track);
- deleteTrackName_l(track->mName);
- }
- }
- }
+ enabledTracks = prepareTracks_l(activeTracks, &tracksToRemove);
}
-
+
if (LIKELY(enabledTracks)) {
// mix buffers...
mAudioMixer->process(curBuf);
-#ifdef WITH_A2DP
- if (mOutputTrack != NULL && mAudioFlinger->isA2dpEnabled()) {
- if (!outputTrackActive) {
- LOGV("starting output track in mixer for output %d", mOutputType);
- mOutputTrack->start();
- outputTrackActive = true;
- }
- mOutputTrack->write(curBuf, mFrameCount);
- }
-#endif
-
// output audio to hardware
- mLastWriteTime = systemTime();
- mInWrite = true;
- mOutput->write(curBuf, mixBufferSize);
- mNumWrites++;
- mInWrite = false;
- mStandby = false;
- nsecs_t temp = systemTime();
- standbyTime = temp + kStandbyTimeInNsecs;
- nsecs_t delta = temp - mLastWriteTime;
- if (delta > maxPeriod) {
- LOGW("write blocked for %llu msecs", ns2ms(delta));
- mNumDelayedWrites++;
- }
- sleepTime = kBufferRecoveryInUsecs;
- } else {
-#ifdef WITH_A2DP
- if (mOutputTrack != NULL && mAudioFlinger->isA2dpEnabled()) {
- if (outputTrackActive) {
- mOutputTrack->write(curBuf, 0);
- if (mOutputTrack->bufferQueueEmpty()) {
- mOutputTrack->stop();
- outputTrackActive = false;
- } else {
- standbyTime = systemTime() + kStandbyTimeInNsecs;
- }
+ if (mSuspended) {
+ usleep(kMaxBufferRecoveryInUsecs);
+ } else {
+ mLastWriteTime = systemTime();
+ mInWrite = true;
+ int bytesWritten = (int)mOutput->write(curBuf, mixBufferSize);
+ if (bytesWritten > 0) mBytesWritten += bytesWritten;
+ mNumWrites++;
+ mInWrite = false;
+ mStandby = false;
+ nsecs_t temp = systemTime();
+ standbyTime = temp + kStandbyTimeInNsecs;
+ nsecs_t delta = temp - mLastWriteTime;
+ if (delta > maxPeriod) {
+ LOGW("write blocked for %llu msecs", ns2ms(delta));
+ mNumDelayedWrites++;
}
+ sleepTime = kBufferRecoveryInUsecs;
}
-#endif
+ } else {
// There was nothing to mix this round, which means all
// active tracks were late. Sleep a little bit to give
// them another chance. If we're too late, the audio
// hardware will zero-fill for us.
- //LOGV("no buffers - usleep(%lu)", sleepTime);
+ // LOGV("thread %p no buffers - usleep(%lu)", this, sleepTime);
usleep(sleepTime);
if (sleepTime < kMaxBufferRecoveryInUsecs) {
sleepTime += kBufferRecoveryInUsecs;
@@ -1250,101 +1228,165 @@ bool AudioFlinger::MixerThread::threadLoop()
// since we can't guarantee the destructors won't acquire that
// same lock.
tracksToRemove.clear();
- } while (true);
-
- return false;
-}
+ }
-status_t AudioFlinger::MixerThread::readyToRun()
-{
- if (mSampleRate == 0) {
- LOGE("No working audio driver found.");
- return NO_INIT;
+ if (!mStandby) {
+ mOutput->standby();
}
- LOGI("AudioFlinger's thread ready to run for output %d", mOutputType);
- return NO_ERROR;
+ sendConfigEvent(AudioSystem::OUTPUT_CLOSED);
+ processConfigEvents();
+
+ LOGV("MixerThread %p exiting", this);
+ return false;
}
-void AudioFlinger::MixerThread::onFirstRef()
+// prepareTracks_l() must be called with ThreadBase::mLock held
+size_t AudioFlinger::MixerThread::prepareTracks_l(const SortedVector< wp<Track> >& activeTracks, Vector< sp<Track> > *tracksToRemove)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
-
- snprintf(buffer, SIZE, "Mixer Thread for output %d", mOutputType);
- run(buffer, ANDROID_PRIORITY_URGENT_AUDIO);
-}
+ size_t enabledTracks = 0;
+ // find out which tracks need to be processed
+ size_t count = activeTracks.size();
+ for (size_t i=0 ; i<count ; i++) {
+ sp<Track> t = activeTracks[i].promote();
+ if (t == 0) continue;
-// MixerThread::createTrack_l() must be called with AudioFlinger::mLock held
-sp<AudioFlinger::MixerThread::Track> AudioFlinger::MixerThread::createTrack_l(
- const sp<AudioFlinger::Client>& client,
- int streamType,
- uint32_t sampleRate,
- int format,
- int channelCount,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- status_t *status)
-{
- sp<Track> track;
- status_t lStatus;
-
- // Resampler implementation limits input sampling rate to 2 x output sampling rate.
- if (sampleRate > mSampleRate*2) {
- LOGE("Sample rate out of range: %d mSampleRate %d", sampleRate, mSampleRate);
- lStatus = BAD_VALUE;
- goto Exit;
- }
+ Track* const track = t.get();
+ audio_track_cblk_t* cblk = track->cblk();
+ // The first time a track is added we wait
+ // for all its buffers to be filled before processing it
+ mAudioMixer->setActiveTrack(track->name());
+ if (cblk->framesReady() && (track->isReady() || track->isStopped()) &&
+ !track->isPaused())
+ {
+ //LOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
+
+ // compute volume for this track
+ int16_t left, right;
+ if (track->isMuted() || mMasterMute || track->isPausing() ||
+ mStreamTypes[track->type()].mute) {
+ left = right = 0;
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ } else {
+ float typeVolume = mStreamTypes[track->type()].volume;
+ float v = mMasterVolume * typeVolume;
+ float v_clamped = v * cblk->volume[0];
+ if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+ left = int16_t(v_clamped);
+ v_clamped = v * cblk->volume[1];
+ if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+ right = int16_t(v_clamped);
+ }
- if (mSampleRate == 0) {
- LOGE("Audio driver not initialized.");
- lStatus = NO_INIT;
- goto Exit;
+ // XXX: these things DON'T need to be done each time
+ mAudioMixer->setBufferProvider(track);
+ mAudioMixer->enable(AudioMixer::MIXING);
+
+ int param;
+ if ( track->mFillingUpStatus == Track::FS_FILLED) {
+ // no ramp for the first volume setting
+ track->mFillingUpStatus = Track::FS_ACTIVE;
+ if (track->mState == TrackBase::RESUMING) {
+ track->mState = TrackBase::ACTIVE;
+ param = AudioMixer::RAMP_VOLUME;
+ } else {
+ param = AudioMixer::VOLUME;
+ }
+ } else {
+ param = AudioMixer::RAMP_VOLUME;
+ }
+ mAudioMixer->setParameter(param, AudioMixer::VOLUME0, left);
+ mAudioMixer->setParameter(param, AudioMixer::VOLUME1, right);
+ mAudioMixer->setParameter(
+ AudioMixer::TRACK,
+ AudioMixer::FORMAT, track->format());
+ mAudioMixer->setParameter(
+ AudioMixer::TRACK,
+ AudioMixer::CHANNEL_COUNT, track->channelCount());
+ mAudioMixer->setParameter(
+ AudioMixer::RESAMPLE,
+ AudioMixer::SAMPLE_RATE,
+ int(cblk->sampleRate));
+
+ // reset retry count
+ track->mRetryCount = kMaxTrackRetries;
+ enabledTracks++;
+ } else {
+ //LOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
+ if (track->isStopped()) {
+ track->reset();
+ }
+ if (track->isTerminated() || track->isStopped() || track->isPaused()) {
+ // We have consumed all the buffers of this track.
+ // Remove it from the list of active tracks.
+ tracksToRemove->add(track);
+ mAudioMixer->disable(AudioMixer::MIXING);
+ } else {
+ // No buffers for this track. Give it a few chances to
+ // fill a buffer, then remove it from active list.
+ if (--(track->mRetryCount) <= 0) {
+ LOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
+ tracksToRemove->add(track);
+ }
+ // For tracks using a static shared memory buffer, make sure that we have
+ // written enough data to the audio hardware before disabling the track.
+ // NOTE: this condition is reached before track->mRetryCount <= 0, so we
+ // don't need to care about the code above that removes the track from the active list.
+ if ((track->mSharedBuffer == 0) || (mBytesWritten >= mMinBytesToWrite)) {
+ mAudioMixer->disable(AudioMixer::MIXING);
+ } else {
+ enabledTracks++;
+ }
+ }
+ }
}
- track = new Track(this, client, streamType, sampleRate, format,
- channelCount, frameCount, sharedBuffer);
- if (track->getCblk() == NULL) {
- lStatus = NO_MEMORY;
- goto Exit;
+ // remove all the tracks that need to be...
+ count = tracksToRemove->size();
+ if (UNLIKELY(count)) {
+ for (size_t i=0 ; i<count ; i++) {
+ const sp<Track>& track = tracksToRemove->itemAt(i);
+ mActiveTracks.remove(track);
+ if (track->isTerminated()) {
+ mTracks.remove(track);
+ deleteTrackName_l(track->mName);
+ }
+ }
}
- mTracks.add(track);
- lStatus = NO_ERROR;
-Exit:
- if(status) {
- *status = lStatus;
- }
- return track;
+ return enabledTracks;
}
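
// A worked example of the per-track gain computed in prepareTracks_l():
// master volume times stream-type volume times the per-channel volume from the
// track's control block, clamped to MAX_GAIN and handed to the mixer as int16_t.
// The 0x1000 == unity fixed-point reading of the control-block volume and the
// MAX_GAIN value used below are assumptions for illustration.
#include <cstdint>
#include <cstdio>

int main()
{
    const float MAX_GAIN = 4096.0f;      // assumed clamp, matching 0x1000 fixed point
    float masterVolume = 1.0f;
    float typeVolume   = 0.75f;          // e.g. current MUSIC stream volume
    uint16_t cblkLeft  = 0x1000;         // track-requested left gain (assumed unity)

    float v = masterVolume * typeVolume;
    float v_clamped = v * cblkLeft;      // 0.75 * 4096 = 3072
    if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
    int16_t left = int16_t(v_clamped);   // value handed to AudioMixer::VOLUME0

    printf("left gain = %d (of %d)\n", left, int(MAX_GAIN));
    return 0;
}
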
-// getTracks_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::MixerThread::getTracks_l(
+void AudioFlinger::MixerThread::getTracks(
SortedVector < sp<Track> >& tracks,
- SortedVector < wp<Track> >& activeTracks)
+ SortedVector < wp<Track> >& activeTracks,
+ int streamType)
{
+ LOGV ("MixerThread::getTracks() mixer %p, mTracks.size %d, mActiveTracks.size %d", this, mTracks.size(), mActiveTracks.size());
+ Mutex::Autolock _l(mLock);
size_t size = mTracks.size();
- LOGV ("MixerThread::getTracks_l() for output %d, mTracks.size %d, mActiveTracks.size %d", mOutputType, mTracks.size(), mActiveTracks.size());
for (size_t i = 0; i < size; i++) {
sp<Track> t = mTracks[i];
- if (AudioSystem::routedToA2dpOutput(t->mStreamType)) {
+ if (t->type() == streamType) {
tracks.add(t);
int j = mActiveTracks.indexOf(t);
if (j >= 0) {
t = mActiveTracks[j].promote();
if (t != NULL) {
- activeTracks.add(t);
- }
+ activeTracks.add(t);
+ }
}
}
}
size = activeTracks.size();
for (size_t i = 0; i < size; i++) {
- removeActiveTrack_l(activeTracks[i]);
+ mActiveTracks.remove(activeTracks[i]);
}
-
+
size = tracks.size();
for (size_t i = 0; i < size; i++) {
sp<Track> t = tracks[i];
@@ -1353,219 +1395,554 @@ void AudioFlinger::MixerThread::getTracks_l(
}
}
-// putTracks_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::MixerThread::putTracks_l(
+void AudioFlinger::MixerThread::putTracks(
SortedVector < sp<Track> >& tracks,
SortedVector < wp<Track> >& activeTracks)
{
-
- LOGV ("MixerThread::putTracks_l() for output %d, tracks.size %d, activeTracks.size %d", mOutputType, tracks.size(), activeTracks.size());
-
+ LOGV ("MixerThread::putTracks() mixer %p, tracks.size %d, activeTracks.size %d", this, tracks.size(), activeTracks.size());
+ Mutex::Autolock _l(mLock);
size_t size = tracks.size();
for (size_t i = 0; i < size ; i++) {
sp<Track> t = tracks[i];
int name = getTrackName_l();
if (name < 0) return;
-
+
t->mName = name;
- t->mMixerThread = this;
+ t->mThread = this;
mTracks.add(t);
int j = activeTracks.indexOf(t);
if (j >= 0) {
- addActiveTrack_l(t);
- }
+ mActiveTracks.add(t);
+ }
}
}
-uint32_t AudioFlinger::MixerThread::sampleRate() const
-{
- return mSampleRate;
-}
-
-int AudioFlinger::MixerThread::channelCount() const
+// getTrackName_l() must be called with ThreadBase::mLock held
+int AudioFlinger::MixerThread::getTrackName_l()
{
- return mChannelCount;
+ return mAudioMixer->getTrackName();
}
-int AudioFlinger::MixerThread::format() const
+// deleteTrackName_l() must be called with ThreadBase::mLock held
+void AudioFlinger::MixerThread::deleteTrackName_l(int name)
{
- return mFormat;
+ mAudioMixer->deleteTrackName(name);
}
-size_t AudioFlinger::MixerThread::frameCount() const
+// checkForNewParameters_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::MixerThread::checkForNewParameters_l()
{
- return mFrameCount;
-}
+ bool reconfig = false;
-uint32_t AudioFlinger::MixerThread::latency() const
-{
- if (mOutput) {
- return mOutput->latency();
- }
- else {
- return 0;
+ if (mNewParameters != "") {
+ status_t status = NO_ERROR;
+ AudioParameter param = AudioParameter(mNewParameters);
+ int value;
+ if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
+ if (value != AudioSystem::PCM_16_BIT) {
+ status = BAD_VALUE;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
+ if (value != AudioSystem::CHANNEL_OUT_STEREO) {
+ status = BAD_VALUE;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
+ // do not accept frame count changes if tracks are open as the track buffer
+ // size depends on frame count and correct behavior would not be guaranteed
+ // if frame count is changed after track creation
+ if (!mTracks.isEmpty()) {
+ status = INVALID_OPERATION;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (status == NO_ERROR) {
+ status = mOutput->setParameters(mNewParameters);
+ if (!mStandby && status == INVALID_OPERATION) {
+ mOutput->standby();
+ mStandby = true;
+ mBytesWritten = 0;
+ status = mOutput->setParameters(mNewParameters);
+ }
+ if (status == NO_ERROR && reconfig) {
+ delete mAudioMixer;
+ readOutputParameters();
+ mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
+ for (size_t i = 0; i < mTracks.size() ; i++) {
+ int name = getTrackName_l();
+ if (name < 0) break;
+ mTracks[i]->mName = name;
+ }
+ sendConfigEvent(AudioSystem::OUTPUT_CONFIG_CHANGED);
+ }
+ }
+ mParamStatus = status;
+ mNewParameters = "";
+ mParamCond.signal();
}
+ return reconfig;
}
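
// A minimal sketch of composing a parameter string of the kind checkForNewParameters_l()
// consumes. The literal key name "sampling_rate" is meant to mirror
// AudioParameter::keySamplingRate but is an assumption here.
#include <string>

static std::string makeSamplingRateRequest(unsigned newRate)
{
    return "sampling_rate=" + std::to_string(newRate);   // e.g. "sampling_rate=48000"
}
// A request like this is only accepted by the mixer thread if the stream stays
// 16-bit stereo PCM, and a frame-count change is refused while tracks are open.
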
-status_t AudioFlinger::MixerThread::setMasterVolume(float value)
+status_t AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
{
- mMasterVolume = value;
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ PlaybackThread::dumpInternals(fd, args);
+
+ snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
+ result.append(buffer);
+ write(fd, result.string(), result.size());
return NO_ERROR;
}
-status_t AudioFlinger::MixerThread::setMasterMute(bool muted)
+// ----------------------------------------------------------------------------
+AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output)
+ : PlaybackThread(audioFlinger, output),
+ mLeftVolume (1.0), mRightVolume(1.0)
{
- mMasterMute = muted;
- return NO_ERROR;
+ mType = PlaybackThread::DIRECT;
}
-float AudioFlinger::MixerThread::masterVolume() const
+AudioFlinger::DirectOutputThread::~DirectOutputThread()
{
- return mMasterVolume;
}
-bool AudioFlinger::MixerThread::masterMute() const
+
+bool AudioFlinger::DirectOutputThread::threadLoop()
{
- return mMasterMute;
+ unsigned long sleepTime = kBufferRecoveryInUsecs;
+ sp<Track> trackToRemove;
+ sp<Track> activeTrack;
+ nsecs_t standbyTime = systemTime();
+ int8_t *curBuf;
+ size_t mixBufferSize = mFrameCount*mFrameSize;
+
+ while (!exitPending())
+ {
+ processConfigEvents();
+
+ { // scope for the mLock
+
+ Mutex::Autolock _l(mLock);
+
+ if (checkForNewParameters_l()) {
+ mixBufferSize = mFrameCount*mFrameSize;
+ }
+
+ // put audio hardware into standby after short delay
+ if UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
+ mSuspended) {
+ // wait until we have something to do...
+ if (!mStandby) {
+ LOGV("Audio hardware entering standby, mixer %p\n", this);
+ mOutput->standby();
+ mStandby = true;
+ mBytesWritten = 0;
+ }
+
+ if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
+ // we're about to wait, flush the binder command buffer
+ IPCThreadState::self()->flushCommands();
+
+ if (exitPending()) break;
+
+ LOGV("DirectOutputThread %p TID %d going to sleep\n", this, gettid());
+ mWaitWorkCV.wait(mLock);
+ LOGV("DirectOutputThread %p TID %d waking up in active mode\n", this, gettid());
+
+ if (mMasterMute == false) {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("ro.audio.silent", value, "0");
+ if (atoi(value)) {
+ LOGD("Silence is golden");
+ setMasterMute(true);
+ }
+ }
+
+ standbyTime = systemTime() + kStandbyTimeInNsecs;
+ continue;
+ }
+ }
+
+ // find out which tracks need to be processed
+ if (mActiveTracks.size() != 0) {
+ sp<Track> t = mActiveTracks[0].promote();
+ if (t == 0) continue;
+
+ Track* const track = t.get();
+ audio_track_cblk_t* cblk = track->cblk();
+
+ // The first time a track is added we wait
+ // for all its buffers to be filled before processing it
+ if (cblk->framesReady() && (track->isReady() || track->isStopped()) &&
+ !track->isPaused())
+ {
+ //LOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
+
+ // compute volume for this track
+ float left, right;
+ if (track->isMuted() || mMasterMute || track->isPausing() ||
+ mStreamTypes[track->type()].mute) {
+ left = right = 0;
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ } else {
+ float typeVolume = mStreamTypes[track->type()].volume;
+ float v = mMasterVolume * typeVolume;
+ float v_clamped = v * cblk->volume[0];
+ if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+ left = v_clamped/MAX_GAIN;
+ v_clamped = v * cblk->volume[1];
+ if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+ right = v_clamped/MAX_GAIN;
+ }
+
+ if (left != mLeftVolume || right != mRightVolume) {
+ mOutput->setVolume(left, right);
+ mLeftVolume = left;
+ mRightVolume = right;
+ }
+
+ if (track->mFillingUpStatus == Track::FS_FILLED) {
+ track->mFillingUpStatus = Track::FS_ACTIVE;
+ if (track->mState == TrackBase::RESUMING) {
+ track->mState = TrackBase::ACTIVE;
+ }
+ }
+
+ // reset retry count
+ track->mRetryCount = kMaxTrackRetries;
+ activeTrack = t;
+ } else {
+ //LOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
+ if (track->isStopped()) {
+ track->reset();
+ }
+ if (track->isTerminated() || track->isStopped() || track->isPaused()) {
+ // We have consumed all the buffers of this track.
+ // Remove it from the list of active tracks.
+ trackToRemove = track;
+ } else {
+ // No buffers for this track. Give it a few chances to
+ // fill a buffer, then remove it from active list.
+ if (--(track->mRetryCount) <= 0) {
+ LOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
+ trackToRemove = track;
+ }
+
+ // For tracks using a static shared memory buffer, make sure that we have
+ // written enough data to the audio hardware before disabling the track.
+ // NOTE: this condition is reached before track->mRetryCount <= 0, so we
+ // don't need to care about the code above that removes the track from the active list.
+ if ((track->mSharedBuffer != 0) && (mBytesWritten < mMinBytesToWrite)) {
+ activeTrack = t;
+ }
+ }
+ }
+ }
+
+ // remove all the tracks that need to be...
+ if (UNLIKELY(trackToRemove != 0)) {
+ mActiveTracks.remove(trackToRemove);
+ if (trackToRemove->isTerminated()) {
+ mTracks.remove(trackToRemove);
+ deleteTrackName_l(trackToRemove->mName);
+ }
+ }
+ }
+
+ if (activeTrack != 0) {
+ AudioBufferProvider::Buffer buffer;
+ size_t frameCount = mFrameCount;
+ curBuf = (int8_t *)mMixBuffer;
+ // output audio to hardware
+ mLastWriteTime = systemTime();
+ mInWrite = true;
+ while(frameCount) {
+ buffer.frameCount = frameCount;
+ activeTrack->getNextBuffer(&buffer);
+ if (UNLIKELY(buffer.raw == 0)) {
+ memset(curBuf, 0, frameCount * mFrameSize);
+ break;
+ }
+ memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
+ frameCount -= buffer.frameCount;
+ curBuf += buffer.frameCount * mFrameSize;
+ activeTrack->releaseBuffer(&buffer);
+ }
+ if (mSuspended) {
+ usleep(kMaxBufferRecoveryInUsecs);
+ } else {
+ int bytesWritten = (int)mOutput->write(mMixBuffer, mixBufferSize);
+ if (bytesWritten > 0) mBytesWritten += bytesWritten;
+ mNumWrites++;
+ mInWrite = false;
+ mStandby = false;
+ nsecs_t temp = systemTime();
+ standbyTime = temp + kStandbyTimeInNsecs;
+ sleepTime = kBufferRecoveryInUsecs;
+ }
+ } else {
+ // There was nothing to mix this round, which means all
+ // active tracks were late. Sleep a little bit to give
+ // them another chance. If we're too late, the audio
+ // hardware will zero-fill for us.
+ //LOGV("no buffers - usleep(%lu)", sleepTime);
+ usleep(sleepTime);
+ if (sleepTime < kMaxBufferRecoveryInUsecs) {
+ sleepTime += kBufferRecoveryInUsecs;
+ }
+ }
+
+ // finally let go of removed track, without the lock held
+ // since we can't guarantee the destructors won't acquire that
+ // same lock.
+ trackToRemove.clear();
+ activeTrack.clear();
+ }
+
+ if (!mStandby) {
+ mOutput->standby();
+ }
+ sendConfigEvent(AudioSystem::OUTPUT_CLOSED);
+ processConfigEvents();
+
+ LOGV("DirectOutputThread %p exiting", this);
+ return false;
}
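
// A minimal standalone sketch of the gather loop DirectOutputThread::threadLoop() uses
// to fill one hardware buffer from its single active track, padding with silence on
// underrun before writing the whole buffer in one call. The provider interface here is
// hypothetical, not the framework's AudioBufferProvider.
#include <cstdint>
#include <cstring>

struct Chunk { const uint8_t* data; size_t frames; };
struct Provider { virtual Chunk next(size_t maxFrames) = 0; virtual ~Provider() {} };

static void fillBuffer(Provider& track, uint8_t* out, size_t frameCount, size_t frameSize)
{
    size_t remaining = frameCount;
    while (remaining) {
        Chunk c = track.next(remaining);
        if (c.data == nullptr || c.frames == 0) {
            memset(out, 0, remaining * frameSize);   // underrun: pad with silence
            break;
        }
        memcpy(out, c.data, c.frames * frameSize);
        out       += c.frames * frameSize;
        remaining -= c.frames;
    }
    // The caller then writes frameCount * frameSize bytes to the output stream in one go.
}
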
-status_t AudioFlinger::MixerThread::setStreamVolume(int stream, float value)
+// getTrackName_l() must be called with ThreadBase::mLock held
+int AudioFlinger::DirectOutputThread::getTrackName_l()
{
- mStreamTypes[stream].volume = value;
- return NO_ERROR;
+ return 0;
}
-status_t AudioFlinger::MixerThread::setStreamMute(int stream, bool muted)
+// deleteTrackName_l() must be called with ThreadBase::mLock held
+void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name)
{
- mStreamTypes[stream].mute = muted;
- return NO_ERROR;
}
-float AudioFlinger::MixerThread::streamVolume(int stream) const
+// checkForNewParameters_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::DirectOutputThread::checkForNewParameters_l()
{
- return mStreamTypes[stream].volume;
+ bool reconfig = false;
+
+ if (mNewParameters != "") {
+ status_t status = NO_ERROR;
+ AudioParameter param = AudioParameter(mNewParameters);
+ int value;
+ if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
+ // do not accept frame count changes if tracks are open as the track buffer
+ // size depends on frame count and correct behavior would not be guaranteed
+ // if frame count is changed after track creation
+ if (!mTracks.isEmpty()) {
+ status = INVALID_OPERATION;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (status == NO_ERROR) {
+ status = mOutput->setParameters(mNewParameters);
+ if (!mStandby && status == INVALID_OPERATION) {
+ mOutput->standby();
+ mStandby = true;
+ mBytesWritten = 0;
+ status = mOutput->setParameters(mNewParameters);
+ }
+ if (status == NO_ERROR && reconfig) {
+ readOutputParameters();
+ sendConfigEvent(AudioSystem::OUTPUT_CONFIG_CHANGED);
+ }
+ }
+ mParamStatus = status;
+ mNewParameters = "";
+ mParamCond.signal();
+ }
+ return reconfig;
}
-bool AudioFlinger::MixerThread::streamMute(int stream) const
+// ----------------------------------------------------------------------------
+
+AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, AudioFlinger::MixerThread* mainThread)
+ : MixerThread(audioFlinger, mainThread->getOutput())
{
- return mStreamTypes[stream].mute;
+ mType = PlaybackThread::DUPLICATING;
+ addOutputTrack(mainThread);
}
-// isMusicActive_l() must be called with AudioFlinger::mLock held
-bool AudioFlinger::MixerThread::isMusicActive_l() const
+AudioFlinger::DuplicatingThread::~DuplicatingThread()
{
- size_t count = mActiveTracks.size();
- for (size_t i = 0 ; i < count ; ++i) {
- sp<Track> t = mActiveTracks[i].promote();
- if (t == 0) continue;
- Track* const track = t.get();
- if (t->mStreamType == AudioSystem::MUSIC)
- return true;
- }
- return false;
+ mOutputTracks.clear();
}
-// addTrack_l() must be called with AudioFlinger::mLock held
-status_t AudioFlinger::MixerThread::addTrack_l(const sp<Track>& track)
+bool AudioFlinger::DuplicatingThread::threadLoop()
{
- status_t status = ALREADY_EXISTS;
+ unsigned long sleepTime = kBufferRecoveryInUsecs;
+ int16_t* curBuf = mMixBuffer;
+ Vector< sp<Track> > tracksToRemove;
+ size_t enabledTracks = 0;
+ nsecs_t standbyTime = systemTime();
+ size_t mixBufferSize = mFrameCount*mFrameSize;
+ SortedVector< sp<OutputTrack> > outputTracks;
- // here the track could be either new, or restarted
- // in both cases "unstop" the track
- if (track->isPaused()) {
- track->mState = TrackBase::RESUMING;
- LOGV("PAUSED => RESUMING (%d)", track->name());
- } else {
- track->mState = TrackBase::ACTIVE;
- LOGV("? => ACTIVE (%d)", track->name());
- }
- // set retry count for buffer fill
- track->mRetryCount = kMaxTrackStartupRetries;
- if (mActiveTracks.indexOf(track) < 0) {
- // the track is newly added, make sure it fills up all its
- // buffers before playing. This is to ensure the client will
- // effectively get the latency it requested.
- track->mFillingUpStatus = Track::FS_FILLING;
- track->mResetDone = false;
- addActiveTrack_l(track);
- status = NO_ERROR;
- }
-
- LOGV("mWaitWorkCV.broadcast");
- mAudioFlinger->mWaitWorkCV.broadcast();
+ while (!exitPending())
+ {
+ processConfigEvents();
- return status;
-}
+ enabledTracks = 0;
+ { // scope for the mLock
-// destroyTrack_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::MixerThread::destroyTrack_l(const sp<Track>& track)
-{
- track->mState = TrackBase::TERMINATED;
- if (mActiveTracks.indexOf(track) < 0) {
- LOGV("remove track (%d) and delete from mixer", track->name());
- mTracks.remove(track);
- deleteTrackName_l(track->name());
- }
-}
+ Mutex::Autolock _l(mLock);
-// addActiveTrack_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::MixerThread::addActiveTrack_l(const wp<Track>& t)
-{
- mActiveTracks.add(t);
+ if (checkForNewParameters_l()) {
+ mixBufferSize = mFrameCount*mFrameSize;
+ }
- // Force routing to speaker for certain stream types
- // The forced routing to speaker is managed by hardware mixer
- if (mOutputType == AudioSystem::AUDIO_OUTPUT_HARDWARE) {
- sp<Track> track = t.promote();
- if (track == NULL) return;
-
- if (streamForcedToSpeaker(track->type())) {
- mAudioFlinger->handleForcedSpeakerRoute(ACTIVE_TRACK_ADDED);
- }
- }
-}
+ const SortedVector< wp<Track> >& activeTracks = mActiveTracks;
-// removeActiveTrack_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::MixerThread::removeActiveTrack_l(const wp<Track>& t)
-{
- mActiveTracks.remove(t);
+ for (size_t i = 0; i < mOutputTracks.size(); i++) {
+ outputTracks.add(mOutputTracks[i]);
+ }
- // Force routing to speaker for certain stream types
- // The forced routing to speaker is managed by hardware mixer
- if (mOutputType == AudioSystem::AUDIO_OUTPUT_HARDWARE) {
- sp<Track> track = t.promote();
- if (track == NULL) return;
+ // put audio hardware into standby after a short delay
+ if UNLIKELY((!activeTracks.size() && systemTime() > standbyTime) ||
+ mSuspended) {
+ if (!mStandby) {
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ mLock.unlock();
+ outputTracks[i]->stop();
+ mLock.lock();
+ }
+ mStandby = true;
+ mBytesWritten = 0;
+ }
- if (streamForcedToSpeaker(track->type())) {
- mAudioFlinger->handleForcedSpeakerRoute(ACTIVE_TRACK_REMOVED);
+ if (!activeTracks.size() && mConfigEvents.isEmpty()) {
+ // we're about to wait, flush the binder command buffer
+ IPCThreadState::self()->flushCommands();
+ outputTracks.clear();
+
+ if (exitPending()) break;
+
+ LOGV("DuplicatingThread %p TID %d going to sleep\n", this, gettid());
+ mWaitWorkCV.wait(mLock);
+ LOGV("DuplicatingThread %p TID %d waking up\n", this, gettid());
+ if (mMasterMute == false) {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("ro.audio.silent", value, "0");
+ if (atoi(value)) {
+ LOGD("Silence is golden");
+ setMasterMute(true);
+ }
+ }
+
+ standbyTime = systemTime() + kStandbyTimeInNsecs;
+ sleepTime = kBufferRecoveryInUsecs;
+ continue;
+ }
+ }
+
+ enabledTracks = prepareTracks_l(activeTracks, &tracksToRemove);
+ }
+
+ bool mustSleep = true;
+ if (LIKELY(enabledTracks)) {
+ // mix buffers...
+ mAudioMixer->process(curBuf);
+ if (!mSuspended) {
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ outputTracks[i]->write(curBuf, mFrameCount);
+ }
+ mStandby = false;
+ mustSleep = false;
+ mBytesWritten += mixBufferSize;
+ }
+ } else {
+ // flush remaining overflow buffers in output tracks
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ if (outputTracks[i]->isActive()) {
+ outputTracks[i]->write(curBuf, 0);
+ standbyTime = systemTime() + kStandbyTimeInNsecs;
+ mustSleep = false;
+ }
+ }
}
+ if (mustSleep) {
+// LOGV("threadLoop() sleeping %d", sleepTime);
+ usleep(sleepTime);
+ if (sleepTime < kMaxBufferRecoveryInUsecs) {
+ sleepTime += kBufferRecoveryInUsecs;
+ }
+ } else {
+ sleepTime = kBufferRecoveryInUsecs;
+ }
+
+ // finally let go of all our tracks, without the lock held
+ // since we can't guarantee the destructors won't acquire that
+ // same lock.
+ tracksToRemove.clear();
+ outputTracks.clear();
}
-}
-// getTrackName_l() must be called with AudioFlinger::mLock held
-int AudioFlinger::MixerThread::getTrackName_l()
-{
- return mAudioMixer->getTrackName();
+ if (!mStandby) {
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ mLock.unlock();
+ outputTracks[i]->stop();
+ mLock.lock();
+ }
+ }
+
+ sendConfigEvent(AudioSystem::OUTPUT_CLOSED);
+ processConfigEvents();
+
+ return false;
}
-// deleteTrackName_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::MixerThread::deleteTrackName_l(int name)
+void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
{
- mAudioMixer->deleteTrackName(name);
+ int frameCount = (3 * mFrameCount * mSampleRate) / thread->sampleRate();
+ OutputTrack *outputTrack = new OutputTrack((ThreadBase *)thread,
+ mSampleRate,
+ mFormat,
+ mChannelCount,
+ frameCount);
+ thread->setStreamVolume(AudioSystem::NUM_STREAM_TYPES, 1.0f);
+ mOutputTracks.add(outputTrack);
+ LOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
}
-size_t AudioFlinger::MixerThread::getOutputFrameCount()
+void AudioFlinger::DuplicatingThread::removeOutputTrack(MixerThread *thread)
{
- return mOutput->bufferSize() / mOutput->channelCount() / sizeof(int16_t);
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mOutputTracks.size(); i++) {
+ if (mOutputTracks[i]->thread() == (ThreadBase *)thread) {
+ mOutputTracks.removeAt(i);
+ return;
+ }
+ }
+ LOGV("removeOutputTrack(): unkonwn thread: %p", thread);
}
+
// ----------------------------------------------------------------------------
// TrackBase constructor must be called with AudioFlinger::mLock held
-AudioFlinger::MixerThread::TrackBase::TrackBase(
- const sp<MixerThread>& mixerThread,
+AudioFlinger::ThreadBase::TrackBase::TrackBase(
+ const wp<ThreadBase>& thread,
const sp<Client>& client,
uint32_t sampleRate,
int format,
@@ -1574,7 +1951,7 @@ AudioFlinger::MixerThread::TrackBase::TrackBase(
uint32_t flags,
const sp<IMemory>& sharedBuffer)
: RefBase(),
- mMixerThread(mixerThread),
+ mThread(thread),
mClient(client),
mFrameCount(0),
mState(IDLE),
@@ -1582,13 +1959,6 @@ AudioFlinger::MixerThread::TrackBase::TrackBase(
mFormat(format),
mFlags(flags & ~SYSTEM_FLAGS_MASK)
{
- mName = mixerThread->getTrackName_l();
- LOGV("TrackBase contructor name %d, calling thread %d", mName, IPCThreadState::self()->getCallingPid());
- if (mName < 0) {
- LOGE("no more track names availlable");
- return;
- }
-
LOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
// LOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
@@ -1642,16 +2012,19 @@ AudioFlinger::MixerThread::TrackBase::TrackBase(
}
}
-AudioFlinger::MixerThread::TrackBase::~TrackBase()
+AudioFlinger::PlaybackThread::TrackBase::~TrackBase()
{
if (mCblk) {
- mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
+ mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
+ if (mClient == NULL) {
+ delete mCblk;
+ }
}
mCblkMemory.clear(); // and free the shared memory
mClient.clear();
}
-void AudioFlinger::MixerThread::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+void AudioFlinger::PlaybackThread::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
buffer->raw = 0;
mFrameCount = buffer->frameCount;
@@ -1659,7 +2032,7 @@ void AudioFlinger::MixerThread::TrackBase::releaseBuffer(AudioBufferProvider::Bu
buffer->frameCount = 0;
}
-bool AudioFlinger::MixerThread::TrackBase::step() {
+bool AudioFlinger::PlaybackThread::TrackBase::step() {
bool result;
audio_track_cblk_t* cblk = this->cblk();
@@ -1671,7 +2044,7 @@ bool AudioFlinger::MixerThread::TrackBase::step() {
return result;
}
-void AudioFlinger::MixerThread::TrackBase::reset() {
+void AudioFlinger::PlaybackThread::TrackBase::reset() {
audio_track_cblk_t* cblk = this->cblk();
cblk->user = 0;
@@ -1682,27 +2055,27 @@ void AudioFlinger::MixerThread::TrackBase::reset() {
LOGV("TrackBase::reset");
}
-sp<IMemory> AudioFlinger::MixerThread::TrackBase::getCblk() const
+sp<IMemory> AudioFlinger::PlaybackThread::TrackBase::getCblk() const
{
return mCblkMemory;
}
-int AudioFlinger::MixerThread::TrackBase::sampleRate() const {
+int AudioFlinger::PlaybackThread::TrackBase::sampleRate() const {
return (int)mCblk->sampleRate;
}
-int AudioFlinger::MixerThread::TrackBase::channelCount() const {
+int AudioFlinger::PlaybackThread::TrackBase::channelCount() const {
return (int)mCblk->channels;
}
-void* AudioFlinger::MixerThread::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
+void* AudioFlinger::PlaybackThread::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
audio_track_cblk_t* cblk = this->cblk();
- int16_t *bufferStart = (int16_t *)mBuffer + (offset-cblk->serverBase)*cblk->channels;
- int16_t *bufferEnd = bufferStart + frames * cblk->channels;
+ int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase)*cblk->frameSize;
+ int8_t *bufferEnd = bufferStart + frames * cblk->frameSize;
// Check validity of returned pointer in case the track control block has been corrupted.
- if (bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd ||
- (cblk->channels == 2 && ((unsigned long)bufferStart & 3))) {
+ if (bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd ||
+ ((unsigned long)bufferStart & (unsigned long)(cblk->frameSize - 1))) {
LOGE("TrackBase::getBuffer buffer out of range:\n start: %p, end %p , mBuffer %p mBufferEnd %p\n \
server %d, serverBase %d, user %d, userBase %d, channels %d",
bufferStart, bufferEnd, mBuffer, mBufferEnd,
@@ -1715,9 +2088,9 @@ void* AudioFlinger::MixerThread::TrackBase::getBuffer(uint32_t offset, uint32_t
// ----------------------------------------------------------------------------
-// Track constructor must be called with AudioFlinger::mLock held
-AudioFlinger::MixerThread::Track::Track(
- const sp<MixerThread>& mixerThread,
+// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
+AudioFlinger::PlaybackThread::Track::Track(
+ const wp<ThreadBase>& thread,
const sp<Client>& client,
int streamType,
uint32_t sampleRate,
@@ -1725,40 +2098,58 @@ AudioFlinger::MixerThread::Track::Track(
int channelCount,
int frameCount,
const sp<IMemory>& sharedBuffer)
- : TrackBase(mixerThread, client, sampleRate, format, channelCount, frameCount, 0, sharedBuffer)
+ : TrackBase(thread, client, sampleRate, format, channelCount, frameCount, 0, sharedBuffer),
+ mMute(false), mSharedBuffer(sharedBuffer), mName(-1)
{
+ sp<ThreadBase> baseThread = thread.promote();
+ if (baseThread != 0) {
+ PlaybackThread *playbackThread = (PlaybackThread *)baseThread.get();
+ mName = playbackThread->getTrackName_l();
+ }
+ LOGV("Track constructor name %d, calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+ if (mName < 0) {
+ LOGE("no more track names available");
+ }
mVolume[0] = 1.0f;
mVolume[1] = 1.0f;
- mMute = false;
- mSharedBuffer = sharedBuffer;
mStreamType = streamType;
+ // NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
+ // 16 bit because data is converted to 16 bit before being stored in the buffer by AudioTrack
+ mCblk->frameSize = AudioSystem::isLinearPCM(format) ? channelCount * sizeof(int16_t) : sizeof(int8_t);
}
-AudioFlinger::MixerThread::Track::~Track()
+AudioFlinger::PlaybackThread::Track::~Track()
{
- wp<Track> weak(this); // never create a strong ref from the dtor
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- mState = TERMINATED;
+ LOGV("PlaybackThread::Track destructor");
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ mState = TERMINATED;
+ }
}
-void AudioFlinger::MixerThread::Track::destroy()
+void AudioFlinger::PlaybackThread::Track::destroy()
{
- // NOTE: destroyTrack_l() can remove a strong reference to this Track
+ // NOTE: destroyTrack_l() can remove a strong reference to this Track
// by removing it from mTracks vector, so there is a risk that this Track's
- // desctructor is called. As the destructor needs to lock AudioFlinger::mLock,
- // we must acquire a strong reference on this Track before locking AudioFlinger::mLock
+ // destructor is called. As the destructor needs to lock mLock,
+ // we must acquire a strong reference on this Track before locking mLock
// here so that the destructor is called only when exiting this function.
- // On the other hand, as long as Track::destroy() is only called by
- // TrackHandle destructor, the TrackHandle still holds a strong ref on
+ // On the other hand, as long as Track::destroy() is only called by
+ // TrackHandle destructor, the TrackHandle still holds a strong ref on
// this Track with its member mTrack.
sp<Track> keep(this);
- { // scope for AudioFlinger::mLock
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- mMixerThread->destroyTrack_l(this);
+ { // scope for mLock
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ playbackThread->destroyTrack_l(this);
+ }
}
}
-void AudioFlinger::MixerThread::Track::dump(char* buffer, size_t size)
+void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
{
snprintf(buffer, size, " %5d %5d %3u %3u %3u %3u %1d %1d %1d %5u %5u %5u %04x %04x\n",
mName - AudioMixer::TRACK0,
@@ -1777,7 +2168,7 @@ void AudioFlinger::MixerThread::Track::dump(char* buffer, size_t size)
mCblk->user);
}
-status_t AudioFlinger::MixerThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
audio_track_cblk_t* cblk = this->cblk();
uint32_t framesReady;
@@ -1814,76 +2205,90 @@ status_t AudioFlinger::MixerThread::Track::getNextBuffer(AudioBufferProvider::Bu
getNextBuffer_exit:
buffer->raw = 0;
buffer->frameCount = 0;
+ LOGV("getNextBuffer() no more data");
return NOT_ENOUGH_DATA;
}
-bool AudioFlinger::MixerThread::Track::isReady() const {
+bool AudioFlinger::PlaybackThread::Track::isReady() const {
if (mFillingUpStatus != FS_FILLING) return true;
if (mCblk->framesReady() >= mCblk->frameCount ||
mCblk->forceReady) {
mFillingUpStatus = FS_FILLED;
mCblk->forceReady = 0;
- LOGV("Track::isReady() track %d for output %d", mName, mMixerThread->mOutputType);
return true;
}
return false;
}
-status_t AudioFlinger::MixerThread::Track::start()
+status_t AudioFlinger::PlaybackThread::Track::start()
{
- LOGV("start(%d), calling thread %d for output %d", mName, IPCThreadState::self()->getCallingPid(), mMixerThread->mOutputType);
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- mMixerThread->addTrack_l(this);
+ LOGV("start(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ playbackThread->addTrack_l(this);
+ }
return NO_ERROR;
}
-void AudioFlinger::MixerThread::Track::stop()
+void AudioFlinger::PlaybackThread::Track::stop()
{
- LOGV("stop(%d), calling thread %d for output %d", mName, IPCThreadState::self()->getCallingPid(), mMixerThread->mOutputType);
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- if (mState > STOPPED) {
- mState = STOPPED;
- // If the track is not active (PAUSED and buffers full), flush buffers
- if (mMixerThread->mActiveTracks.indexOf(this) < 0) {
- reset();
+ LOGV("stop(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ if (mState > STOPPED) {
+ mState = STOPPED;
+ // If the track is not active (PAUSED and buffers full), flush buffers
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+ reset();
+ }
+ LOGV("(> STOPPED) => STOPPED (%d)", mName);
}
- LOGV("(> STOPPED) => STOPPED (%d)", mName);
}
}
-void AudioFlinger::MixerThread::Track::pause()
+void AudioFlinger::PlaybackThread::Track::pause()
{
LOGV("pause(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- if (mState == ACTIVE || mState == RESUMING) {
- mState = PAUSING;
- LOGV("ACTIVE/RESUMING => PAUSING (%d)", mName);
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ if (mState == ACTIVE || mState == RESUMING) {
+ mState = PAUSING;
+ LOGV("ACTIVE/RESUMING => PAUSING (%d)", mName);
+ }
}
}
-void AudioFlinger::MixerThread::Track::flush()
+void AudioFlinger::PlaybackThread::Track::flush()
{
LOGV("flush(%d)", mName);
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- if (mState != STOPPED && mState != PAUSED && mState != PAUSING) {
- return;
- }
- // No point remaining in PAUSED state after a flush => go to
- // STOPPED state
- mState = STOPPED;
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ if (mState != STOPPED && mState != PAUSED && mState != PAUSING) {
+ return;
+ }
+ // No point remaining in PAUSED state after a flush => go to
+ // STOPPED state
+ mState = STOPPED;
- mCblk->lock.lock();
- // NOTE: reset() will reset cblk->user and cblk->server with
- // the risk that at the same time, the AudioMixer is trying to read
- // data. In this case, getNextBuffer() would return a NULL pointer
- // as audio buffer => the AudioMixer code MUST always test that pointer
- // returned by getNextBuffer() is not NULL!
- reset();
- mCblk->lock.unlock();
+ mCblk->lock.lock();
+ // NOTE: reset() will reset cblk->user and cblk->server with
+ // the risk that at the same time, the AudioMixer is trying to read
+ // data. In this case, getNextBuffer() would return a NULL pointer
+ // as audio buffer => the AudioMixer code MUST always test that pointer
+ // returned by getNextBuffer() is not NULL!
+ reset();
+ mCblk->lock.unlock();
+ }
}
-void AudioFlinger::MixerThread::Track::reset()
+void AudioFlinger::PlaybackThread::Track::reset()
{
// Do not reset twice to avoid discarding data written just after a flush and before
// the audioflinger thread detects the track is stopped.
@@ -1893,17 +2298,17 @@ void AudioFlinger::MixerThread::Track::reset()
// written to buffer
mCblk->flowControlFlag = 1;
mCblk->forceReady = 0;
- mFillingUpStatus = FS_FILLING;
+ mFillingUpStatus = FS_FILLING;
mResetDone = true;
}
}
-void AudioFlinger::MixerThread::Track::mute(bool muted)
+void AudioFlinger::PlaybackThread::Track::mute(bool muted)
{
mMute = muted;
}
-void AudioFlinger::MixerThread::Track::setVolume(float left, float right)
+void AudioFlinger::PlaybackThread::Track::setVolume(float left, float right)
{
mVolume[0] = left;
mVolume[1] = right;
@@ -1912,28 +2317,33 @@ void AudioFlinger::MixerThread::Track::setVolume(float left, float right)
// ----------------------------------------------------------------------------
// RecordTrack constructor must be called with AudioFlinger::mLock held
-AudioFlinger::MixerThread::RecordTrack::RecordTrack(
- const sp<MixerThread>& mixerThread,
+AudioFlinger::RecordThread::RecordTrack::RecordTrack(
+ const wp<ThreadBase>& thread,
const sp<Client>& client,
- int inputSource,
uint32_t sampleRate,
int format,
int channelCount,
int frameCount,
uint32_t flags)
- : TrackBase(mixerThread, client, sampleRate, format,
+ : TrackBase(thread, client, sampleRate, format,
channelCount, frameCount, flags, 0),
- mOverflow(false), mInputSource(inputSource)
+ mOverflow(false)
{
+ LOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+ if (format == AudioSystem::PCM_16_BIT) {
+ mCblk->frameSize = channelCount * sizeof(int16_t);
+ } else if (format == AudioSystem::PCM_8_BIT) {
+ mCblk->frameSize = channelCount * sizeof(int8_t);
+ } else {
+ mCblk->frameSize = sizeof(int8_t);
+ }
}
-AudioFlinger::MixerThread::RecordTrack::~RecordTrack()
+AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
{
- Mutex::Autolock _l(mMixerThread->mAudioFlinger->mLock);
- mMixerThread->deleteTrackName_l(mName);
}
-status_t AudioFlinger::MixerThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
audio_track_cblk_t* cblk = this->cblk();
uint32_t framesAvail;
@@ -1972,180 +2382,231 @@ getNextBuffer_exit:
return NOT_ENOUGH_DATA;
}
-status_t AudioFlinger::MixerThread::RecordTrack::start()
+status_t AudioFlinger::RecordThread::RecordTrack::start()
{
- return mMixerThread->mAudioFlinger->startRecord(this);
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ RecordThread *recordThread = (RecordThread *)thread.get();
+ return recordThread->start(this);
+ }
+ return NO_INIT;
}
-void AudioFlinger::MixerThread::RecordTrack::stop()
+void AudioFlinger::RecordThread::RecordTrack::stop()
{
- mMixerThread->mAudioFlinger->stopRecord(this);
- TrackBase::reset();
- // Force overerrun condition to avoid false overrun callback until first data is
- // read from buffer
- mCblk->flowControlFlag = 1;
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ RecordThread *recordThread = (RecordThread *)thread.get();
+ recordThread->stop(this);
+ TrackBase::reset();
+ // Force overrun condition to avoid false overrun callback until first data is
+ // read from buffer
+ mCblk->flowControlFlag = 1;
+ }
}
// ----------------------------------------------------------------------------
-AudioFlinger::MixerThread::OutputTrack::OutputTrack(
- const sp<MixerThread>& mixerThread,
+AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
+ const wp<ThreadBase>& thread,
uint32_t sampleRate,
int format,
int channelCount,
int frameCount)
- : Track(mixerThread, NULL, AudioSystem::SYSTEM, sampleRate, format, channelCount, frameCount, NULL),
- mOutputMixerThread(mixerThread)
+ : Track(thread, NULL, AudioSystem::NUM_STREAM_TYPES, sampleRate, format, channelCount, frameCount, NULL),
+ mActive(false)
{
-
+
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.unsafe_get();
mCblk->out = 1;
mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
mCblk->volume[0] = mCblk->volume[1] = 0x1000;
mOutBuffer.frameCount = 0;
- mCblk->bufferTimeoutMs = 10;
-
- LOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, mCblk->frameCount %d, mCblk->sampleRate %d, mCblk->channels %d mBufferEnd %p",
- mCblk, mBuffer, mCblk->buffers, mCblk->frameCount, mCblk->sampleRate, mCblk->channels, mBufferEnd);
-
+ mWaitTimeMs = (playbackThread->frameCount() * 2 * 1000) / playbackThread->sampleRate();
+
+ LOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, mCblk->frameCount %d, mCblk->sampleRate %d, mCblk->channels %d mBufferEnd %p mWaitTimeMs %d",
+ mCblk, mBuffer, mCblk->buffers, mCblk->frameCount, mCblk->sampleRate, mCblk->channels, mBufferEnd, mWaitTimeMs);
+
}
-AudioFlinger::MixerThread::OutputTrack::~OutputTrack()
+AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
{
stop();
}
-status_t AudioFlinger::MixerThread::OutputTrack::start()
+status_t AudioFlinger::PlaybackThread::OutputTrack::start()
{
status_t status = Track::start();
-
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ mActive = true;
mRetryCount = 127;
return status;
}
-void AudioFlinger::MixerThread::OutputTrack::stop()
+void AudioFlinger::PlaybackThread::OutputTrack::stop()
{
Track::stop();
clearBufferQueue();
mOutBuffer.frameCount = 0;
+ mActive = false;
}
-void AudioFlinger::MixerThread::OutputTrack::write(int16_t* data, uint32_t frames)
+bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
{
Buffer *pInBuffer;
Buffer inBuffer;
uint32_t channels = mCblk->channels;
-
+ bool outputBufferFull = false;
inBuffer.frameCount = frames;
inBuffer.i16 = data;
-
- if (mCblk->user == 0) {
- mOutputMixerThread->mAudioFlinger->mLock.lock();
- bool isMusicActive = mOutputMixerThread->isMusicActive_l();
- mOutputMixerThread->mAudioFlinger->mLock.unlock();
- if (isMusicActive) {
- mCblk->forceReady = 1;
- LOGV("OutputTrack::start() force ready");
- } else if (mCblk->frameCount > frames){
- if (mBufferQueue.size() < kMaxOutputTrackBuffers) {
- uint32_t startFrames = (mCblk->frameCount - frames);
- LOGV("OutputTrack::start() write %d frames", startFrames);
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[startFrames * channels];
- pInBuffer->frameCount = startFrames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, startFrames * channels * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
- } else {
- LOGW ("OutputTrack::write() no more buffers");
+
+ uint32_t waitTimeLeftMs = mWaitTimeMs;
+
+ if (!mActive) {
+ start();
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ MixerThread *mixerThread = (MixerThread *)thread.get();
+ if (mCblk->frameCount > frames){
+ if (mBufferQueue.size() < kMaxOverFlowBuffers) {
+ uint32_t startFrames = (mCblk->frameCount - frames);
+ pInBuffer = new Buffer;
+ pInBuffer->mBuffer = new int16_t[startFrames * channels];
+ pInBuffer->frameCount = startFrames;
+ pInBuffer->i16 = pInBuffer->mBuffer;
+ memset(pInBuffer->raw, 0, startFrames * channels * sizeof(int16_t));
+ mBufferQueue.add(pInBuffer);
+ } else {
+ LOGW ("OutputTrack::write() %p no more buffers in queue", this);
+ }
}
- }
+ }
}
- while (1) {
+ while (waitTimeLeftMs) {
// First write pending buffers, then new data
if (mBufferQueue.size()) {
pInBuffer = mBufferQueue.itemAt(0);
} else {
pInBuffer = &inBuffer;
}
-
+
if (pInBuffer->frameCount == 0) {
break;
}
-
+
if (mOutBuffer.frameCount == 0) {
mOutBuffer.frameCount = pInBuffer->frameCount;
- if (obtainBuffer(&mOutBuffer) == (status_t)AudioTrack::NO_MORE_BUFFERS) {
+ nsecs_t startTime = systemTime();
+ if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)AudioTrack::NO_MORE_BUFFERS) {
+ LOGV ("OutputTrack::write() %p no more output buffers", this);
+ outputBufferFull = true;
break;
}
+ uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
+// LOGV("OutputTrack::write() waitTimeMs %d waitTimeLeftMs %d", waitTimeMs, waitTimeLeftMs)
+ if (waitTimeLeftMs >= waitTimeMs) {
+ waitTimeLeftMs -= waitTimeMs;
+ } else {
+ waitTimeLeftMs = 0;
+ }
}
-
+
uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount : pInBuffer->frameCount;
memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channels * sizeof(int16_t));
mCblk->stepUser(outFrames);
pInBuffer->frameCount -= outFrames;
pInBuffer->i16 += outFrames * channels;
mOutBuffer.frameCount -= outFrames;
- mOutBuffer.i16 += outFrames * channels;
-
+ mOutBuffer.i16 += outFrames * channels;
+
if (pInBuffer->frameCount == 0) {
if (mBufferQueue.size()) {
mBufferQueue.removeAt(0);
delete [] pInBuffer->mBuffer;
delete pInBuffer;
+ LOGV("OutputTrack::write() %p released overflow buffer %d", this, mBufferQueue.size());
} else {
break;
}
}
}
-
+
// If we could not write all frames, allocate a buffer and queue it for next time.
if (inBuffer.frameCount) {
- if (mBufferQueue.size() < kMaxOutputTrackBuffers) {
+ if (mBufferQueue.size() < kMaxOverFlowBuffers) {
pInBuffer = new Buffer;
pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channels];
pInBuffer->frameCount = inBuffer.frameCount;
pInBuffer->i16 = pInBuffer->mBuffer;
memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channels * sizeof(int16_t));
mBufferQueue.add(pInBuffer);
+ LOGV("OutputTrack::write() %p adding overflow buffer %d", this, mBufferQueue.size());
} else {
- LOGW("OutputTrack::write() no more buffers");
+ LOGW("OutputTrack::write() %p no more overflow buffers", this);
}
}
-
+
// Calling write() with a 0 length buffer, means that no more data will be written:
- // If no more buffers are pending, fill output track buffer to make sure it is started
+ // If no more buffers are pending, fill output track buffer to make sure it is started
// by output mixer.
- if (frames == 0 && mBufferQueue.size() == 0 && mCblk->user < mCblk->frameCount) {
- frames = mCblk->frameCount - mCblk->user;
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[frames * channels];
- pInBuffer->frameCount = frames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, frames * channels * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
+ if (frames == 0 && mBufferQueue.size() == 0) {
+ if (mCblk->user < mCblk->frameCount) {
+ frames = mCblk->frameCount - mCblk->user;
+ pInBuffer = new Buffer;
+ pInBuffer->mBuffer = new int16_t[frames * channels];
+ pInBuffer->frameCount = frames;
+ pInBuffer->i16 = pInBuffer->mBuffer;
+ memset(pInBuffer->raw, 0, frames * channels * sizeof(int16_t));
+ mBufferQueue.add(pInBuffer);
+ } else {
+ stop();
+ }
}
+ return outputBufferFull;
}
-status_t AudioFlinger::MixerThread::OutputTrack::obtainBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
int active;
- int timeout = 0;
status_t result;
audio_track_cblk_t* cblk = mCblk;
uint32_t framesReq = buffer->frameCount;
- LOGV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
+// LOGV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
buffer->frameCount = 0;
-
+
uint32_t framesAvail = cblk->framesAvailable();
+
if (framesAvail == 0) {
- return AudioTrack::NO_MORE_BUFFERS;
+ Mutex::Autolock _l(cblk->lock);
+ goto start_loop_here;
+ while (framesAvail == 0) {
+ active = mActive;
+ if (UNLIKELY(!active)) {
+ LOGV("Not active and NO_MORE_BUFFERS");
+ return AudioTrack::NO_MORE_BUFFERS;
+ }
+ result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
+ if (result != NO_ERROR) {
+ return AudioTrack::NO_MORE_BUFFERS;
+ }
+ // read the server count again
+ start_loop_here:
+ framesAvail = cblk->framesAvailable_l();
+ }
}
+// if (framesAvail < framesReq) {
+// return AudioTrack::NO_MORE_BUFFERS;
+// }
+
if (framesReq > framesAvail) {
framesReq = framesAvail;
}
@@ -2163,11 +2624,11 @@ status_t AudioFlinger::MixerThread::OutputTrack::obtainBuffer(AudioBufferProvide
}
-void AudioFlinger::MixerThread::OutputTrack::clearBufferQueue()
+void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
{
size_t size = mBufferQueue.size();
Buffer *pBuffer;
-
+
for (size_t i = 0; i < size; i++) {
pBuffer = mBufferQueue.itemAt(i);
delete [] pBuffer->mBuffer;
@@ -2199,7 +2660,7 @@ const sp<MemoryDealer>& AudioFlinger::Client::heap() const
// ----------------------------------------------------------------------------
-AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::MixerThread::Track>& track)
+AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
: BnAudioTrack(),
mTrack(track)
{
@@ -2251,7 +2712,7 @@ status_t AudioFlinger::TrackHandle::onTransact(
sp<IAudioRecord> AudioFlinger::openRecord(
pid_t pid,
- int inputSource,
+ void *input,
uint32_t sampleRate,
int format,
int channelCount,
@@ -2259,14 +2720,13 @@ sp<IAudioRecord> AudioFlinger::openRecord(
uint32_t flags,
status_t *status)
{
- sp<MixerThread::RecordTrack> recordTrack;
+ sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
wp<Client> wclient;
- AudioStreamIn* input = 0;
- int inFrameCount;
- size_t inputBufferSize;
status_t lStatus;
+ RecordThread *thread;
+ size_t inFrameCount;
// check calling permissions
if (!recordingAllowed()) {
@@ -2274,30 +2734,15 @@ sp<IAudioRecord> AudioFlinger::openRecord(
goto Exit;
}
- if (uint32_t(inputSource) >= AudioRecord::NUM_INPUT_SOURCES) {
- LOGE("invalid stream type");
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- if (mAudioRecordThread == 0) {
- LOGE("Audio record thread not started");
- lStatus = NO_INIT;
- goto Exit;
- }
-
-
- // Check that audio input stream accepts requested audio parameters
- inputBufferSize = mAudioHardware->getInputBufferSize(sampleRate, format, channelCount);
- if (inputBufferSize == 0) {
- lStatus = BAD_VALUE;
- LOGE("Bad audio input parameters: sampling rate %u, format %d, channels %d", sampleRate, format, channelCount);
- goto Exit;
- }
-
// add client to list
{ // scope for mLock
Mutex::Autolock _l(mLock);
+ thread = checkRecordThread_l(input);
+ if (thread == NULL) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
wclient = mClients.valueFor(pid);
if (wclient != NULL) {
client = wclient.promote();
@@ -2306,12 +2751,8 @@ sp<IAudioRecord> AudioFlinger::openRecord(
mClients.add(pid, client);
}
- // frameCount must be a multiple of input buffer size
- inFrameCount = inputBufferSize/channelCount/sizeof(short);
- frameCount = ((frameCount - 1)/inFrameCount + 1) * inFrameCount;
-
// create new record track. The record track uses one track in mHardwareMixerThread by convention.
- recordTrack = new MixerThread::RecordTrack(mHardwareMixerThread, client, inputSource, sampleRate,
+ recordTrack = new RecordThread::RecordTrack(thread, client, sampleRate,
format, channelCount, frameCount, flags);
}
if (recordTrack->getCblk() == NULL) {
@@ -2331,22 +2772,9 @@ Exit:
return recordHandle;
}
-status_t AudioFlinger::startRecord(MixerThread::RecordTrack* recordTrack) {
- if (mAudioRecordThread != 0) {
- return mAudioRecordThread->start(recordTrack);
- }
- return NO_INIT;
-}
-
-void AudioFlinger::stopRecord(MixerThread::RecordTrack* recordTrack) {
- if (mAudioRecordThread != 0) {
- mAudioRecordThread->stop(recordTrack);
- }
-}
-
// ----------------------------------------------------------------------------
-AudioFlinger::RecordHandle::RecordHandle(const sp<AudioFlinger::MixerThread::RecordTrack>& recordTrack)
+AudioFlinger::RecordHandle::RecordHandle(const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
: BnAudioRecord(),
mRecordTrack(recordTrack)
{
@@ -2378,86 +2806,165 @@ status_t AudioFlinger::RecordHandle::onTransact(
// ----------------------------------------------------------------------------
-AudioFlinger::AudioRecordThread::AudioRecordThread(AudioHardwareInterface* audioHardware,
- const sp<AudioFlinger>& audioFlinger) :
- mAudioHardware(audioHardware),
- mAudioFlinger(audioFlinger),
- mActive(false)
+AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, AudioStreamIn *input, uint32_t sampleRate, uint32_t channels) :
+ ThreadBase(audioFlinger),
+ mInput(input), mResampler(0), mRsmpOutBuffer(0), mRsmpInBuffer(0)
{
+ mReqChannelCount = AudioSystem::popCount(channels);
+ mReqSampleRate = sampleRate;
+ readInputParameters();
+ sendConfigEvent(AudioSystem::INPUT_OPENED);
}
-AudioFlinger::AudioRecordThread::~AudioRecordThread()
+
+AudioFlinger::RecordThread::~RecordThread()
{
+ mAudioFlinger->mAudioHardware->closeInputStream(mInput);
+ delete[] mRsmpInBuffer;
+ if (mResampler != 0) {
+ delete mResampler;
+ delete[] mRsmpOutBuffer;
+ }
}
-bool AudioFlinger::AudioRecordThread::threadLoop()
+void AudioFlinger::RecordThread::onFirstRef()
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+
+ snprintf(buffer, SIZE, "Record Thread %p", this);
+
+ run(buffer, PRIORITY_URGENT_AUDIO);
+}
+bool AudioFlinger::RecordThread::threadLoop()
{
- LOGV("AudioRecordThread: start record loop");
AudioBufferProvider::Buffer buffer;
- int inBufferSize = 0;
- int inFrameCount = 0;
- AudioStreamIn* input = 0;
+ sp<RecordTrack> activeTrack;
- mActive = 0;
-
// start recording
while (!exitPending()) {
- if (!mActive) {
- mLock.lock();
- if (!mActive && !exitPending()) {
- LOGV("AudioRecordThread: loop stopping");
- if (input) {
- delete input;
- input = 0;
+
+ processConfigEvents();
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+ checkForNewParameters_l();
+ if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
+ if (!mStandby) {
+ mInput->standby();
+ mStandby = true;
}
- mRecordTrack.clear();
- mStopped.signal();
+ if (exitPending()) break;
+
+ LOGV("RecordThread: loop stopping");
+ // go to sleep
mWaitWorkCV.wait(mLock);
-
- LOGV("AudioRecordThread: loop starting");
- if (mRecordTrack != 0) {
- input = mAudioHardware->openInputStream(
- mRecordTrack->inputSource(),
- mRecordTrack->format(),
- mRecordTrack->channelCount(),
- mRecordTrack->sampleRate(),
- &mStartStatus,
- (AudioSystem::audio_in_acoustics)(mRecordTrack->mFlags >> 16));
- if (input != 0) {
- inBufferSize = input->bufferSize();
- inFrameCount = inBufferSize/input->frameSize();
+ LOGV("RecordThread: loop starting");
+ continue;
+ }
+ if (mActiveTrack != 0) {
+ if (mActiveTrack->mState == TrackBase::PAUSING) {
+ mActiveTrack.clear();
+ mStartStopCond.broadcast();
+ } else if (mActiveTrack->mState == TrackBase::RESUMING) {
+ mRsmpInIndex = mFrameCount;
+ if (mReqChannelCount != mActiveTrack->channelCount()) {
+ mActiveTrack.clear();
+ } else {
+ mActiveTrack->mState = TrackBase::ACTIVE;
}
- } else {
- mStartStatus = NO_INIT;
+ mStartStopCond.broadcast();
}
- if (mStartStatus !=NO_ERROR) {
- LOGW("record start failed, status %d", mStartStatus);
- mActive = false;
- mRecordTrack.clear();
- }
- mWaitWorkCV.signal();
+ mStandby = false;
}
- mLock.unlock();
- } else if (mRecordTrack != 0) {
-
- buffer.frameCount = inFrameCount;
- if (LIKELY(mRecordTrack->getNextBuffer(&buffer) == NO_ERROR &&
- (int)buffer.frameCount == inFrameCount)) {
- LOGV("AudioRecordThread read: %d frames", buffer.frameCount);
- ssize_t bytesRead = input->read(buffer.raw, inBufferSize);
- if (bytesRead < 0) {
- LOGE("Error reading audio input");
- sleep(1);
+ }
+
+ if (mActiveTrack != 0) {
+ buffer.frameCount = mFrameCount;
+ if (LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+ size_t framesOut = buffer.frameCount;
+ if (mResampler == 0) {
+ // no resampling
+ while (framesOut) {
+ size_t framesIn = mFrameCount - mRsmpInIndex;
+ if (framesIn) {
+ int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
+ int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * mActiveTrack->mCblk->frameSize;
+ if (framesIn > framesOut)
+ framesIn = framesOut;
+ mRsmpInIndex += framesIn;
+ framesOut -= framesIn;
+ if (mChannelCount == mReqChannelCount ||
+ mFormat != AudioSystem::PCM_16_BIT) {
+ memcpy(dst, src, framesIn * mFrameSize);
+ } else {
+ int16_t *src16 = (int16_t *)src;
+ int16_t *dst16 = (int16_t *)dst;
+ if (mChannelCount == 1) {
+ while (framesIn--) {
+ *dst16++ = *src16;
+ *dst16++ = *src16++;
+ }
+ } else {
+ while (framesIn--) {
+ *dst16++ = (int16_t)(((int32_t)*src16 + (int32_t)*(src16 + 1)) >> 1);
+ src16 += 2;
+ }
+ }
+ }
+ }
+ if (framesOut && mFrameCount == mRsmpInIndex) {
+ ssize_t bytesRead;
+ if (framesOut == mFrameCount &&
+ (mChannelCount == mReqChannelCount || mFormat != AudioSystem::PCM_16_BIT)) {
+ bytesRead = mInput->read(buffer.raw, mInputBytes);
+ framesOut = 0;
+ } else {
+ bytesRead = mInput->read(mRsmpInBuffer, mInputBytes);
+ mRsmpInIndex = 0;
+ }
+ if (bytesRead < 0) {
+ LOGE("Error reading audio input");
+ sleep(1);
+ mRsmpInIndex = mFrameCount;
+ framesOut = 0;
+ buffer.frameCount = 0;
+ }
+ }
+ }
+ } else {
+ // resampling
+
+ memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t));
+ // alter output frame count as if we were expecting stereo samples
+ if (mChannelCount == 1 && mReqChannelCount == 1) {
+ framesOut >>= 1;
+ }
+ mResampler->resample(mRsmpOutBuffer, framesOut, this);
+ // ditherAndClamp() works as long as all buffers returned by mActiveTrack->getNextBuffer()
+ // are 32 bit aligned, which should always be true.
+ if (mChannelCount == 2 && mReqChannelCount == 1) {
+ AudioMixer::ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
+ // the resampler always outputs stereo samples: do post stereo to mono conversion
+ int16_t *src = (int16_t *)mRsmpOutBuffer;
+ int16_t *dst = buffer.i16;
+ while (framesOut--) {
+ *dst++ = (int16_t)(((int32_t)*src + (int32_t)*(src + 1)) >> 1);
+ src += 2;
+ }
+ } else {
+ AudioMixer::ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
+ }
+
}
- mRecordTrack->releaseBuffer(&buffer);
- mRecordTrack->overflow();
+ mActiveTrack->releaseBuffer(&buffer);
+ mActiveTrack->overflow();
}
-
// client isn't retrieving buffers fast enough
else {
- if (!mRecordTrack->setOverflow())
- LOGW("AudioRecordThread: buffer overflow");
+ if (!mActiveTrack->setOverflow())
+ LOGW("RecordThread: buffer overflow");
// Release the processor for a while before asking for a new buffer.
// This will give the application more chance to read from the buffer and
// clear the overflow.
@@ -2466,65 +2973,64 @@ bool AudioFlinger::AudioRecordThread::threadLoop()
}
}
-
- if (input) {
- delete input;
+ if (!mStandby) {
+ mInput->standby();
}
- mRecordTrack.clear();
-
+ mActiveTrack.clear();
+
+ sendConfigEvent(AudioSystem::INPUT_CLOSED);
+ processConfigEvents();
+
+ LOGV("RecordThread %p exiting", this);
return false;
}
-status_t AudioFlinger::AudioRecordThread::start(MixerThread::RecordTrack* recordTrack)
+status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack)
{
- LOGV("AudioRecordThread::start");
+ LOGV("RecordThread::start");
AutoMutex lock(&mLock);
- mActive = true;
- // If starting the active track, just reset mActive in case a stop
- // was pending and exit
- if (recordTrack == mRecordTrack.get()) return NO_ERROR;
- if (mRecordTrack != 0) return -EBUSY;
+ if (mActiveTrack != 0) {
+ if (recordTrack != mActiveTrack.get()) return -EBUSY;
- mRecordTrack = recordTrack;
+ if (mActiveTrack->mState == TrackBase::PAUSING) mActiveTrack->mState = TrackBase::RESUMING;
+ return NO_ERROR;
+ }
+
+ mActiveTrack = recordTrack;
+ mActiveTrack->mState = TrackBase::RESUMING;
// signal thread to start
LOGV("Signal record thread");
mWaitWorkCV.signal();
- mWaitWorkCV.wait(mLock);
- LOGV("Record started, status %d", mStartStatus);
- return mStartStatus;
-}
-
-void AudioFlinger::AudioRecordThread::stop(MixerThread::RecordTrack* recordTrack) {
- LOGV("AudioRecordThread::stop");
- AutoMutex lock(&mLock);
- if (mActive && (recordTrack == mRecordTrack.get())) {
- mActive = false;
- mStopped.wait(mLock);
+ mStartStopCond.wait(mLock);
+ if (mActiveTrack != 0) {
+ LOGV("Record started OK");
+ return NO_ERROR;
+ } else {
+ LOGV("Record failed to start");
+ return BAD_VALUE;
}
}
-void AudioFlinger::AudioRecordThread::exit()
-{
- LOGV("AudioRecordThread::exit");
- {
- AutoMutex lock(&mLock);
- requestExit();
- mWaitWorkCV.signal();
+void AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
+ LOGV("RecordThread::stop");
+ AutoMutex lock(&mLock);
+ if (mActiveTrack != 0 && recordTrack == mActiveTrack.get()) {
+ mActiveTrack->mState = TrackBase::PAUSING;
+ mStartStopCond.wait(mLock);
}
- requestExitAndWait();
}
-status_t AudioFlinger::AudioRecordThread::dump(int fd, const Vector<String16>& args)
+status_t AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
pid_t pid = 0;
- if (mRecordTrack != 0 && mRecordTrack->mClient != 0) {
- snprintf(buffer, SIZE, "Record client pid: %d\n", mRecordTrack->mClient->pid());
+ if (mActiveTrack != 0 && mActiveTrack->mClient != 0) {
+ snprintf(buffer, SIZE, "Record client pid: %d\n", mActiveTrack->mClient->pid());
result.append(buffer);
} else {
result.append("No record client\n");
@@ -2533,6 +3039,463 @@ status_t AudioFlinger::AudioRecordThread::dump(int fd, const Vector<String16>& a
return NO_ERROR;
}
+status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ size_t framesReq = buffer->frameCount;
+ size_t framesReady = mFrameCount - mRsmpInIndex;
+ int channelCount;
+
+ if (framesReady == 0) {
+ ssize_t bytesRead = mInput->read(mRsmpInBuffer, mInputBytes);
+ if (bytesRead < 0) {
+ LOGE("RecordThread::getNextBuffer() Error reading audio input");
+ sleep(1);
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+ return NOT_ENOUGH_DATA;
+ }
+ mRsmpInIndex = 0;
+ framesReady = mFrameCount;
+ }
+
+ if (framesReq > framesReady) {
+ framesReq = framesReady;
+ }
+
+ if (mChannelCount == 1 && mReqChannelCount == 2) {
+ channelCount = 1;
+ } else {
+ channelCount = 2;
+ }
+ buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount;
+ buffer->frameCount = framesReq;
+ return NO_ERROR;
+}
+
+void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ mRsmpInIndex += buffer->frameCount;
+ buffer->frameCount = 0;
+}
+
+bool AudioFlinger::RecordThread::checkForNewParameters_l()
+{
+ bool reconfig = false;
+
+ if (mNewParameters != "") {
+ status_t status = NO_ERROR;
+ AudioParameter param = AudioParameter(mNewParameters);
+ int value;
+ int reqFormat = mFormat;
+ int reqSamplingRate = mReqSampleRate;
+ int reqChannelCount = mReqChannelCount;
+ if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
+ reqSamplingRate = value;
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
+ reqFormat = value;
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
+ reqChannelCount = AudioSystem::popCount(value);
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
+ // do not accept frame count changes if tracks are open as the track buffer
+ // size depends on frame count and correct behavior would not be guaranteed
+ // if frame count is changed after track creation
+ if (mActiveTrack != 0) {
+ status = INVALID_OPERATION;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (status == NO_ERROR) {
+ status = mInput->setParameters(mNewParameters);
+ if (status == INVALID_OPERATION) {
+ mInput->standby();
+ status = mInput->setParameters(mNewParameters);
+ }
+ if (reconfig) {
+ if (status == BAD_VALUE &&
+ reqFormat == mInput->format() && reqFormat == AudioSystem::PCM_16_BIT &&
+ ((int)mInput->sampleRate() <= 2 * reqSamplingRate) &&
+ (AudioSystem::popCount(mInput->channels()) < 3) && (reqChannelCount < 3)) {
+ status = NO_ERROR;
+ }
+ if (status == NO_ERROR) {
+ readInputParameters();
+ sendConfigEvent(AudioSystem::INPUT_CONFIG_CHANGED);
+ }
+ }
+ }
+ mNewParameters = "";
+ mParamStatus = status;
+ mParamCond.signal();
+ }
+ return reconfig;
+}
+
+String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
+{
+ return mInput->getParameters(keys);
+}
+
+void AudioFlinger::RecordThread::audioConfigChanged(int event, int param) {
+ AudioSystem::OutputDescriptor desc;
+ void *param2 = 0;
+
+ switch (event) {
+ case AudioSystem::INPUT_OPENED:
+ case AudioSystem::INPUT_CONFIG_CHANGED:
+ desc.channels = mChannelCount;
+ desc.samplingRate = mSampleRate;
+ desc.format = mFormat;
+ desc.frameCount = mFrameCount;
+ desc.latency = 0;
+ param2 = &desc;
+ break;
+
+ case AudioSystem::INPUT_CLOSED:
+ default:
+ break;
+ }
+ mAudioFlinger->audioConfigChanged(event, this, param2);
+}
+
+void AudioFlinger::RecordThread::readInputParameters()
+{
+ if (mRsmpInBuffer) delete[] mRsmpInBuffer;
+ if (mRsmpOutBuffer) delete[] mRsmpOutBuffer;
+ if (mResampler) delete mResampler;
+ mResampler = 0;
+
+ mSampleRate = mInput->sampleRate();
+ mChannelCount = AudioSystem::popCount(mInput->channels());
+ mFormat = mInput->format();
+ mFrameSize = mInput->frameSize();
+ mInputBytes = mInput->bufferSize();
+ mFrameCount = mInputBytes / mFrameSize;
+ mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
+
+ if (mSampleRate != mReqSampleRate && mChannelCount < 3 && mReqChannelCount < 3)
+ {
+ int channelCount;
+ // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
+ // stereo to mono post process as the resampler always outputs stereo.
+ if (mChannelCount == 1 && mReqChannelCount == 2) {
+ channelCount = 1;
+ } else {
+ channelCount = 2;
+ }
+ mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
+ mResampler->setSampleRate(mSampleRate);
+ mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
+ mRsmpOutBuffer = new int32_t[mFrameCount * 2];
+
+ // optimization: if mono to mono, alter input frame count as if we were inputting stereo samples
+ if (mChannelCount == 1 && mReqChannelCount == 1) {
+ mFrameCount >>= 1;
+ }
+
+ }
+ mRsmpInIndex = mFrameCount;
+}
+
+// ----------------------------------------------------------------------------
+
+void *AudioFlinger::openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ uint32_t flags)
+{
+ status_t status;
+ PlaybackThread *thread = NULL;
+ mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
+ uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
+ uint32_t format = pFormat ? *pFormat : 0;
+ uint32_t channels = pChannels ? *pChannels : 0;
+ uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
+
+ LOGV("openOutput(), Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
+ pDevices ? *pDevices : 0,
+ samplingRate,
+ format,
+ channels,
+ flags);
+
+ if (pDevices == NULL || *pDevices == 0) {
+ return NULL;
+ }
+ Mutex::Autolock _l(mLock);
+
+ AudioStreamOut *output = mAudioHardware->openOutputStream(*pDevices,
+ (int *)&format,
+ &channels,
+ &samplingRate,
+ &status);
+ LOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
+ output,
+ samplingRate,
+ format,
+ channels,
+ status);
+
+ mHardwareStatus = AUDIO_HW_IDLE;
+ if (output != 0) {
+ if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
+ (format != AudioSystem::PCM_16_BIT) ||
+ (channels != AudioSystem::CHANNEL_OUT_STEREO)) {
+ thread = new DirectOutputThread(this, output);
+ LOGV("openOutput() created direct output %p", thread);
+ } else {
+ thread = new MixerThread(this, output);
+ LOGV("openOutput() created mixer output %p", thread);
+ }
+ mPlaybackThreads.add(thread);
+
+ if (pSamplingRate) *pSamplingRate = samplingRate;
+ if (pFormat) *pFormat = format;
+ if (pChannels) *pChannels = channels;
+ if (pLatencyMs) *pLatencyMs = thread->latency();
+ }
+
+ return thread;
+}
+
+void *AudioFlinger::openDuplicateOutput(void *output1, void *output2)
+{
+ Mutex::Autolock _l(mLock);
+
+ if (checkMixerThread_l(output1) == NULL ||
+ checkMixerThread_l(output2) == NULL) {
+ LOGW("openDuplicateOutput() wrong output mixer type %p or %p", output1, output2);
+ return NULL;
+ }
+
+ DuplicatingThread *thread = new DuplicatingThread(this, (MixerThread *)output1);
+ thread->addOutputTrack( (MixerThread *)output2);
+ mPlaybackThreads.add(thread);
+ return thread;
+}
+
+status_t AudioFlinger::closeOutput(void *output)
+{
+ PlaybackThread *thread;
+ {
+ Mutex::Autolock _l(mLock);
+ thread = checkPlaybackThread_l(output);
+ if (thread == NULL) {
+ return BAD_VALUE;
+ }
+
+ LOGV("closeOutput() %p", thread);
+
+ if (thread->type() == PlaybackThread::MIXER) {
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads[i]->type() == PlaybackThread::DUPLICATING) {
+ DuplicatingThread *dupThread = (DuplicatingThread *)mPlaybackThreads[i].get();
+ dupThread->removeOutputTrack((MixerThread *)thread);
+ }
+ }
+ }
+ mPlaybackThreads.remove(thread);
+ }
+ thread->exit();
+
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::suspendOutput(void *output)
+{
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+
+ if (thread == NULL) {
+ return BAD_VALUE;
+ }
+
+ LOGV("suspendOutput() %p", output);
+ thread->suspend();
+
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::restoreOutput(void *output)
+{
+ Mutex::Autolock _l(mLock);
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+
+ if (thread == NULL) {
+ return BAD_VALUE;
+ }
+
+ LOGV("restoreOutput() %p", output);
+
+ thread->restore();
+
+ return NO_ERROR;
+}
+
+void *AudioFlinger::openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics)
+{
+ status_t status;
+ RecordThread *thread = NULL;
+ uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
+ uint32_t format = pFormat ? *pFormat : 0;
+ uint32_t channels = pChannels ? *pChannels : 0;
+ uint32_t reqSamplingRate = samplingRate;
+ uint32_t reqFormat = format;
+ uint32_t reqChannels = channels;
+
+ if (pDevices == NULL || *pDevices == 0) {
+ return NULL;
+ }
+ Mutex::Autolock _l(mLock);
+
+ AudioStreamIn *input = mAudioHardware->openInputStream(*pDevices,
+ (int *)&format,
+ &channels,
+ &samplingRate,
+ &status,
+ (AudioSystem::audio_in_acoustics)acoustics);
+ LOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, acoustics %x, status %d",
+ input,
+ samplingRate,
+ format,
+ channels,
+ acoustics,
+ status);
+
+ // If the input could not be opened with the requested parameters and we can handle the conversion internally,
+ // try to open again with the proposed parameters. The AudioFlinger can resample the input and do mono to stereo
+ // or stereo to mono conversions on 16 bit PCM inputs.
+ if (input == 0 && status == BAD_VALUE &&
+ reqFormat == format && format == AudioSystem::PCM_16_BIT &&
+ (samplingRate <= 2 * reqSamplingRate) &&
+ (AudioSystem::popCount(channels) < 3) && (AudioSystem::popCount(reqChannels) < 3)) {
+ LOGV("openInput() reopening with proposed sampling rate and channels");
+ input = mAudioHardware->openInputStream(*pDevices,
+ (int *)&format,
+ &channels,
+ &samplingRate,
+ &status,
+ (AudioSystem::audio_in_acoustics)acoustics);
+ }
+
+ if (input != 0) {
+ // Start record thread
+ thread = new RecordThread(this, input, reqSamplingRate, reqChannels);
+ mRecordThreads.add(thread);
+
+ if (pSamplingRate) *pSamplingRate = reqSamplingRate;
+ if (pFormat) *pFormat = format;
+ if (pChannels) *pChannels = reqChannels;
+
+ input->standby();
+ }
+
+ return thread;
+}
+
+status_t AudioFlinger::closeInput(void *input)
+{
+ RecordThread *thread;
+ {
+ Mutex::Autolock _l(mLock);
+ thread = checkRecordThread_l(input);
+ if (thread == NULL) {
+ return BAD_VALUE;
+ }
+
+ LOGV("closeInput() %p", thread);
+ mRecordThreads.remove(thread);
+ }
+ thread->exit();
+
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::setStreamOutput(uint32_t stream, void *output)
+{
+ Mutex::Autolock _l(mLock);
+ MixerThread *dstThread = checkMixerThread_l(output);
+ if (dstThread == NULL) {
+ LOGW("setStreamOutput() bad output thread %p", output);
+ return BAD_VALUE;
+ }
+
+ LOGV("setStreamOutput() stream %d to output %p", stream, dstThread);
+
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ PlaybackThread *thread = mPlaybackThreads[i].get();
+ if (thread != dstThread &&
+ thread->type() != PlaybackThread::DIRECT) {
+ MixerThread *srcThread = (MixerThread *)thread;
+ SortedVector < sp<MixerThread::Track> > tracks;
+ SortedVector < wp<MixerThread::Track> > activeTracks;
+ srcThread->getTracks(tracks, activeTracks, stream);
+ if (tracks.size()) {
+ dstThread->putTracks(tracks, activeTracks);
+ dstThread->sendConfigEvent(AudioSystem::STREAM_CONFIG_CHANGED, stream);
+ }
+ }
+ }
+
+ return NO_ERROR;
+}
+
+// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(void *output) const
+{
+ PlaybackThread *thread = NULL;
+
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads[i] == output) {
+ thread = (PlaybackThread *)output;
+ break;
+ }
+ }
+
+ return thread;
+}
+
+// checkMixerThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::MixerThread *AudioFlinger::checkMixerThread_l(void *output) const
+{
+ PlaybackThread *thread = checkPlaybackThread_l(output);
+ if (thread != NULL) {
+ if (thread->type() == PlaybackThread::DIRECT) {
+ thread = NULL;
+ }
+ }
+ return (MixerThread *)thread;
+}
+
+// checkRecordThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::RecordThread *AudioFlinger::checkRecordThread_l(void *input) const
+{
+ RecordThread *thread = NULL;
+
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ if (mRecordThreads[i] == input) {
+ thread = (RecordThread *)input;
+ break;
+ }
+ }
+
+ return thread;
+}
+
+// ----------------------------------------------------------------------------
+
status_t AudioFlinger::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
@@ -2540,6 +3503,7 @@ status_t AudioFlinger::onTransact(
}
// ----------------------------------------------------------------------------
+
void AudioFlinger::instantiate() {
defaultServiceManager()->addService(
String16("media.audio_flinger"), new AudioFlinger());
diff --git a/libs/audioflinger/AudioFlinger.h b/libs/audioflinger/AudioFlinger.h
index 3531a58..7d78749 100644
--- a/libs/audioflinger/AudioFlinger.h
+++ b/libs/audioflinger/AudioFlinger.h
@@ -31,7 +31,6 @@
#include <utils/Errors.h>
#include <utils/threads.h>
#include <binder/MemoryDealer.h>
-#include <utils/KeyedVector.h>
#include <utils/SortedVector.h>
#include <utils/Vector.h>
@@ -44,6 +43,7 @@ namespace android {
class audio_track_cblk_t;
class AudioMixer;
class AudioBuffer;
+class AudioResampler;
// ----------------------------------------------------------------------------
@@ -56,7 +56,7 @@ class AudioBuffer;
static const nsecs_t kStandbyTimeInNsecs = seconds(3);
-class AudioFlinger : public BnAudioFlinger, public IBinder::DeathRecipient
+class AudioFlinger : public BnAudioFlinger, public IBinder::DeathRecipient
{
public:
static void instantiate();
@@ -73,13 +73,14 @@ public:
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
+ void *output,
status_t *status);
- virtual uint32_t sampleRate(int output) const;
- virtual int channelCount(int output) const;
- virtual int format(int output) const;
- virtual size_t frameCount(int output) const;
- virtual uint32_t latency(int output) const;
+ virtual uint32_t sampleRate(void *output) const;
+ virtual int channelCount(void *output) const;
+ virtual int format(void *output) const;
+ virtual size_t frameCount(void *output) const;
+ virtual uint32_t latency(void *output) const;
virtual status_t setMasterVolume(float value);
virtual status_t setMasterMute(bool muted);
@@ -87,33 +88,51 @@ public:
virtual float masterVolume() const;
virtual bool masterMute() const;
- virtual status_t setStreamVolume(int stream, float value);
+ virtual status_t setStreamVolume(int stream, float value, void *output);
virtual status_t setStreamMute(int stream, bool muted);
- virtual float streamVolume(int stream) const;
+ virtual float streamVolume(int stream, void *output) const;
virtual bool streamMute(int stream) const;
- virtual status_t setRouting(int mode, uint32_t routes, uint32_t mask);
- virtual uint32_t getRouting(int mode) const;
-
virtual status_t setMode(int mode);
- virtual int getMode() const;
virtual status_t setMicMute(bool state);
virtual bool getMicMute() const;
virtual bool isMusicActive() const;
- virtual bool isA2dpEnabled() const;
-
- virtual status_t setParameter(const char* key, const char* value);
+ virtual status_t setParameters(void *ioHandle, const String8& keyValuePairs);
+ virtual String8 getParameters(void *ioHandle, const String8& keys);
virtual void registerClient(const sp<IAudioFlingerClient>& client);
-
+
virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount);
-
- virtual void wakeUp() { mWaitWorkCV.broadcast(); }
-
+
+ virtual void *openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ uint32_t flags);
+
+ virtual void *openDuplicateOutput(void *output1, void *output2);
+
+ virtual status_t closeOutput(void *output);
+
+ virtual status_t suspendOutput(void *output);
+
+ virtual status_t restoreOutput(void *output);
+
+ virtual void *openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics);
+
+ virtual status_t closeInput(void *input);
+
+ virtual status_t setStreamOutput(uint32_t stream, void *output);
+
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -139,7 +158,7 @@ public:
// record interface
virtual sp<IAudioRecord> openRecord(
pid_t pid,
- int inputSource,
+ void *input,
uint32_t sampleRate,
int format,
int channelCount,
@@ -153,30 +172,12 @@ public:
Parcel* reply,
uint32_t flags);
+ void audioConfigChanged(int event, void *param1, void *param2);
+
private:
AudioFlinger();
virtual ~AudioFlinger();
-
- void setOutput(int outputType);
- void doSetOutput(int outputType);
-
-#ifdef WITH_A2DP
- void setA2dpEnabled_l(bool enable);
- void checkA2dpEnabledChange_l();
-#endif
- static bool streamForcedToSpeaker(int streamType);
-
- // Management of forced route to speaker for certain track types.
- enum force_speaker_command {
- ACTIVE_TRACK_ADDED = 0,
- ACTIVE_TRACK_REMOVED,
- CHECK_ROUTE_RESTORE_TIME,
- FORCE_ROUTE_RESTORE
- };
- void handleForcedSpeakerRoute(int command);
-#ifdef WITH_A2DP
- void handleRouteDisablesA2dp_l(int routes);
-#endif
+
+ // Internal dump utilities.
status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
@@ -201,14 +202,17 @@ private:
class TrackHandle;
class RecordHandle;
- class AudioRecordThread;
-
-
- // --- MixerThread ---
- class MixerThread : public Thread {
+ class RecordThread;
+ class PlaybackThread;
+ class MixerThread;
+ class DirectOutputThread;
+ class Track;
+ class RecordTrack;
+
+ class ThreadBase : public Thread {
public:
-
- // --- Track ---
+ ThreadBase (const sp<AudioFlinger>& audioFlinger);
+ virtual ~ThreadBase();
// base for record and playback
class TrackBase : public AudioBufferProvider, public RefBase {
@@ -230,7 +234,7 @@ private:
// The upper 16 bits are used for track-specific flags.
};
- TrackBase(const sp<MixerThread>& mixerThread,
+ TrackBase(const wp<ThreadBase>& thread,
const sp<Client>& client,
uint32_t sampleRate,
int format,
@@ -245,9 +249,8 @@ private:
sp<IMemory> getCblk() const;
protected:
- friend class MixerThread;
+ friend class ThreadBase;
friend class RecordHandle;
- friend class AudioRecordThread;
TrackBase(const TrackBase&);
TrackBase& operator = (const TrackBase&);
@@ -269,10 +272,6 @@ private:
void* getBuffer(uint32_t offset, uint32_t frames) const;
- int name() const {
- return mName;
- }
-
bool isStopped() const {
return mState == STOPPED;
}
@@ -284,14 +283,13 @@ private:
bool step();
void reset();
- sp<MixerThread> mMixerThread;
+ wp<ThreadBase> mThread;
sp<Client> mClient;
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk;
void* mBuffer;
void* mBufferEnd;
uint32_t mFrameCount;
- int mName;
// we don't really need a lock for these
int mState;
int mClientTid;
@@ -299,10 +297,67 @@ private:
uint32_t mFlags;
};
+ class ConfigEvent {
+ public:
+ ConfigEvent() : mEvent(0), mParam(0) {}
+
+ int mEvent;
+ int mParam;
+ };
+
+ uint32_t sampleRate() const;
+ int channelCount() const;
+ int format() const;
+ size_t frameCount() const;
+ void wakeUp() { mWaitWorkCV.broadcast(); }
+ void exit();
+ virtual bool checkForNewParameters_l() = 0;
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys) = 0;
+ virtual void audioConfigChanged(int event, int param = 0) = 0;
+ void sendConfigEvent(int event, int param = 0);
+ void processConfigEvents();
+
+ protected:
+
+ friend class Track;
+ friend class TrackBase;
+ friend class PlaybackThread;
+ friend class MixerThread;
+ friend class DirectOutputThread;
+ friend class DuplicatingThread;
+ friend class RecordThread;
+ friend class RecordTrack;
+
+ mutable Mutex mLock;
+ Condition mWaitWorkCV;
+ sp<AudioFlinger> mAudioFlinger;
+ uint32_t mSampleRate;
+ size_t mFrameCount;
+ int mChannelCount;
+ int mFormat;
+ uint32_t mFrameSize;
+ Condition mParamCond;
+ String8 mNewParameters;
+ status_t mParamStatus;
+ Vector<ConfigEvent *> mConfigEvents;
+ bool mStandby;
+ };
+
+ // --- PlaybackThread ---
+ class PlaybackThread : public ThreadBase {
+ public:
+
+ enum type {
+ MIXER,
+ DIRECT,
+ DUPLICATING
+ };
+
// playback track
class Track : public TrackBase {
public:
- Track( const sp<MixerThread>& mixerThread,
+ Track( const wp<ThreadBase>& thread,
const sp<Client>& client,
int streamType,
uint32_t sampleRate,
@@ -321,6 +376,9 @@ private:
void destroy();
void mute(bool);
void setVolume(float left, float right);
+ int name() const {
+ return mName;
+ }
int type() const {
return mStreamType;
@@ -328,7 +386,7 @@ private:
protected:
- friend class MixerThread;
+ friend class ThreadBase;
friend class AudioFlinger;
friend class AudioFlinger::TrackHandle;
@@ -336,21 +394,14 @@ private:
Track& operator = (const Track&);
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
-
- bool isMuted() const {
- return (mMute || mMixerThread->mStreamTypes[mStreamType].mute);
- }
-
+ bool isMuted() { return mMute; }
bool isPausing() const {
return mState == PAUSING;
}
-
bool isPaused() const {
return mState == PAUSED;
}
-
bool isReady() const;
-
void setPaused() { mState = PAUSED; }
void reset();
@@ -364,54 +415,20 @@ private:
sp<IMemory> mSharedBuffer;
bool mResetDone;
int mStreamType;
+ int mName;
}; // end of Track
- // record track
- class RecordTrack : public TrackBase {
- public:
- RecordTrack(const sp<MixerThread>& mixerThread,
- const sp<Client>& client,
- int inputSource,
- uint32_t sampleRate,
- int format,
- int channelCount,
- int frameCount,
- uint32_t flags);
- ~RecordTrack();
-
- virtual status_t start();
- virtual void stop();
-
- bool overflow() { bool tmp = mOverflow; mOverflow = false; return tmp; }
- bool setOverflow() { bool tmp = mOverflow; mOverflow = true; return tmp; }
-
- int inputSource() const { return mInputSource; }
-
- private:
- friend class AudioFlinger;
- friend class AudioFlinger::RecordHandle;
- friend class AudioFlinger::AudioRecordThread;
- friend class MixerThread;
-
- RecordTrack(const Track&);
- RecordTrack& operator = (const Track&);
-
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
-
- bool mOverflow;
- int mInputSource;
- };
// playback track
class OutputTrack : public Track {
public:
-
+
class Buffer: public AudioBufferProvider::Buffer {
public:
int16_t *mBuffer;
};
-
- OutputTrack( const sp<MixerThread>& mixerThread,
+
+ OutputTrack( const wp<ThreadBase>& thread,
uint32_t sampleRate,
int format,
int channelCount,
@@ -420,35 +437,35 @@ private:
virtual status_t start();
virtual void stop();
- void write(int16_t* data, uint32_t frames);
+ bool write(int16_t* data, uint32_t frames);
bool bufferQueueEmpty() { return (mBufferQueue.size() == 0) ? true : false; }
+ bool isActive() { return mActive; }
+ wp<ThreadBase>& thread() { return mThread; }
private:
- status_t obtainBuffer(AudioBufferProvider::Buffer* buffer);
+ status_t obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs);
void clearBufferQueue();
-
- sp<MixerThread> mOutputMixerThread;
+
+ // Maximum number of pending buffers allocated by OutputTrack::write()
+ static const uint8_t kMaxOverFlowBuffers = 3;
+
Vector < Buffer* > mBufferQueue;
AudioBufferProvider::Buffer mOutBuffer;
- uint32_t mFramesWritten;
-
- }; // end of OutputTrack
+ uint32_t mWaitTimeMs;
+ bool mActive;
- MixerThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int outputType);
- virtual ~MixerThread();
+ }; // end of OutputTrack
+
+ PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output);
+ virtual ~PlaybackThread();
virtual status_t dump(int fd, const Vector<String16>& args);
// Thread virtuals
- virtual bool threadLoop();
virtual status_t readyToRun();
virtual void onFirstRef();
- virtual uint32_t sampleRate() const;
- virtual int channelCount() const;
- virtual int format() const;
- virtual size_t frameCount() const;
virtual uint32_t latency() const;
virtual status_t setMasterVolume(float value);
@@ -463,9 +480,8 @@ private:
virtual float streamVolume(int stream) const;
virtual bool streamMute(int stream) const;
- bool isMusicActive_l() const;
-
-
+ bool isMusicActive() const;
+
sp<Track> createTrack_l(
const sp<AudioFlinger::Client>& client,
int streamType,
@@ -475,13 +491,15 @@ private:
int frameCount,
const sp<IMemory>& sharedBuffer,
status_t *status);
-
- void getTracks_l(SortedVector < sp<Track> >& tracks,
- SortedVector < wp<Track> >& activeTracks);
- void putTracks_l(SortedVector < sp<Track> >& tracks,
- SortedVector < wp<Track> >& activeTracks);
- void setOuputTrack(OutputTrack *track) { mOutputTrack = track; }
-
+
+ AudioStreamOut* getOutput() { return mOutput; }
+
+ virtual int type() const { return mType; }
+ void suspend() { mSuspended = true; }
+ void restore() { mSuspended = false; }
+ virtual String8 getParameters(const String8& keys);
+ virtual void audioConfigChanged(int event, int param = 0);
+
struct stream_type_t {
stream_type_t()
: volume(1.0f),
@@ -494,54 +512,109 @@ private:
private:
-
friend class AudioFlinger;
friend class Track;
friend class TrackBase;
- friend class RecordTrack;
-
- MixerThread(const Client&);
- MixerThread& operator = (const MixerThread&);
-
+ friend class MixerThread;
+ friend class DirectOutputThread;
+ friend class DuplicatingThread;
+
+ PlaybackThread(const Client&);
+ PlaybackThread& operator = (const PlaybackThread&);
+
status_t addTrack_l(const sp<Track>& track);
void destroyTrack_l(const sp<Track>& track);
- int getTrackName_l();
- void deleteTrackName_l(int name);
- void addActiveTrack_l(const wp<Track>& t);
- void removeActiveTrack_l(const wp<Track>& t);
- size_t getOutputFrameCount();
+ virtual int getTrackName_l() = 0;
+ virtual void deleteTrackName_l(int name) = 0;
+ void readOutputParameters();
- status_t dumpInternals(int fd, const Vector<String16>& args);
+ virtual status_t dumpInternals(int fd, const Vector<String16>& args);
status_t dumpTracks(int fd, const Vector<String16>& args);
-
- sp<AudioFlinger> mAudioFlinger;
+
SortedVector< wp<Track> > mActiveTracks;
SortedVector< sp<Track> > mTracks;
- stream_type_t mStreamTypes[AudioSystem::NUM_STREAM_TYPES];
- AudioMixer* mAudioMixer;
+ // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by DuplicatingThread
+ stream_type_t mStreamTypes[AudioSystem::NUM_STREAM_TYPES + 1];
AudioStreamOut* mOutput;
- int mOutputType;
- uint32_t mSampleRate;
- size_t mFrameCount;
- int mChannelCount;
- int mFormat;
- int16_t* mMixBuffer;
float mMasterVolume;
bool mMasterMute;
nsecs_t mLastWriteTime;
int mNumWrites;
int mNumDelayedWrites;
- bool mStandby;
bool mInWrite;
- sp <OutputTrack> mOutputTrack;
+ int16_t* mMixBuffer;
+ bool mSuspended;
+ int mType;
+ int mBytesWritten;
+ int mMinBytesToWrite;
+ };
+
+ class MixerThread : public PlaybackThread {
+ public:
+ MixerThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output);
+ virtual ~MixerThread();
+
+ // Thread virtuals
+ virtual bool threadLoop();
+
+ void getTracks(SortedVector < sp<Track> >& tracks,
+ SortedVector < wp<Track> >& activeTracks,
+ int streamType);
+ void putTracks(SortedVector < sp<Track> >& tracks,
+ SortedVector < wp<Track> >& activeTracks);
+ virtual int getTrackName_l();
+ virtual void deleteTrackName_l(int name);
+ virtual bool checkForNewParameters_l();
+ virtual status_t dumpInternals(int fd, const Vector<String16>& args);
+
+ protected:
+ size_t prepareTracks_l(const SortedVector< wp<Track> >& activeTracks, Vector< sp<Track> > *tracksToRemove);
+
+ AudioMixer* mAudioMixer;
+ };
+
+ class DirectOutputThread : public PlaybackThread {
+ public:
+
+ DirectOutputThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output);
+ ~DirectOutputThread();
+
+ // Thread virtuals
+ virtual bool threadLoop();
+
+ virtual int getTrackName_l();
+ virtual void deleteTrackName_l(int name);
+ virtual bool checkForNewParameters_l();
+
+ private:
+ float mLeftVolume;
+ float mRightVolume;
};
-
+ class DuplicatingThread : public MixerThread {
+ public:
+ DuplicatingThread (const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread);
+ ~DuplicatingThread();
+
+ // Thread virtuals
+ virtual bool threadLoop();
+ void addOutputTrack(MixerThread* thread);
+ void removeOutputTrack(MixerThread* thread);
+
+ private:
+ SortedVector < sp<OutputTrack> > mOutputTracks;
+ };
+
+ PlaybackThread *checkPlaybackThread_l(void *output) const;
+ MixerThread *checkMixerThread_l(void *output) const;
+ RecordThread *checkRecordThread_l(void *input) const;
+ float streamVolumeInternal(int stream) const { return mStreamTypes[stream].volume; }
+
friend class AudioBuffer;
class TrackHandle : public android::BnAudioTrack {
public:
- TrackHandle(const sp<MixerThread::Track>& track);
+ TrackHandle(const sp<PlaybackThread::Track>& track);
virtual ~TrackHandle();
virtual status_t start();
virtual void stop();
@@ -553,20 +626,90 @@ private:
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
private:
- sp<MixerThread::Track> mTrack;
+ sp<PlaybackThread::Track> mTrack;
};
friend class Client;
- friend class MixerThread::Track;
+ friend class PlaybackThread::Track;
void removeClient(pid_t pid);
+ // record thread
+ class RecordThread : public ThreadBase, public AudioBufferProvider
+ {
+ public:
+
+ // record track
+ class RecordTrack : public TrackBase {
+ public:
+ RecordTrack(const wp<ThreadBase>& thread,
+ const sp<Client>& client,
+ uint32_t sampleRate,
+ int format,
+ int channelCount,
+ int frameCount,
+ uint32_t flags);
+ ~RecordTrack();
+
+ virtual status_t start();
+ virtual void stop();
+
+ bool overflow() { bool tmp = mOverflow; mOverflow = false; return tmp; }
+ bool setOverflow() { bool tmp = mOverflow; mOverflow = true; return tmp; }
+
+ private:
+ friend class AudioFlinger;
+
+ RecordTrack(const RecordTrack&);
+ RecordTrack& operator = (const RecordTrack&);
+
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+
+ bool mOverflow;
+ };
+
+
+ RecordThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamIn *input,
+ uint32_t sampleRate,
+ uint32_t channels);
+ ~RecordThread();
+
+ virtual bool threadLoop();
+ virtual status_t readyToRun() { return NO_ERROR; }
+ virtual void onFirstRef();
+
+ status_t start(RecordTrack* recordTrack);
+ void stop(RecordTrack* recordTrack);
+ status_t dump(int fd, const Vector<String16>& args);
+ AudioStreamIn* getInput() { return mInput; }
+
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+ virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+ virtual bool checkForNewParameters_l();
+ virtual String8 getParameters(const String8& keys);
+ virtual void audioConfigChanged(int event, int param = 0);
+ void readInputParameters();
+
+ private:
+ RecordThread();
+ AudioStreamIn *mInput;
+ sp<RecordTrack> mActiveTrack;
+ Condition mStartStopCond;
+ AudioResampler *mResampler;
+ int32_t *mRsmpOutBuffer;
+ int16_t *mRsmpInBuffer;
+ size_t mRsmpInIndex;
+ size_t mInputBytes;
+ int mReqChannelCount;
+ uint32_t mReqSampleRate;
+ };
class RecordHandle : public android::BnAudioRecord {
public:
- RecordHandle(const sp<MixerThread::RecordTrack>& recordTrack);
+ RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
virtual ~RecordHandle();
virtual status_t start();
virtual void stop();
@@ -574,66 +717,30 @@ private:
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
private:
- sp<MixerThread::RecordTrack> mRecordTrack;
+ sp<RecordThread::RecordTrack> mRecordTrack;
};
- // record thread
- class AudioRecordThread : public Thread
- {
- public:
- AudioRecordThread(AudioHardwareInterface* audioHardware, const sp<AudioFlinger>& audioFlinger);
- virtual ~AudioRecordThread();
- virtual bool threadLoop();
- virtual status_t readyToRun() { return NO_ERROR; }
- virtual void onFirstRef() {}
+ friend class RecordThread;
+ friend class PlaybackThread;
- status_t start(MixerThread::RecordTrack* recordTrack);
- void stop(MixerThread::RecordTrack* recordTrack);
- void exit();
- status_t dump(int fd, const Vector<String16>& args);
-
- private:
- AudioRecordThread();
- AudioHardwareInterface *mAudioHardware;
- sp<AudioFlinger> mAudioFlinger;
- sp<MixerThread::RecordTrack> mRecordTrack;
- Mutex mLock;
- Condition mWaitWorkCV;
- Condition mStopped;
- volatile bool mActive;
- status_t mStartStatus;
- };
-
- friend class AudioRecordThread;
- friend class MixerThread;
- status_t startRecord(MixerThread::RecordTrack* recordTrack);
- void stopRecord(MixerThread::RecordTrack* recordTrack);
-
- mutable Mutex mHardwareLock;
mutable Mutex mLock;
- mutable Condition mWaitWorkCV;
DefaultKeyedVector< pid_t, wp<Client> > mClients;
- sp<MixerThread> mA2dpMixerThread;
- sp<MixerThread> mHardwareMixerThread;
+ mutable Mutex mHardwareLock;
AudioHardwareInterface* mAudioHardware;
- AudioHardwareInterface* mA2dpAudioInterface;
- sp<AudioRecordThread> mAudioRecordThread;
- bool mA2dpEnabled;
- bool mNotifyA2dpChange;
mutable int mHardwareStatus;
- SortedVector< wp<IBinder> > mNotificationClients;
- int mForcedSpeakerCount;
- int mA2dpDisableCount;
-
- // true if A2DP should resume when mA2dpDisableCount returns to zero
- bool mA2dpSuppressed;
- uint32_t mSavedRoute;
- uint32_t mForcedRoute;
- nsecs_t mRouteRestoreTime;
- bool mMusicMuteSaved;
+
+
+ SortedVector< sp<PlaybackThread> > mPlaybackThreads;
+ PlaybackThread::stream_type_t mStreamTypes[AudioSystem::NUM_STREAM_TYPES];
+ float mMasterVolume;
+ bool mMasterMute;
+
+ SortedVector< sp<RecordThread> > mRecordThreads;
+
+ SortedVector< sp<IBinder> > mNotificationClients;
};
// ----------------------------------------------------------------------------
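
The ConfigEvent queue declared in ThreadBase is the mechanism behind sendConfigEvent()/processConfigEvents(): a binder-side caller queues an event under mLock and wakes the thread, and the thread loop later dispatches it through audioConfigChanged(). A simplified sketch of that pattern, assuming the actual bodies in AudioFlinger.cpp may handle locking around the callback differently:

    void AudioFlinger::ThreadBase::sendConfigEvent(int event, int param)
    {
        Mutex::Autolock _l(mLock);
        ConfigEvent *configEvent = new ConfigEvent();
        configEvent->mEvent = event;
        configEvent->mParam = param;
        mConfigEvents.add(configEvent);   // queued, not dispatched in the caller's context
        mWaitWorkCV.signal();             // wake the thread loop so it drains the queue
    }

    void AudioFlinger::ThreadBase::processConfigEvents()
    {
        Mutex::Autolock _l(mLock);
        while (mConfigEvents.size()) {
            ConfigEvent *configEvent = mConfigEvents[0];
            mConfigEvents.removeAt(0);
            audioConfigChanged(configEvent->mEvent, configEvent->mParam);
            delete configEvent;
        }
    }
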
diff --git a/libs/audioflinger/AudioHardwareGeneric.cpp b/libs/audioflinger/AudioHardwareGeneric.cpp
index 1e159b8..57874f3 100644
--- a/libs/audioflinger/AudioHardwareGeneric.cpp
+++ b/libs/audioflinger/AudioHardwareGeneric.cpp
@@ -49,8 +49,8 @@ AudioHardwareGeneric::AudioHardwareGeneric()
AudioHardwareGeneric::~AudioHardwareGeneric()
{
if (mFd >= 0) ::close(mFd);
- delete mOutput;
- delete mInput;
+ closeOutputStream((AudioStreamOut *)mOutput);
+ closeInputStream((AudioStreamIn *)mInput);
}
status_t AudioHardwareGeneric::initCheck()
@@ -63,7 +63,7 @@ status_t AudioHardwareGeneric::initCheck()
}
AudioStreamOut* AudioHardwareGeneric::openOutputStream(
- int format, int channelCount, uint32_t sampleRate, status_t *status)
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
{
AutoMutex lock(mLock);
@@ -77,7 +77,7 @@ AudioStreamOut* AudioHardwareGeneric::openOutputStream(
// create new output stream
AudioStreamOutGeneric* out = new AudioStreamOutGeneric();
- status_t lStatus = out->set(this, mFd, format, channelCount, sampleRate);
+ status_t lStatus = out->set(this, mFd, devices, format, channels, sampleRate);
if (status) {
*status = lStatus;
}
@@ -89,17 +89,19 @@ AudioStreamOut* AudioHardwareGeneric::openOutputStream(
return mOutput;
}
-void AudioHardwareGeneric::closeOutputStream(AudioStreamOutGeneric* out) {
- if (out == mOutput) mOutput = 0;
+void AudioHardwareGeneric::closeOutputStream(AudioStreamOut* out) {
+ if (mOutput && out == mOutput) {
+ delete mOutput;
+ mOutput = 0;
+ }
}
AudioStreamIn* AudioHardwareGeneric::openInputStream(
- int inputSource, int format, int channelCount, uint32_t sampleRate,
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate,
status_t *status, AudioSystem::audio_in_acoustics acoustics)
{
// check for valid input source
- if ((inputSource < AudioRecord::DEFAULT_INPUT) ||
- (inputSource >= AudioRecord::NUM_INPUT_SOURCES)) {
+ if (!AudioSystem::isInputDevice((AudioSystem::audio_devices)devices)) {
return 0;
}
@@ -115,7 +117,7 @@ AudioStreamIn* AudioHardwareGeneric::openInputStream(
// create new output stream
AudioStreamInGeneric* in = new AudioStreamInGeneric();
- status_t lStatus = in->set(this, mFd, format, channelCount, sampleRate, acoustics);
+ status_t lStatus = in->set(this, mFd, devices, format, channels, sampleRate, acoustics);
if (status) {
*status = lStatus;
}
@@ -127,8 +129,11 @@ AudioStreamIn* AudioHardwareGeneric::openInputStream(
return mInput;
}
-void AudioHardwareGeneric::closeInputStream(AudioStreamInGeneric* in) {
- if (in == mInput) mInput = 0;
+void AudioHardwareGeneric::closeInputStream(AudioStreamIn* in) {
+ if (mInput && in == mInput) {
+ delete mInput;
+ mInput = 0;
+ }
}
status_t AudioHardwareGeneric::setVoiceVolume(float v)
@@ -185,30 +190,42 @@ status_t AudioHardwareGeneric::dump(int fd, const Vector<String16>& args)
status_t AudioStreamOutGeneric::set(
AudioHardwareGeneric *hw,
int fd,
- int format,
- int channels,
- uint32_t rate)
+ uint32_t devices,
+ int *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pRate)
{
+ int lFormat = pFormat ? *pFormat : 0;
+ uint32_t lChannels = pChannels ? *pChannels : 0;
+ uint32_t lRate = pRate ? *pRate : 0;
+
// fix up defaults
- if (format == 0) format = AudioSystem::PCM_16_BIT;
- if (channels == 0) channels = channelCount();
- if (rate == 0) rate = sampleRate();
+ if (lFormat == 0) lFormat = format();
+ if (lChannels == 0) lChannels = channels();
+ if (lRate == 0) lRate = sampleRate();
// check values
- if ((format != AudioSystem::PCM_16_BIT) ||
- (channels != channelCount()) ||
- (rate != sampleRate()))
+ if ((lFormat != format()) ||
+ (lChannels != channels()) ||
+ (lRate != sampleRate())) {
+ if (pFormat) *pFormat = format();
+ if (pChannels) *pChannels = channels();
+ if (pRate) *pRate = sampleRate();
return BAD_VALUE;
+ }
+
+ if (pFormat) *pFormat = lFormat;
+ if (pChannels) *pChannels = lChannels;
+ if (pRate) *pRate = lRate;
mAudioHardware = hw;
mFd = fd;
+ mDevice = devices;
return NO_ERROR;
}
AudioStreamOutGeneric::~AudioStreamOutGeneric()
{
- if (mAudioHardware)
- mAudioHardware->closeOutputStream(this);
}
ssize_t AudioStreamOutGeneric::write(const void* buffer, size_t bytes)
@@ -234,10 +251,12 @@ status_t AudioStreamOutGeneric::dump(int fd, const Vector<String16>& args)
result.append(buffer);
snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
result.append(buffer);
- snprintf(buffer, SIZE, "\tchannel count: %d\n", channelCount());
+ snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
result.append(buffer);
snprintf(buffer, SIZE, "\tformat: %d\n", format());
result.append(buffer);
+ snprintf(buffer, SIZE, "\tdevice: %d\n", mDevice);
+ result.append(buffer);
snprintf(buffer, SIZE, "\tmAudioHardware: %p\n", mAudioHardware);
result.append(buffer);
snprintf(buffer, SIZE, "\tmFd: %d\n", mFd);
@@ -246,29 +265,68 @@ status_t AudioStreamOutGeneric::dump(int fd, const Vector<String16>& args)
return NO_ERROR;
}
+status_t AudioStreamOutGeneric::setParameters(const String8& keyValuePairs)
+{
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 key = String8(AudioParameter::keyRouting);
+ status_t status = NO_ERROR;
+ int device;
+ LOGV("setParameters() %s", keyValuePairs.string());
+
+ if (param.getInt(key, device) == NO_ERROR) {
+ mDevice = device;
+ param.remove(key);
+ }
+
+ if (param.size()) {
+ status = BAD_VALUE;
+ }
+ return status;
+}
+
+String8 AudioStreamOutGeneric::getParameters(const String8& keys)
+{
+ AudioParameter param = AudioParameter(keys);
+ String8 value;
+ String8 key = String8(AudioParameter::keyRouting);
+
+ if (param.get(key, value) == NO_ERROR) {
+ param.addInt(key, (int)mDevice);
+ }
+
+ LOGV("getParameters() %s", param.toString().string());
+ return param.toString();
+}
+
// ----------------------------------------------------------------------------
// record functions
status_t AudioStreamInGeneric::set(
AudioHardwareGeneric *hw,
int fd,
- int format,
- int channels,
- uint32_t rate,
+ uint32_t devices,
+ int *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pRate,
AudioSystem::audio_in_acoustics acoustics)
{
// FIXME: remove logging
- LOGD("AudioStreamInGeneric::set(%p, %d, %d, %d, %u)", hw, fd, format, channels, rate);
+ if (pFormat == 0 || pChannels == 0 || pRate == 0) return BAD_VALUE;
+ LOGD("AudioStreamInGeneric::set(%p, %d, %d, %d, %u)", hw, fd, *pFormat, *pChannels, *pRate);
// check values
- if ((format != AudioSystem::PCM_16_BIT) ||
- (channels != channelCount()) ||
- (rate != sampleRate())) {
+ if ((*pFormat != format()) ||
+ (*pChannels != channels()) ||
+ (*pRate != sampleRate())) {
LOGE("Error opening input channel");
+ *pFormat = format();
+ *pChannels = channels();
+ *pRate = sampleRate();
return BAD_VALUE;
}
mAudioHardware = hw;
mFd = fd;
+ mDevice = devices;
return NO_ERROR;
}
@@ -276,14 +334,12 @@ AudioStreamInGeneric::~AudioStreamInGeneric()
{
// FIXME: remove logging
LOGD("AudioStreamInGeneric destructor");
- if (mAudioHardware)
- mAudioHardware->closeInputStream(this);
}
ssize_t AudioStreamInGeneric::read(void* buffer, ssize_t bytes)
{
// FIXME: remove logging
- LOGD("AudioStreamInGeneric::read(%p, %d) from fd %d", buffer, bytes, mFd);
+ LOGD("AudioStreamInGeneric::read(%p, %d) from fd %d", buffer, (int)bytes, mFd);
AutoMutex lock(mLock);
if (mFd < 0) {
LOGE("Attempt to read from unopened device");
@@ -303,10 +359,12 @@ status_t AudioStreamInGeneric::dump(int fd, const Vector<String16>& args)
result.append(buffer);
snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
result.append(buffer);
- snprintf(buffer, SIZE, "\tchannel count: %d\n", channelCount());
+ snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
result.append(buffer);
snprintf(buffer, SIZE, "\tformat: %d\n", format());
result.append(buffer);
+ snprintf(buffer, SIZE, "\tdevice: %d\n", mDevice);
+ result.append(buffer);
snprintf(buffer, SIZE, "\tmAudioHardware: %p\n", mAudioHardware);
result.append(buffer);
snprintf(buffer, SIZE, "\tmFd: %d\n", mFd);
@@ -315,6 +373,39 @@ status_t AudioStreamInGeneric::dump(int fd, const Vector<String16>& args)
return NO_ERROR;
}
+status_t AudioStreamInGeneric::setParameters(const String8& keyValuePairs)
+{
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 key = String8(AudioParameter::keyRouting);
+ status_t status = NO_ERROR;
+ int device;
+ LOGV("setParameters() %s", keyValuePairs.string());
+
+ if (param.getInt(key, device) == NO_ERROR) {
+ mDevice = device;
+ param.remove(key);
+ }
+
+ if (param.size()) {
+ status = BAD_VALUE;
+ }
+ return status;
+}
+
+String8 AudioStreamInGeneric::getParameters(const String8& keys)
+{
+ AudioParameter param = AudioParameter(keys);
+ String8 value;
+ String8 key = String8(AudioParameter::keyRouting);
+
+ if (param.get(key, value) == NO_ERROR) {
+ param.addInt(key, (int)mDevice);
+ }
+
+ LOGV("getParameters() %s", param.toString().string());
+ return param.toString();
+}
+
// ----------------------------------------------------------------------------
}; // namespace android
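
setParameters()/getParameters() above replace the old setRouting() calls with generic key/value strings. A hedged caller-side sketch of how the routing key is built and read back, using the AudioParameter helper introduced by this patch (the default AudioParameter constructor and the DEVICE_OUT_SPEAKER constant from the new AudioSystem device enum are assumed; the numeric value in the comment is only illustrative):

    AudioParameter param;
    param.addInt(String8(AudioParameter::keyRouting), (int)AudioSystem::DEVICE_OUT_SPEAKER);
    out->setParameters(param.toString());   // e.g. "routing=2"; the stream stores it in mDevice

    // reading the current routing back only requires the key
    String8 reply = out->getParameters(String8(AudioParameter::keyRouting));
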
diff --git a/libs/audioflinger/AudioHardwareGeneric.h b/libs/audioflinger/AudioHardwareGeneric.h
index c89df87..42da413 100644
--- a/libs/audioflinger/AudioHardwareGeneric.h
+++ b/libs/audioflinger/AudioHardwareGeneric.h
@@ -39,24 +39,28 @@ public:
virtual status_t set(
AudioHardwareGeneric *hw,
int mFd,
- int format,
- int channelCount,
- uint32_t sampleRate);
+ uint32_t devices,
+ int *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pRate);
virtual uint32_t sampleRate() const { return 44100; }
virtual size_t bufferSize() const { return 4096; }
- virtual int channelCount() const { return 2; }
+ virtual uint32_t channels() const { return AudioSystem::CHANNEL_OUT_STEREO; }
virtual int format() const { return AudioSystem::PCM_16_BIT; }
virtual uint32_t latency() const { return 20; }
- virtual status_t setVolume(float volume) { return INVALID_OPERATION; }
+ virtual status_t setVolume(float left, float right) { return INVALID_OPERATION; }
virtual ssize_t write(const void* buffer, size_t bytes);
virtual status_t standby();
virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
private:
AudioHardwareGeneric *mAudioHardware;
Mutex mLock;
int mFd;
+ uint32_t mDevice;
};
class AudioStreamInGeneric : public AudioStreamIn {
@@ -67,24 +71,28 @@ public:
virtual status_t set(
AudioHardwareGeneric *hw,
int mFd,
- int format,
- int channelCount,
- uint32_t sampleRate,
+ uint32_t devices,
+ int *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pRate,
AudioSystem::audio_in_acoustics acoustics);
- uint32_t sampleRate() const { return 8000; }
+ virtual uint32_t sampleRate() const { return 8000; }
virtual size_t bufferSize() const { return 320; }
- virtual int channelCount() const { return 1; }
+ virtual uint32_t channels() const { return AudioSystem::CHANNEL_IN_MONO; }
virtual int format() const { return AudioSystem::PCM_16_BIT; }
virtual status_t setGain(float gain) { return INVALID_OPERATION; }
virtual ssize_t read(void* buffer, ssize_t bytes);
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t standby() { return NO_ERROR; }
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
private:
AudioHardwareGeneric *mAudioHardware;
Mutex mLock;
int mFd;
+ uint32_t mDevice;
};
@@ -101,28 +109,27 @@ public:
virtual status_t setMicMute(bool state);
virtual status_t getMicMute(bool* state);
- virtual status_t setParameter(const char* key, const char* value)
- { return NO_ERROR; }
-
// create I/O streams
virtual AudioStreamOut* openOutputStream(
- int format=0,
- int channelCount=0,
- uint32_t sampleRate=0,
+ uint32_t devices,
+ int *format=0,
+ uint32_t *channels=0,
+ uint32_t *sampleRate=0,
status_t *status=0);
+ virtual void closeOutputStream(AudioStreamOut* out);
virtual AudioStreamIn* openInputStream(
- int inputSource,
- int format,
- int channelCount,
- uint32_t sampleRate,
+ uint32_t devices,
+ int *format,
+ uint32_t *channels,
+ uint32_t *sampleRate,
status_t *status,
AudioSystem::audio_in_acoustics acoustics);
+ virtual void closeInputStream(AudioStreamIn* in);
void closeOutputStream(AudioStreamOutGeneric* out);
void closeInputStream(AudioStreamInGeneric* in);
protected:
- virtual status_t doRouting() { return NO_ERROR; }
virtual status_t dump(int fd, const Vector<String16>& args);
private:
diff --git a/libs/audioflinger/AudioHardwareInterface.cpp b/libs/audioflinger/AudioHardwareInterface.cpp
index cc1bd8f..37be329 100644
--- a/libs/audioflinger/AudioHardwareInterface.cpp
+++ b/libs/audioflinger/AudioHardwareInterface.cpp
@@ -18,6 +18,7 @@
#include <cutils/properties.h>
#include <string.h>
#include <unistd.h>
+//#define LOG_NDEBUG 0
#define LOG_TAG "AudioHardwareInterface"
#include <utils/Log.h>
@@ -25,15 +26,17 @@
#include "AudioHardwareStub.h"
#include "AudioHardwareGeneric.h"
+#ifdef WITH_A2DP
+#include "A2dpAudioInterface.h"
+#endif
-//#define DUMP_FLINGER_OUT // if defined allows recording samples in a file
-#ifdef DUMP_FLINGER_OUT
+#ifdef ENABLE_AUDIO_DUMP
#include "AudioDumpInterface.h"
#endif
// change to 1 to log routing calls
-#define LOG_ROUTING_CALLS 0
+#define LOG_ROUTING_CALLS 1
namespace android {
@@ -48,14 +51,6 @@ static const char* routingModeStrings[] =
"IN_CALL"
};
-static const char* routeStrings[] =
-{
- "EARPIECE ",
- "SPEAKER ",
- "BLUETOOTH ",
- "HEADSET ",
- "BLUETOOTH_A2DP "
-};
static const char* routeNone = "NONE";
static const char* displayMode(int mode)
@@ -64,22 +59,6 @@ static const char* displayMode(int mode)
return routingModeStrings[0];
return routingModeStrings[mode+3];
}
-
-static const char* displayRoutes(uint32_t routes)
-{
- static char routeStr[80];
- if (routes == 0)
- return routeNone;
- routeStr[0] = 0;
- int bitMask = 1;
- for (int i = 0; i < 4; ++i, bitMask <<= 1) {
- if (routes & bitMask) {
- strcat(routeStr, routeStrings[i]);
- }
- }
- routeStr[strlen(routeStr)-1] = 0;
- return routeStr;
-}
#endif
// ----------------------------------------------------------------------------
@@ -112,13 +91,17 @@ AudioHardwareInterface* AudioHardwareInterface::create()
hw = new AudioHardwareStub();
}
-#ifdef DUMP_FLINGER_OUT
+#ifdef WITH_A2DP
+ hw = new A2dpAudioInterface(hw);
+#endif
+
+#ifdef ENABLE_AUDIO_DUMP
// This code adds a record of buffers in a file to write calls made by AudioFlinger.
// It replaces the current AudioHardwareInterface object by an intermediate one which
// will record buffers in a file (after sending them to hardware) for testing purpose.
- // This feature is enabled by defining symbol DUMP_FLINGER_OUT.
- // The output file is FLINGER_DUMP_NAME. Pause are not recorded in the file.
-
+ // This feature is enabled by defining symbol ENABLE_AUDIO_DUMP.
+ // The output file is set with setParameters("test_cmd_file_name=<name>"). Pauses are not recorded in the file.
+ LOGV("opening PCM dump interface");
hw = new AudioDumpInterface(hw); // replace interface
#endif
return hw;
@@ -132,48 +115,9 @@ AudioStreamIn::~AudioStreamIn() {}
AudioHardwareBase::AudioHardwareBase()
{
- // force a routing update on initialization
- memset(&mRoutes, 0, sizeof(mRoutes));
mMode = 0;
}
-// generics for audio routing - the real work is done in doRouting
-status_t AudioHardwareBase::setRouting(int mode, uint32_t routes)
-{
-#if LOG_ROUTING_CALLS
- LOGD("setRouting: mode=%s, routes=[%s]", displayMode(mode), displayRoutes(routes));
-#endif
- if (mode == AudioSystem::MODE_CURRENT)
- mode = mMode;
- if ((mode < 0) || (mode >= AudioSystem::NUM_MODES))
- return BAD_VALUE;
- uint32_t old = mRoutes[mode];
- mRoutes[mode] = routes;
- if ((mode != mMode) || (old == routes))
- return NO_ERROR;
-#if LOG_ROUTING_CALLS
- const char* oldRouteStr = strdup(displayRoutes(old));
- LOGD("doRouting: mode=%s, old route=[%s], new route=[%s]",
- displayMode(mode), oldRouteStr, displayRoutes(routes));
- delete oldRouteStr;
-#endif
- return doRouting();
-}
-
-status_t AudioHardwareBase::getRouting(int mode, uint32_t* routes)
-{
- if (mode == AudioSystem::MODE_CURRENT)
- mode = mMode;
- if ((mode < 0) || (mode >= AudioSystem::NUM_MODES))
- return BAD_VALUE;
- *routes = mRoutes[mode];
-#if LOG_ROUTING_CALLS
- LOGD("getRouting: mode=%s, routes=[%s]",
- displayMode(mode), displayRoutes(*routes));
-#endif
- return NO_ERROR;
-}
-
status_t AudioHardwareBase::setMode(int mode)
{
#if LOG_ROUTING_CALLS
@@ -182,29 +126,24 @@ status_t AudioHardwareBase::setMode(int mode)
if ((mode < 0) || (mode >= AudioSystem::NUM_MODES))
return BAD_VALUE;
if (mMode == mode)
- return NO_ERROR;
-#if LOG_ROUTING_CALLS
- LOGD("doRouting: old mode=%s, new mode=%s route=[%s]",
- displayMode(mMode), displayMode(mode), displayRoutes(mRoutes[mode]));
-#endif
+ return ALREADY_EXISTS;
mMode = mode;
- return doRouting();
+ return NO_ERROR;
}
-status_t AudioHardwareBase::getMode(int* mode)
+// default implementation
+status_t AudioHardwareBase::setParameters(const String8& keyValuePairs)
{
- // Implement: set audio routing
- *mode = mMode;
return NO_ERROR;
}
-status_t AudioHardwareBase::setParameter(const char* key, const char* value)
+// default implementation
+String8 AudioHardwareBase::getParameters(const String8& keys)
{
- // default implementation is to ignore
- return NO_ERROR;
+ String8 result = String8("");
+ return result;
}
-
// default implementation
size_t AudioHardwareBase::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
{
@@ -233,10 +172,6 @@ status_t AudioHardwareBase::dumpState(int fd, const Vector<String16>& args)
result.append(buffer);
snprintf(buffer, SIZE, "\tmMode: %d\n", mMode);
result.append(buffer);
- for (int i = 0, n = AudioSystem::NUM_MODES; i < n; ++i) {
- snprintf(buffer, SIZE, "\tmRoutes[%d]: %d\n", i, mRoutes[i]);
- result.append(buffer);
- }
::write(fd, result.string(), result.size());
dump(fd, args); // Dump the state of the concrete child.
return NO_ERROR;
diff --git a/libs/audioflinger/AudioHardwareStub.cpp b/libs/audioflinger/AudioHardwareStub.cpp
index 0ab4c60..1a03059 100644
--- a/libs/audioflinger/AudioHardwareStub.cpp
+++ b/libs/audioflinger/AudioHardwareStub.cpp
@@ -43,10 +43,10 @@ status_t AudioHardwareStub::initCheck()
}
AudioStreamOut* AudioHardwareStub::openOutputStream(
- int format, int channelCount, uint32_t sampleRate, status_t *status)
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
{
AudioStreamOutStub* out = new AudioStreamOutStub();
- status_t lStatus = out->set(format, channelCount, sampleRate);
+ status_t lStatus = out->set(format, channels, sampleRate);
if (status) {
*status = lStatus;
}
@@ -56,18 +56,22 @@ AudioStreamOut* AudioHardwareStub::openOutputStream(
return 0;
}
+void AudioHardwareStub::closeOutputStream(AudioStreamOut* out)
+{
+ delete out;
+}
+
AudioStreamIn* AudioHardwareStub::openInputStream(
- int inputSource, int format, int channelCount, uint32_t sampleRate,
+ uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate,
status_t *status, AudioSystem::audio_in_acoustics acoustics)
{
// check for valid input source
- if ((inputSource < AudioRecord::DEFAULT_INPUT) ||
- (inputSource >= AudioRecord::NUM_INPUT_SOURCES)) {
+ if (!AudioSystem::isInputDevice((AudioSystem::audio_devices)devices)) {
return 0;
}
AudioStreamInStub* in = new AudioStreamInStub();
- status_t lStatus = in->set(format, channelCount, sampleRate, acoustics);
+ status_t lStatus = in->set(format, channels, sampleRate, acoustics);
if (status) {
*status = lStatus;
}
@@ -77,6 +81,11 @@ AudioStreamIn* AudioHardwareStub::openInputStream(
return 0;
}
+void AudioHardwareStub::closeInputStream(AudioStreamIn* in)
+{
+ delete in;
+}
+
status_t AudioHardwareStub::setVoiceVolume(float volume)
{
return NO_ERROR;
@@ -107,24 +116,19 @@ status_t AudioHardwareStub::dump(int fd, const Vector<String16>& args)
// ----------------------------------------------------------------------------
-status_t AudioStreamOutStub::set(int format, int channels, uint32_t rate)
+status_t AudioStreamOutStub::set(int *pFormat, uint32_t *pChannels, uint32_t *pRate)
{
- // fix up defaults
- if (format == 0) format = AudioSystem::PCM_16_BIT;
- if (channels == 0) channels = channelCount();
- if (rate == 0) rate = sampleRate();
+ if (pFormat) *pFormat = format();
+ if (pChannels) *pChannels = channels();
+ if (pRate) *pRate = sampleRate();
- if ((format == AudioSystem::PCM_16_BIT) &&
- (channels == channelCount()) &&
- (rate == sampleRate()))
- return NO_ERROR;
- return BAD_VALUE;
+ return NO_ERROR;
}
ssize_t AudioStreamOutStub::write(const void* buffer, size_t bytes)
{
// fake timing for audio output
- usleep(bytes * 1000000 / sizeof(int16_t) / channelCount() / sampleRate());
+ usleep(bytes * 1000000 / sizeof(int16_t) / AudioSystem::popCount(channels()) / sampleRate());
return bytes;
}
@@ -141,7 +145,7 @@ status_t AudioStreamOutStub::dump(int fd, const Vector<String16>& args)
snprintf(buffer, SIZE, "AudioStreamOutStub::dump\n");
snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
- snprintf(buffer, SIZE, "\tchannel count: %d\n", channelCount());
+ snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
snprintf(buffer, SIZE, "\tformat: %d\n", format());
result.append(buffer);
::write(fd, result.string(), result.size());
@@ -150,20 +154,16 @@ status_t AudioStreamOutStub::dump(int fd, const Vector<String16>& args)
// ----------------------------------------------------------------------------
-status_t AudioStreamInStub::set(int format, int channels, uint32_t rate,
+status_t AudioStreamInStub::set(int *pFormat, uint32_t *pChannels, uint32_t *pRate,
AudioSystem::audio_in_acoustics acoustics)
{
- if ((format == AudioSystem::PCM_16_BIT) &&
- (channels == channelCount()) &&
- (rate == sampleRate()))
- return NO_ERROR;
- return BAD_VALUE;
+ return NO_ERROR;
}
ssize_t AudioStreamInStub::read(void* buffer, ssize_t bytes)
{
// fake timing for audio input
- usleep(bytes * 1000000 / sizeof(int16_t) / channelCount() / sampleRate());
+ usleep(bytes * 1000000 / sizeof(int16_t) / AudioSystem::popCount(channels()) / sampleRate());
memset(buffer, 0, bytes);
return bytes;
}
@@ -179,7 +179,7 @@ status_t AudioStreamInStub::dump(int fd, const Vector<String16>& args)
result.append(buffer);
snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
result.append(buffer);
- snprintf(buffer, SIZE, "\tchannel count: %d\n", channelCount());
+ snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
result.append(buffer);
snprintf(buffer, SIZE, "\tformat: %d\n", format());
result.append(buffer);
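
The stub streams now derive their fake timing from the channel mask rather than a channel count. As a quick worked check of the usleep() formula above, assuming the stub defaults (16-bit stereo output at 44100 Hz and a 4096-byte write):

    size_t bytes = 4096;
    int channelCount = AudioSystem::popCount(AudioSystem::CHANNEL_OUT_STEREO);   // 2
    size_t frames = bytes / (sizeof(int16_t) * channelCount);                    // 1024 frames
    long sleepUs = frames * 1000000 / 44100;                                     // ~23220 us per write
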
diff --git a/libs/audioflinger/AudioHardwareStub.h b/libs/audioflinger/AudioHardwareStub.h
index bf63cc5..8f43259 100644
--- a/libs/audioflinger/AudioHardwareStub.h
+++ b/libs/audioflinger/AudioHardwareStub.h
@@ -29,29 +29,33 @@ namespace android {
class AudioStreamOutStub : public AudioStreamOut {
public:
- virtual status_t set(int format, int channelCount, uint32_t sampleRate);
+ virtual status_t set(int *pFormat, uint32_t *pChannels, uint32_t *pRate);
virtual uint32_t sampleRate() const { return 44100; }
virtual size_t bufferSize() const { return 4096; }
- virtual int channelCount() const { return 2; }
+ virtual uint32_t channels() const { return AudioSystem::CHANNEL_OUT_STEREO; }
virtual int format() const { return AudioSystem::PCM_16_BIT; }
virtual uint32_t latency() const { return 0; }
- virtual status_t setVolume(float volume) { return NO_ERROR; }
+ virtual status_t setVolume(float left, float right) { return NO_ERROR; }
virtual ssize_t write(const void* buffer, size_t bytes);
virtual status_t standby();
virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR;}
+ virtual String8 getParameters(const String8& keys) {String8 result = String8(""); return result;}
};
class AudioStreamInStub : public AudioStreamIn {
public:
- virtual status_t set(int format, int channelCount, uint32_t sampleRate, AudioSystem::audio_in_acoustics acoustics);
+ virtual status_t set(int *pFormat, uint32_t *pChannels, uint32_t *pRate, AudioSystem::audio_in_acoustics acoustics);
virtual uint32_t sampleRate() const { return 8000; }
virtual size_t bufferSize() const { return 320; }
- virtual int channelCount() const { return 1; }
+ virtual uint32_t channels() const { return AudioSystem::CHANNEL_IN_MONO; }
virtual int format() const { return AudioSystem::PCM_16_BIT; }
virtual status_t setGain(float gain) { return NO_ERROR; }
virtual ssize_t read(void* buffer, ssize_t bytes);
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t standby() { return NO_ERROR; }
+ virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR;}
+ virtual String8 getParameters(const String8& keys) {String8 result = String8(""); return result;}
};
class AudioHardwareStub : public AudioHardwareBase
@@ -67,26 +71,25 @@ public:
virtual status_t setMicMute(bool state) { mMicMute = state; return NO_ERROR; }
virtual status_t getMicMute(bool* state) { *state = mMicMute ; return NO_ERROR; }
- virtual status_t setParameter(const char* key, const char* value)
- { return NO_ERROR; }
-
// create I/O streams
virtual AudioStreamOut* openOutputStream(
- int format=0,
- int channelCount=0,
- uint32_t sampleRate=0,
+ uint32_t devices,
+ int *format=0,
+ uint32_t *channels=0,
+ uint32_t *sampleRate=0,
status_t *status=0);
+ virtual void closeOutputStream(AudioStreamOut* out);
virtual AudioStreamIn* openInputStream(
- int inputSource,
- int format,
- int channelCount,
- uint32_t sampleRate,
+ uint32_t devices,
+ int *format,
+ uint32_t *channels,
+ uint32_t *sampleRate,
status_t *status,
- AudioSystem::audio_in_acoustics acoustics);
+ AudioSystem::audio_in_acoustics acoustics);
+ virtual void closeInputStream(AudioStreamIn* in);
protected:
- virtual status_t doRouting() { return NO_ERROR; }
virtual status_t dump(int fd, const Vector<String16>& args);
bool mMicMute;
diff --git a/libs/audioflinger/AudioMixer.cpp b/libs/audioflinger/AudioMixer.cpp
index b02efcc..19a442a 100644
--- a/libs/audioflinger/AudioMixer.cpp
+++ b/libs/audioflinger/AudioMixer.cpp
@@ -610,7 +610,6 @@ void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount,
t->in = in;
}
-inline
void AudioMixer::ditherAndClamp(int32_t* out, int32_t const *sums, size_t c)
{
for (size_t i=0 ; i<c ; i++) {
diff --git a/libs/audioflinger/AudioMixer.h b/libs/audioflinger/AudioMixer.h
index 72ca28a..15766cd 100644
--- a/libs/audioflinger/AudioMixer.h
+++ b/libs/audioflinger/AudioMixer.h
@@ -85,6 +85,8 @@ public:
uint32_t trackNames() const { return mTrackNames; }
+ static void ditherAndClamp(int32_t* out, int32_t const *sums, size_t c);
+
private:
enum {
@@ -176,7 +178,6 @@ private:
static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp);
static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp);
static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp);
- static void ditherAndClamp(int32_t* out, int32_t const *sums, size_t c);
static void process__validate(state_t* state, void* output);
static void process__nop(state_t* state, void* output);
diff --git a/libs/audioflinger/AudioPolicyManagerGeneric.cpp b/libs/audioflinger/AudioPolicyManagerGeneric.cpp
new file mode 100644
index 0000000..cf9ab88
--- /dev/null
+++ b/libs/audioflinger/AudioPolicyManagerGeneric.cpp
@@ -0,0 +1,764 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyManagerGeneric"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include "AudioPolicyManagerGeneric.h"
+#include <media/mediarecorder.h>
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+// AudioPolicyInterface implementation
+// ----------------------------------------------------------------------------
+
+
+status_t AudioPolicyManagerGeneric::setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address)
+{
+
+ LOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address);
+
+ // connect/disconnect only 1 device at a time
+ if (AudioSystem::popCount(device) != 1) return BAD_VALUE;
+
+ if (strlen(device_address) >= MAX_DEVICE_ADDRESS_LEN) {
+ LOGE("setDeviceConnectionState() invalid address: %s", device_address);
+ return BAD_VALUE;
+ }
+
+ // handle output devices
+ if (AudioSystem::isOutputDevice(device)) {
+ switch (state)
+ {
+ // handle output device connection
+ case AudioSystem::DEVICE_STATE_AVAILABLE:
+ if (mAvailableOutputDevices & device) {
+ LOGW("setDeviceConnectionState() device already connected: %x", device);
+ return INVALID_OPERATION;
+ }
+ LOGV("setDeviceConnectionState() connecting device %x", device);
+
+ // register new device as available
+ mAvailableOutputDevices |= device;
+ break;
+ // handle output device disconnection
+ case AudioSystem::DEVICE_STATE_UNAVAILABLE:
+ if (!(mAvailableOutputDevices & device)) {
+ LOGW("setDeviceConnectionState() device not connected: %x", device);
+ return INVALID_OPERATION;
+ }
+ LOGV("setDeviceConnectionState() disconnecting device %x", device);
+ // remove device from available output devices
+ mAvailableOutputDevices &= ~device;
+ break;
+
+ default:
+ LOGE("setDeviceConnectionState() invalid state: %x", state);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+ }
+ // handle input devices
+ if (AudioSystem::isInputDevice(device)) {
+ switch (state)
+ {
+ // handle input device connection
+ case AudioSystem::DEVICE_STATE_AVAILABLE:
+ if (mAvailableInputDevices & device) {
+ LOGW("setDeviceConnectionState() device already connected: %d", device);
+ return INVALID_OPERATION;
+ }
+ mAvailableInputDevices |= device;
+ break;
+
+ // handle input device disconnection
+ case AudioSystem::DEVICE_STATE_UNAVAILABLE:
+ if (!(mAvailableInputDevices & device)) {
+ LOGW("setDeviceConnectionState() device not connected: %d", device);
+ return INVALID_OPERATION;
+ }
+ mAvailableInputDevices &= ~device;
+ break;
+
+ default:
+ LOGE("setDeviceConnectionState() invalid state: %x", state);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+ }
+
+ LOGW("setDeviceConnectionState() invalid device: %x", device);
+ return BAD_VALUE;
+}
+
+AudioSystem::device_connection_state AudioPolicyManagerGeneric::getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address)
+{
+ AudioSystem::device_connection_state state = AudioSystem::DEVICE_STATE_UNAVAILABLE;
+ String8 address = String8(device_address);
+ if (AudioSystem::isOutputDevice(device)) {
+ if (device & mAvailableOutputDevices) {
+ state = AudioSystem::DEVICE_STATE_AVAILABLE;
+ }
+ } else if (AudioSystem::isInputDevice(device)) {
+ if (device & mAvailableInputDevices) {
+ state = AudioSystem::DEVICE_STATE_AVAILABLE;
+ }
+ }
+
+ return state;
+}
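
A hedged usage sketch for the two functions above: connecting a device sets its bit in mAvailableOutputDevices, so a subsequent query reports it as available (DEVICE_OUT_WIRED_HEADSET is assumed to be one of the output bits in the new AudioSystem::audio_devices enum; the manager pointer is illustrative):

    manager->setDeviceConnectionState(AudioSystem::DEVICE_OUT_WIRED_HEADSET,
                                      AudioSystem::DEVICE_STATE_AVAILABLE, "");
    // getDeviceConnectionState() now returns DEVICE_STATE_AVAILABLE for that device
    manager->setDeviceConnectionState(AudioSystem::DEVICE_OUT_WIRED_HEADSET,
                                      AudioSystem::DEVICE_STATE_UNAVAILABLE, "");
    // ...and DEVICE_STATE_UNAVAILABLE again once the bit is cleared
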
+
+void AudioPolicyManagerGeneric::setPhoneState(int state)
+{
+ LOGV("setPhoneState() state %d", state);
+ uint32_t newDevice = 0;
+ if (state < 0 || state >= AudioSystem::NUM_MODES) {
+ LOGW("setPhoneState() invalid state %d", state);
+ return;
+ }
+
+ if (state == mPhoneState ) {
+ LOGW("setPhoneState() setting same state %d", state);
+ return;
+ }
+ // store previous phone state for management of sonification strategy below
+ int oldState = mPhoneState;
+ mPhoneState = state;
+
+ // if leaving or entering the in-call state, handle the special case of active streams
+ // pertaining to the sonification strategy (see handleIncallSonification())
+ if (state == AudioSystem::MODE_IN_CALL ||
+ oldState == AudioSystem::MODE_IN_CALL) {
+ bool starting = (state == AudioSystem::MODE_IN_CALL) ? true : false;
+ LOGV("setPhoneState() in call state management: new state is %d", state);
+ for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ handleIncallSonification(stream, starting);
+ }
+ }
+}
+
+void AudioPolicyManagerGeneric::setRingerMode(uint32_t mode, uint32_t mask)
+{
+ LOGV("setRingerMode() mode %x, mask %x", mode, mask);
+
+ mRingerMode = mode;
+}
+
+void AudioPolicyManagerGeneric::setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+{
+ LOGV("setForceUse) usage %d, config %d, mPhoneState %d", usage, config, mPhoneState);
+ mForceUse[usage] = config;
+}
+
+AudioSystem::forced_config AudioPolicyManagerGeneric::getForceUse(AudioSystem::force_use usage)
+{
+ return mForceUse[usage];
+}
+
+void AudioPolicyManagerGeneric::setSystemProperty(const char* property, const char* value)
+{
+ LOGV("setSystemProperty() property %s, value %s", property, value);
+ if (strcmp(property, "ro.camera.sound.forced") == 0) {
+ if (atoi(value)) {
+ LOGV("ENFORCED_AUDIBLE cannot be muted");
+ mStreams[AudioSystem::ENFORCED_AUDIBLE].mCanBeMuted = false;
+ } else {
+ LOGV("ENFORCED_AUDIBLE can be muted");
+ mStreams[AudioSystem::ENFORCED_AUDIBLE].mCanBeMuted = true;
+ }
+ }
+}
+
+audio_io_handle_t AudioPolicyManagerGeneric::getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags)
+{
+ LOGV("getOutput() stream %d, samplingRate %d, format %d, channels %x, flags %x", stream, samplingRate, format, channels, flags);
+
+#ifdef AUDIO_POLICY_TEST
+ if (mCurOutput != 0) {
+ LOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelcount %d, mDirectOutput %d",
+ mCurOutput, mTestSamplingRate, mTestFormat, mTestChannelcount, mDirectOutput);
+
+ if (mTestOutputs[mCurOutput] == 0) {
+ LOGV("getOutput() opening test output");
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = mTestDevice;
+ outputDesc->mSamplingRate = mTestSamplingRate;
+ outputDesc->mFormat = mTestFormat;
+ outputDesc->mChannels = (mTestChannelcount == 1) ? AudioSystem::CHANNEL_OUT_MONO : AudioSystem::CHANNEL_OUT_STEREO;
+ outputDesc->mLatency = mTestLatencyMs;
+ outputDesc->mFlags = (AudioSystem::output_flags)(mDirectOutput ? AudioSystem::OUTPUT_FLAG_DIRECT : 0);
+ outputDesc->mRefCount[stream] = 0;
+ mTestOutputs[mCurOutput] = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+ mOutputs.add(mTestOutputs[mCurOutput], outputDesc);
+ }
+ return mTestOutputs[mCurOutput];
+ }
+#endif //AUDIO_POLICY_TEST
+ if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
+ (format != 0 && !AudioSystem::isLinearPCM(format)) ||
+ (channels != 0 && channels != AudioSystem::CHANNEL_OUT_MONO && channels != AudioSystem::CHANNEL_OUT_STEREO)) {
+ return NULL;
+ }
+
+ return mHardwareOutput;
+}
+
+status_t AudioPolicyManagerGeneric::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ LOGV("startOutput() output %p, stream %d", output, stream);
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("startOutput() unknow output %p", output);
+ return BAD_VALUE;
+ }
+
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+
+ // handle special case for sonification while in call
+ if (mPhoneState == AudioSystem::MODE_IN_CALL) {
+ handleIncallSonification(stream, true);
+ }
+
+ // increment usage count for this stream on the requested output:
+ outputDesc->changeRefCount(stream, 1);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerGeneric::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ LOGV("stopOutput() output %p, stream %d", output, stream);
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("stopOutput() unknow output %p", output);
+ return BAD_VALUE;
+ }
+
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+
+ // handle special case for sonification while in call
+ if (mPhoneState == AudioSystem::MODE_IN_CALL) {
+ handleIncallSonification(stream, false);
+ }
+
+ if (outputDesc->isUsedByStream(stream)) {
+ // decrement usage count of this stream on the output
+ outputDesc->changeRefCount(stream, -1);
+ return NO_ERROR;
+ } else {
+ LOGW("stopOutput() refcount is already 0 for output %p", output);
+ return INVALID_OPERATION;
+ }
+}
+
+void AudioPolicyManagerGeneric::releaseOutput(audio_io_handle_t output)
+{
+ LOGV("releaseOutput() %p", output);
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("releaseOutput() releasing unknown output %p", output);
+ return;
+ }
+
+#ifdef AUDIO_POLICY_TEST
+ int testIndex = testOutputIndex(output);
+ if (testIndex != 0) {
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
+ if (outputDesc->refCount() == 0) {
+ mpClientInterface->closeOutput(output);
+ delete mOutputs.valueAt(index);
+ mOutputs.removeItem(output);
+ mTestOutputs[testIndex] = 0;
+ }
+ }
+#endif //AUDIO_POLICY_TEST
+}
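
startOutput()/stopOutput()/releaseOutput() above hinge on per-stream reference counts kept in AudioOutputDescriptor (changeRefCount()/isUsedByStream()). A simplified sketch of the expected call sequence; stream and handle names are illustrative, and outside AUDIO_POLICY_TEST the generic releaseOutput() is effectively a no-op:

    policy->startOutput(output, AudioSystem::MUSIC);   // refCount[MUSIC]: 0 -> 1
    policy->startOutput(output, AudioSystem::MUSIC);   // refCount[MUSIC]: 1 -> 2
    policy->stopOutput(output, AudioSystem::MUSIC);    // refCount[MUSIC]: 2 -> 1
    policy->stopOutput(output, AudioSystem::MUSIC);    // refCount[MUSIC]: 1 -> 0
    policy->stopOutput(output, AudioSystem::MUSIC);    // already 0 -> INVALID_OPERATION
    policy->releaseOutput(output);
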
+
+audio_io_handle_t AudioPolicyManagerGeneric::getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics)
+{
+ audio_io_handle_t input = 0;
+ uint32_t device;
+
+ LOGV("getInput() inputSource %d, samplingRate %d, format %d, channels %x, acoustics %x", inputSource, samplingRate, format, channels, acoustics);
+
+ AudioInputDescriptor *inputDesc = new AudioInputDescriptor();
+ inputDesc->mDevice = AudioSystem::DEVICE_IN_BUILTIN_MIC;
+ inputDesc->mSamplingRate = samplingRate;
+ inputDesc->mFormat = format;
+ inputDesc->mChannels = channels;
+ inputDesc->mAcoustics = acoustics;
+ inputDesc->mRefCount = 0;
+ input = mpClientInterface->openInput(&inputDesc->mDevice,
+ &inputDesc->mSamplingRate,
+ &inputDesc->mFormat,
+ &inputDesc->mChannels,
+ inputDesc->mAcoustics);
+
+ // only accept input with the exact requested set of parameters
+ if ((samplingRate != inputDesc->mSamplingRate) ||
+ (format != inputDesc->mFormat) ||
+ (channels != inputDesc->mChannels)) {
+ LOGV("getOutput() failed opening input: samplingRate %d, format %d, channels %d",
+ samplingRate, format, channels);
+ mpClientInterface->closeInput(input);
+ delete inputDesc;
+ return NULL;
+ }
+ mInputs.add(input, inputDesc);
+ return input;
+}
+
+status_t AudioPolicyManagerGeneric::startInput(audio_io_handle_t input)
+{
+ LOGV("startInput() input %p", input);
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ LOGW("startInput() unknow input %p", input);
+ return BAD_VALUE;
+ }
+ AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
+
+#ifdef AUDIO_POLICY_TEST
+ if (mTestInput == 0)
+#endif //AUDIO_POLICY_TEST
+ {
+ // refuse 2 active AudioRecord clients at the same time
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ if (mInputs.valueAt(i)->mRefCount > 0) {
+ LOGW("startInput() input %p, other input %p already started", input, mInputs.keyAt(i));
+ return INVALID_OPERATION;
+ }
+ }
+ }
+
+ inputDesc->mRefCount = 1;
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerGeneric::stopInput(audio_io_handle_t input)
+{
+ LOGV("stopInput() input %p", input);
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ LOGW("stopInput() unknow input %p", input);
+ return BAD_VALUE;
+ }
+ AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
+
+ if (inputDesc->mRefCount == 0) {
+ LOGW("stopInput() input %p already stopped", input);
+ return INVALID_OPERATION;
+ } else {
+ inputDesc->mRefCount = 0;
+ return NO_ERROR;
+ }
+}
+
+void AudioPolicyManagerGeneric::releaseInput(audio_io_handle_t input)
+{
+ LOGV("releaseInput() %p", input);
+ ssize_t index = mInputs.indexOfKey(input);
+ if (index < 0) {
+ LOGW("releaseInput() releasing unknown input %p", input);
+ return;
+ }
+ mpClientInterface->closeInput(input);
+ delete mInputs.valueAt(index);
+ mInputs.removeItem(input);
+}
+
+
+
+void AudioPolicyManagerGeneric::initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax)
+{
+ LOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+ mStreams[stream].mIndexMin = indexMin;
+ mStreams[stream].mIndexMax = indexMax;
+}
+
+status_t AudioPolicyManagerGeneric::setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+{
+
+ if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) {
+ return BAD_VALUE;
+ }
+
+ LOGV("setStreamVolumeIndex() stream %d, index %d", stream, index);
+ mStreams[stream].mIndexCur = index;
+
+ // do not change actual stream volume if the stream is muted
+ if (mStreams[stream].mMuteCount != 0) {
+ return NO_ERROR;
+ }
+
+ // Do not change in-call volume if bluetooth is connected and vice versa
+ if ((stream == AudioSystem::VOICE_CALL && mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
+ (stream == AudioSystem::BLUETOOTH_SCO && mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO)) {
+ LOGV("setStreamVolumeIndex() cannot set stream %d volume with force use = %d for comm",
+ stream, mForceUse[AudioSystem::FOR_COMMUNICATION]);
+ return INVALID_OPERATION;
+ }
+
+ // compute and apply stream volume on all outputs according to connected device
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i);
+ uint32_t device = outputDesc->device();
+
+ float volume = computeVolume((int)stream, index, device);
+
+ LOGV("setStreamVolume() for output %p stream %d, volume %f", mOutputs.keyAt(i), stream, volume);
+ mpClientInterface->setStreamVolume(stream, volume, mOutputs.keyAt(i));
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerGeneric::getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+{
+ if (index == 0) {
+ return BAD_VALUE;
+ }
+ LOGV("getStreamVolumeIndex() stream %d", stream);
+ *index = mStreams[stream].mIndexCur;
+ return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManagerGeneric
+// ----------------------------------------------------------------------------
+
+// --- class factory
+
+
+extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
+{
+ return new AudioPolicyManagerGeneric(clientInterface);
+}
+
+extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
+{
+ delete interface;
+}
+
+AudioPolicyManagerGeneric::AudioPolicyManagerGeneric(AudioPolicyClientInterface *clientInterface)
+ :
+#ifdef AUDIO_POLICY_TEST
+ Thread(false),
+#endif //AUDIO_POLICY_TEST
+ mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0)
+{
+ mpClientInterface = clientInterface;
+
+ for (int i = 0; i < AudioSystem::NUM_FORCE_USE; i++) {
+ mForceUse[i] = AudioSystem::FORCE_NONE;
+ }
+
+ // devices available by default are speaker and microphone
+ mAvailableOutputDevices = AudioSystem::DEVICE_OUT_SPEAKER;
+ mAvailableInputDevices = AudioSystem::DEVICE_IN_BUILTIN_MIC;
+
+ // open hardware output
+ AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
+ outputDesc->mDevice = (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER;
+ mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
+ &outputDesc->mSamplingRate,
+ &outputDesc->mFormat,
+ &outputDesc->mChannels,
+ &outputDesc->mLatency,
+ outputDesc->mFlags);
+
+ if (mHardwareOutput == 0) {
+ LOGE("Failed to initialize hardware output stream, samplingRate: %d, format %d, channels %d",
+ outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannels);
+ } else {
+ mOutputs.add(mHardwareOutput, outputDesc);
+ }
+
+#ifdef AUDIO_POLICY_TEST
+ mTestDevice = AudioSystem::DEVICE_OUT_SPEAKER;
+ mTestSamplingRate = 44100;
+ mTestFormat = AudioSystem::PCM_16_BIT;
+ mTestChannelcount = 2;
+ mTestLatencyMs = 0;
+ mCurOutput = 0;
+ mDirectOutput = false;
+ for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+ mTestOutputs[i] = 0;
+ }
+
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ snprintf(buffer, SIZE, "AudioPolicyManagerTest");
+ run(buffer, ANDROID_PRIORITY_AUDIO);
+#endif //AUDIO_POLICY_TEST
+}
+
+AudioPolicyManagerGeneric::~AudioPolicyManagerGeneric()
+{
+#ifdef AUDIO_POLICY_TEST
+ exit();
+#endif //AUDIO_POLICY_TEST
+
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ mpClientInterface->closeOutput(mOutputs.keyAt(i));
+ delete mOutputs.valueAt(i);
+ }
+ mOutputs.clear();
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ mpClientInterface->closeInput(mInputs.keyAt(i));
+ delete mInputs.valueAt(i);
+ }
+ mInputs.clear();
+}
+
+#ifdef AUDIO_POLICY_TEST
+bool AudioPolicyManagerGeneric::threadLoop()
+{
+ LOGV("entering threadLoop()");
+ while (!exitPending())
+ {
+ Mutex::Autolock _l(mLock);
+ mWaitWorkCV.waitRelative(mLock, milliseconds(50));
+ String8 command;
+ command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
+ if (command != "") {
+ LOGV("Test command %s received", command.string());
+ AudioParameter param = AudioParameter(command);
+ int valueInt;
+ String8 value;
+ if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_output"));
+ mCurOutput = valueInt;
+ }
+ if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_direct"));
+ if (value == "false") {
+ mDirectOutput = false;
+ } else if (value == "true") {
+ mDirectOutput = true;
+ }
+ }
+ if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_input"));
+ mTestInput = valueInt;
+ }
+
+ if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_format"));
+ if (value == "PCM 16 bits") {
+ mTestFormat = AudioSystem::PCM_16_BIT;
+ } else if (value == "PCM 8 bits") {
+ mTestFormat = AudioSystem::PCM_8_BIT;
+ } else if (value == "Compressed MP3") {
+ mTestFormat = AudioSystem::MP3;
+ }
+ }
+ if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_channels"));
+ if (value == "Channels Stereo") {
+ mTestChannelcount = 2;
+ } else if (value == "Channels Mono") {
+ mTestChannelcount = 1;
+ }
+ }
+ if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
+ param.remove(String8("test_cmd_policy_sampleRate"));
+ if (valueInt >= 0 && valueInt <= 96000) {
+ mTestSamplingRate = valueInt;
+ }
+ }
+ mpClientInterface->setParameters(0, String8("test_cmd_policy="));
+ }
+ }
+ return false;
+}
+
+void AudioPolicyManagerGeneric::exit()
+{
+ {
+ AutoMutex _l(mLock);
+ requestExit();
+ mWaitWorkCV.signal();
+ }
+ requestExitAndWait();
+}
+
+int AudioPolicyManagerGeneric::testOutputIndex(audio_io_handle_t output)
+{
+ for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+ if (output == mTestOutputs[i]) return i;
+ }
+ return 0;
+}
+#endif //AUDIO_POLICY_TEST
+
+// ---
+
+AudioPolicyManagerGeneric::routing_strategy AudioPolicyManagerGeneric::getStrategy(AudioSystem::stream_type stream)
+{
+ // stream to strategy mapping
+ switch (stream) {
+ case AudioSystem::VOICE_CALL:
+ case AudioSystem::BLUETOOTH_SCO:
+ return STRATEGY_PHONE;
+ case AudioSystem::RING:
+ case AudioSystem::NOTIFICATION:
+ case AudioSystem::ALARM:
+ case AudioSystem::ENFORCED_AUDIBLE:
+ return STRATEGY_SONIFICATION;
+ case AudioSystem::DTMF:
+ return STRATEGY_DTMF;
+ default:
+ LOGE("unknown stream type");
+ case AudioSystem::SYSTEM:
+ // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
+ // while key clicks are played produces a poor result
+ case AudioSystem::TTS:
+ case AudioSystem::MUSIC:
+ return STRATEGY_MEDIA;
+ }
+}
+
+
+float AudioPolicyManagerGeneric::computeVolume(int stream, int index, uint32_t device)
+{
+ float volume = 1.0;
+
+ StreamDescriptor &streamDesc = mStreams[stream];
+
+ // Force max volume if stream cannot be muted
+ if (!streamDesc.mCanBeMuted) index = streamDesc.mIndexMax;
+
+ int volInt = (100 * (index - streamDesc.mIndexMin)) / (streamDesc.mIndexMax - streamDesc.mIndexMin);
+ volume = AudioSystem::linearToLog(volInt);
+
+ return volume;
+}
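Editor's note: a worked example of the mapping above, assuming only that AudioSystem::linearToLog() (declared in AudioSystem.h) converts a 0-100 linear value into an exponential amplitude in [0.0, 1.0]; the index range used here is hypothetical and not taken from the patch.

    // Illustrative sketch of computeVolume() for a stream with index range [0, 7].
    int indexMin = 0, indexMax = 7, index = 5;
    int volInt = (100 * (index - indexMin)) / (indexMax - indexMin);  // = 71
    float volume = AudioSystem::linearToLog(volInt);                  // assumed to grow toward 1.0 as volInt grows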
+
+void AudioPolicyManagerGeneric::setStreamMute(int stream, bool on, audio_io_handle_t output)
+{
+ LOGV("setStreamMute() stream %d, mute %d, output %p", stream, on, output);
+
+ StreamDescriptor &streamDesc = mStreams[stream];
+
+ if (on) {
+ if (streamDesc.mMuteCount++ == 0) {
+ if (streamDesc.mCanBeMuted) {
+ mpClientInterface->setStreamVolume((AudioSystem::stream_type)stream, 0, output);
+ }
+ }
+ } else {
+ if (streamDesc.mMuteCount == 0) {
+ LOGW("setStreamMute() unmuting non muted stream!");
+ return;
+ }
+ if (--streamDesc.mMuteCount == 0) {
+ uint32_t device = mOutputs.valueFor(output)->mDevice;
+ float volume = computeVolume(stream, streamDesc.mIndexCur, device);
+ mpClientInterface->setStreamVolume((AudioSystem::stream_type)stream, volume, output);
+ }
+ }
+}
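Editor's note: the mute counter above makes mute requests nest, so only the first mute and the matching last unmute touch the actual stream volume. A hypothetical call sequence (setStreamMute() is private and in this patch only invoked from handleIncallSonification()):

    // 'policy' is an AudioPolicyManagerGeneric*, 'out' a valid output handle (hypothetical names).
    policy->setStreamMute(AudioSystem::RING, true, out);   // count 0 -> 1: volume forced to 0
    policy->setStreamMute(AudioSystem::RING, true, out);   // count 1 -> 2: no change
    policy->setStreamMute(AudioSystem::RING, false, out);  // count 2 -> 1: still muted
    policy->setStreamMute(AudioSystem::RING, false, out);  // count 1 -> 0: volume recomputed from mIndexCur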
+
+void AudioPolicyManagerGeneric::handleIncallSonification(int stream, bool starting)
+{
+ // if the stream pertains to sonification strategy and we are in call we must
+ // mute the stream if it is low visibility. If it is high visibility, we must play a tone
+ // in the device used for phone strategy and play the tone if the selected device does not
+ // interfere with the device used for phone strategy
+ if (getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) {
+ AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mHardwareOutput);
+ LOGV("handleIncallSonification() stream %d starting %d device %x", stream, starting, outputDesc->mDevice);
+ if (outputDesc->isUsedByStream((AudioSystem::stream_type)stream)) {
+ if (AudioSystem::isLowVisibility((AudioSystem::stream_type)stream)) {
+ LOGV("handleIncallSonification() low visibility");
+ setStreamMute(stream, starting, mHardwareOutput);
+ } else {
+ if (starting) {
+ mpClientInterface->startTone(ToneGenerator::TONE_SUP_CALL_WAITING, AudioSystem::VOICE_CALL);
+ } else {
+ mpClientInterface->stopTone();
+ }
+ }
+ }
+ }
+}
+
+
+// --- AudioOutputDescriptor class implementation
+
+AudioPolicyManagerGeneric::AudioOutputDescriptor::AudioOutputDescriptor()
+ : mSamplingRate(0), mFormat(0), mChannels(0), mLatency(0),
+ mFlags((AudioSystem::output_flags)0), mDevice(0)
+{
+ // clear usage count for all stream types
+ for (int i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
+ mRefCount[i] = 0;
+ }
+}
+
+uint32_t AudioPolicyManagerGeneric::AudioOutputDescriptor::device()
+{
+ return mDevice;
+}
+
+void AudioPolicyManagerGeneric::AudioOutputDescriptor::changeRefCount(AudioSystem::stream_type stream, int delta)
+{
+ if ((delta + (int)mRefCount[stream]) < 0) {
+ LOGW("changeRefCount() invalid delta %d for stream %d, refCount %d", delta, stream, mRefCount[stream]);
+ mRefCount[stream] = 0;
+ return;
+ }
+ mRefCount[stream] += delta;
+ LOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]);
+}
+
+uint32_t AudioPolicyManagerGeneric::AudioOutputDescriptor::refCount()
+{
+ uint32_t refcount = 0;
+ for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
+ refcount += mRefCount[i];
+ }
+ return refcount;
+}
+
+// --- AudioInputDescriptor class implementation
+
+AudioPolicyManagerGeneric::AudioInputDescriptor::AudioInputDescriptor()
+ : mSamplingRate(0), mFormat(0), mChannels(0),
+ mAcoustics((AudioSystem::audio_in_acoustics)0), mDevice(0), mRefCount(0)
+{
+}
+
+}; // namespace android
diff --git a/libs/audioflinger/AudioPolicyManagerGeneric.h b/libs/audioflinger/AudioPolicyManagerGeneric.h
new file mode 100644
index 0000000..ddcb306
--- /dev/null
+++ b/libs/audioflinger/AudioPolicyManagerGeneric.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <hardware_legacy/AudioPolicyInterface.h>
+#include <utils/threads.h>
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+#define MAX_DEVICE_ADDRESS_LEN 20
+#define NUM_TEST_OUTPUTS 5
+
+class AudioPolicyManagerGeneric: public AudioPolicyInterface
+#ifdef AUDIO_POLICY_TEST
+ , public Thread
+#endif //AUDIO_POLICY_TEST
+{
+
+public:
+ AudioPolicyManagerGeneric(AudioPolicyClientInterface *clientInterface);
+ virtual ~AudioPolicyManagerGeneric();
+
+ // AudioPolicyInterface
+ virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address);
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address);
+ virtual void setPhoneState(int state);
+ virtual void setRingerMode(uint32_t mode, uint32_t mask);
+ virtual void setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config);
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage);
+ virtual void setSystemProperty(const char* property, const char* value);
+ virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags);
+ virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ virtual void releaseOutput(audio_io_handle_t output);
+ virtual audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics);
+ // indicates to the audio policy manager that the input starts being used.
+ virtual status_t startInput(audio_io_handle_t input);
+ // indicates to the audio policy manager that the input stops being used.
+ virtual status_t stopInput(audio_io_handle_t input);
+ virtual void releaseInput(audio_io_handle_t input);
+ virtual void initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax);
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index);
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index);
+
+private:
+
+ enum routing_strategy {
+ STRATEGY_MEDIA,
+ STRATEGY_PHONE,
+ STRATEGY_SONIFICATION,
+ STRATEGY_DTMF,
+ NUM_STRATEGIES
+ };
+
+ // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
+ // and keep track of the usage of this output by each audio stream type.
+ class AudioOutputDescriptor
+ {
+ public:
+ AudioOutputDescriptor();
+
+
+ uint32_t device();
+ void changeRefCount(AudioSystem::stream_type, int delta);
+ bool isUsedByStream(AudioSystem::stream_type stream) { return mRefCount[stream] > 0 ? true : false; }
+ uint32_t refCount();
+
+ uint32_t mSamplingRate; //
+ uint32_t mFormat; //
+ uint32_t mChannels; // output configuration
+ uint32_t mLatency; //
+ AudioSystem::output_flags mFlags; //
+ uint32_t mDevice; // current device this output is routed to
+ uint32_t mRefCount[AudioSystem::NUM_STREAM_TYPES]; // number of streams of each type using this output
+ };
+
+ // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
+ // and keep track of the usage of this input.
+ class AudioInputDescriptor
+ {
+ public:
+ AudioInputDescriptor();
+
+ uint32_t mSamplingRate; //
+ uint32_t mFormat; // input configuration
+ uint32_t mChannels; //
+ AudioSystem::audio_in_acoustics mAcoustics; //
+ uint32_t mDevice; // current device this input is routed to
+ uint32_t mRefCount; // number of AudioRecord clients using this input
+ };
+
+ // stream descriptor used for volume control
+ class StreamDescriptor
+ {
+ public:
+ StreamDescriptor()
+ : mIndexMin(0), mIndexMax(1), mIndexCur(1), mMuteCount(0), mCanBeMuted(true) {}
+
+ int mIndexMin; // min volume index
+ int mIndexMax; // max volume index
+ int mIndexCur; // current volume index
+ int mMuteCount; // mute request counter
+ bool mCanBeMuted; // true if the stream can be muted
+ };
+
+ // return the strategy corresponding to a given stream type
+ static routing_strategy getStrategy(AudioSystem::stream_type stream);
+ // compute the volume to apply on an output according to the stream type, volume index and output device
+ float computeVolume(int stream, int index, uint32_t device);
+ // Mute or unmute the stream on the specified output
+ void setStreamMute(int stream, bool on, audio_io_handle_t output);
+ // handle special cases for sonification strategy while in call: mute streams or replace by
+ // a special tone in the device used for communication
+ void handleIncallSonification(int stream, bool starting);
+
+
+#ifdef AUDIO_POLICY_TEST
+ virtual bool threadLoop();
+ void exit();
+ int testOutputIndex(audio_io_handle_t output);
+#endif //AUDIO_POLICY_TEST
+
+
+ AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
+ audio_io_handle_t mHardwareOutput; // hardware output handle
+
+ KeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mOutputs; // list of output descriptors
+ KeyedVector<audio_io_handle_t, AudioInputDescriptor *> mInputs; // list of input descriptors
+ uint32_t mAvailableOutputDevices; // bit field of all available output devices
+ uint32_t mAvailableInputDevices; // bit field of all available input devices
+ int mPhoneState; // current phone state
+ uint32_t mRingerMode; // current ringer mode
+ AudioSystem::forced_config mForceUse[AudioSystem::NUM_FORCE_USE]; // current forced use configuration
+
+ StreamDescriptor mStreams[AudioSystem::NUM_STREAM_TYPES]; // stream descriptors for volume control
+
+#ifdef AUDIO_POLICY_TEST
+ Mutex mLock;
+ Condition mWaitWorkCV;
+
+ int mCurOutput;
+ bool mDirectOutput;
+ audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS];
+ int mTestInput;
+ uint32_t mTestDevice;
+ uint32_t mTestSamplingRate;
+ uint32_t mTestFormat;
+ uint32_t mTestChannelcount;
+ uint32_t mTestLatencyMs;
+#endif //AUDIO_POLICY_TEST
+
+};
+
+};
diff --git a/libs/audioflinger/AudioPolicyService.cpp b/libs/audioflinger/AudioPolicyService.cpp
new file mode 100644
index 0000000..4810a44
--- /dev/null
+++ b/libs/audioflinger/AudioPolicyService.cpp
@@ -0,0 +1,677 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyService"
+//#define LOG_NDEBUG 0
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+#include <cutils/properties.h>
+#include <binder/IPCThreadState.h>
+#include <utils/String16.h>
+#include <utils/threads.h>
+#include "AudioPolicyService.h"
+#include <cutils/properties.h>
+#include <dlfcn.h>
+
+// ----------------------------------------------------------------------------
+// the sim build doesn't have gettid
+
+#ifndef HAVE_GETTID
+# define gettid getpid
+#endif
+
+namespace android {
+
+const char *AudioPolicyService::sAudioPolicyLibrary = "/system/lib/libaudiopolicy.so";
+const char *AudioPolicyService::sAudioPolicyGenericLibrary = "/system/lib/libaudiopolicygeneric.so";
+
+static bool checkPermission() {
+#ifndef HAVE_ANDROID_OS
+ return true;
+#endif
+ if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
+ bool ok = checkCallingPermission(String16("android.permission.MODIFY_AUDIO_SETTINGS"));
+ if (!ok) LOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
+ return ok;
+}
+
+// ----------------------------------------------------------------------------
+
+AudioPolicyService::AudioPolicyService()
+ : BnAudioPolicyService() , mpPolicyManager(NULL), mpPolicyManagerLibHandle(NULL)
+{
+ const char *audioPolicyLibrary;
+ char value[PROPERTY_VALUE_MAX];
+
+#if (defined GENERIC_AUDIO) || (defined AUDIO_POLICY_TEST)
+ audioPolicyLibrary = sAudioPolicyGenericLibrary;
+ LOGV("build for GENERIC_AUDIO - using generic audio policy");
+#else
+ // if running in emulation - use the generic audio policy
+ if (property_get("ro.kernel.qemu", value, 0)) {
+ LOGV("Running in emulation - using generic audio policy");
+ audioPolicyLibrary = sAudioPolicyGenericLibrary;
+ }
+ else {
+ LOGV("Using hardware specific audio policy");
+ audioPolicyLibrary = sAudioPolicyLibrary;
+ }
+#endif
+
+
+ mpPolicyManagerLibHandle = dlopen(audioPolicyLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (mpPolicyManagerLibHandle == NULL) {
+ LOGW("Could not load libaudio policy library");
+ return;
+ }
+
+ AudioPolicyInterface *(*createManager)(AudioPolicyClientInterface *) =
+ reinterpret_cast<AudioPolicyInterface* (*)(AudioPolicyClientInterface *)>(dlsym(mpPolicyManagerLibHandle, "createAudioPolicyManager"));
+
+ if (createManager == NULL ) {
+ LOGW("Could not get createAudioPolicyManager method");
+ return;
+ }
+
+ // start tone playback thread
+ mTonePlaybacThread = new AudioCommandThread();
+ // start audio commands thread
+ mAudioCommandThread = new AudioCommandThread();
+
+ mpPolicyManager = (*createManager)(this);
+
+ // load properties
+ property_get("ro.camera.sound.forced", value, "0");
+ mpPolicyManager->setSystemProperty("ro.camera.sound.forced", value);
+}
+
+AudioPolicyService::~AudioPolicyService()
+{
+ mTonePlaybacThread->exit();
+ mTonePlaybacThread.clear();
+ mAudioCommandThread->exit();
+ mAudioCommandThread.clear();
+
+ if (mpPolicyManager) {
+ void(*destroyManager)(AudioPolicyInterface *) =
+ reinterpret_cast<void(*)(AudioPolicyInterface *)>(dlsym(mpPolicyManagerLibHandle, "destroyAudioPolicyManager"));
+
+ if (destroyManager == NULL ) {
+ LOGW("Could not get destroyAudioPolicyManager method");
+ return;
+ }
+ (*destroyManager)(mpPolicyManager);
+ }
+}
+
+
+status_t AudioPolicyService::setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+ if (!AudioSystem::isOutputDevice(device) && !AudioSystem::isInputDevice(device)) {
+ return BAD_VALUE;
+ }
+ if (state != AudioSystem::DEVICE_STATE_AVAILABLE && state != AudioSystem::DEVICE_STATE_UNAVAILABLE) {
+ return BAD_VALUE;
+ }
+
+ LOGV("setDeviceConnectionState() tid %d", gettid());
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->setDeviceConnectionState(device, state, device_address);
+}
+
+AudioSystem::device_connection_state AudioPolicyService::getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address)
+{
+ if (mpPolicyManager == NULL) {
+ return AudioSystem::DEVICE_STATE_UNAVAILABLE;
+ }
+ if (!checkPermission()) {
+ return AudioSystem::DEVICE_STATE_UNAVAILABLE;
+ }
+ return mpPolicyManager->getDeviceConnectionState(device, device_address);
+}
+
+status_t AudioPolicyService::setPhoneState(int state)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+ if (state < 0 || state >= AudioSystem::NUM_MODES) {
+ return BAD_VALUE;
+ }
+
+ LOGV("setPhoneState() tid %d", gettid());
+
+ // TODO: check if it is more appropriate to do it in platform specific policy manager
+ AudioSystem::setMode(state);
+
+ Mutex::Autolock _l(mLock);
+ mpPolicyManager->setPhoneState(state);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::setRingerMode(uint32_t mode, uint32_t mask)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+
+ mpPolicyManager->setRingerMode(mode, mask);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+ if (usage < 0 || usage >= AudioSystem::NUM_FORCE_USE) {
+ return BAD_VALUE;
+ }
+ if (config < 0 || config >= AudioSystem::NUM_FORCE_CONFIG) {
+ return BAD_VALUE;
+ }
+ LOGV("setForceUse() tid %d", gettid());
+ Mutex::Autolock _l(mLock);
+ mpPolicyManager->setForceUse(usage, config);
+ return NO_ERROR;
+}
+
+AudioSystem::forced_config AudioPolicyService::getForceUse(AudioSystem::force_use usage)
+{
+ if (mpPolicyManager == NULL) {
+ return AudioSystem::FORCE_NONE;
+ }
+ if (!checkPermission()) {
+ return AudioSystem::FORCE_NONE;
+ }
+ if (usage < 0 || usage >= AudioSystem::NUM_FORCE_USE) {
+ return AudioSystem::FORCE_NONE;
+ }
+ return mpPolicyManager->getForceUse(usage);
+}
+
+audio_io_handle_t AudioPolicyService::getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags)
+{
+ if (mpPolicyManager == NULL) {
+ return NULL;
+ }
+ LOGV("getOutput() tid %d", gettid());
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags);
+}
+
+status_t AudioPolicyService::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ LOGV("startOutput() tid %d", gettid());
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->startOutput(output, stream);
+}
+
+status_t AudioPolicyService::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ LOGV("stopOutput() tid %d", gettid());
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->stopOutput(output, stream);
+}
+
+void AudioPolicyService::releaseOutput(audio_io_handle_t output)
+{
+ if (mpPolicyManager == NULL) {
+ return;
+ }
+ LOGV("releaseOutput() tid %d", gettid());
+ Mutex::Autolock _l(mLock);
+ mpPolicyManager->releaseOutput(output);
+}
+
+audio_io_handle_t AudioPolicyService::getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics)
+{
+ if (mpPolicyManager == NULL) {
+ return NULL;
+ }
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->getInput(inputSource, samplingRate, format, channels, acoustics);
+}
+
+status_t AudioPolicyService::startInput(audio_io_handle_t input)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->startInput(input);
+}
+
+status_t AudioPolicyService::stopInput(audio_io_handle_t input)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->stopInput(input);
+}
+
+void AudioPolicyService::releaseInput(audio_io_handle_t input)
+{
+ if (mpPolicyManager == NULL) {
+ return;
+ }
+ Mutex::Autolock _l(mLock);
+ mpPolicyManager->releaseInput(input);
+}
+
+status_t AudioPolicyService::initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+ if (stream < 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ return BAD_VALUE;
+ }
+ mpPolicyManager->initStreamVolume(stream, indexMin, indexMax);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+ if (stream < 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ return BAD_VALUE;
+ }
+
+ return mpPolicyManager->setStreamVolumeIndex(stream, index);
+}
+
+status_t AudioPolicyService::getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!checkPermission()) {
+ return PERMISSION_DENIED;
+ }
+ if (stream < 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ return BAD_VALUE;
+ }
+ return mpPolicyManager->getStreamVolumeIndex(stream, index);
+}
+
+void AudioPolicyService::binderDied(const wp<IBinder>& who) {
+ LOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(), IPCThreadState::self()->getCallingPid());
+}
+
+status_t AudioPolicyService::dump(int fd, const Vector<String16>& args)
+{
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ dumpPermissionDenial(fd, args);
+ } else {
+
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::dumpPermissionDenial(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+ snprintf(buffer, SIZE, "Permission Denial: "
+ "can't dump AudioPolicyService from pid=%d, uid=%d\n",
+ IPCThreadState::self()->getCallingPid(),
+ IPCThreadState::self()->getCallingUid());
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnAudioPolicyService::onTransact(code, data, reply, flags);
+}
+
+
+// ----------------------------------------------------------------------------
+void AudioPolicyService::instantiate() {
+ defaultServiceManager()->addService(
+ String16("media.audio_policy"), new AudioPolicyService());
+}
+
+
+// ----------------------------------------------------------------------------
+// AudioPolicyClientInterface implementation
+// ----------------------------------------------------------------------------
+
+
+audio_io_handle_t AudioPolicyService::openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ AudioSystem::output_flags flags)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ LOGW("openOutput() could not get AudioFlinger");
+ return NULL;
+ }
+
+ return af->openOutput(pDevices, pSamplingRate, (uint32_t *)pFormat, pChannels, pLatencyMs, flags);
+}
+
+audio_io_handle_t AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ LOGW("openDuplicateOutput() could not get AudioFlinger");
+ return NULL;
+ }
+ return af->openDuplicateOutput(output1, output2);
+}
+
+status_t AudioPolicyService::closeOutput(audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+
+ return af->closeOutput(output);
+}
+
+
+status_t AudioPolicyService::suspendOutput(audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ LOGW("suspendOutput() could not get AudioFlinger");
+ return PERMISSION_DENIED;
+ }
+
+ return af->suspendOutput(output);
+}
+
+status_t AudioPolicyService::restoreOutput(audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ LOGW("restoreOutput() could not get AudioFlinger");
+ return PERMISSION_DENIED;
+ }
+
+ return af->restoreOutput(output);
+}
+
+audio_io_handle_t AudioPolicyService::openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ LOGW("openInput() could not get AudioFlinger");
+ return NULL;
+ }
+
+ return af->openInput(pDevices, pSamplingRate, (uint32_t *)pFormat, pChannels, acoustics);
+}
+
+status_t AudioPolicyService::closeInput(audio_io_handle_t input)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+
+ return af->closeInput(input);
+}
+
+status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output)
+{
+ return mAudioCommandThread->volumeCommand((int)stream, volume, (void *)output);
+}
+
+status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+
+ return af->setStreamOutput(stream, output);
+}
+
+
+void AudioPolicyService::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
+{
+ mAudioCommandThread->parametersCommand((void *)ioHandle, keyValuePairs);
+}
+
+String8 AudioPolicyService::getParameters(audio_io_handle_t ioHandle, const String8& keys)
+{
+ String8 result = AudioSystem::getParameters(ioHandle, keys);
+ return result;
+}
+
+status_t AudioPolicyService::startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream)
+{
+ mTonePlaybacThread->startToneCommand(tone, stream);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::stopTone()
+{
+ mTonePlaybacThread->stopToneCommand();
+ return NO_ERROR;
+}
+
+
+// ----------- AudioPolicyService::AudioCommandThread implementation ----------
+
+AudioPolicyService::AudioCommandThread::AudioCommandThread()
+ : Thread(false)
+{
+ mpToneGenerator = NULL;
+}
+
+
+AudioPolicyService::AudioCommandThread::~AudioCommandThread()
+{
+ mAudioCommands.clear();
+ if (mpToneGenerator != NULL) delete mpToneGenerator;
+}
+
+void AudioPolicyService::AudioCommandThread::onFirstRef()
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+
+ snprintf(buffer, SIZE, "AudioCommandThread");
+
+ run(buffer, ANDROID_PRIORITY_AUDIO);
+}
+
+bool AudioPolicyService::AudioCommandThread::threadLoop()
+{
+ mLock.lock();
+ while (!exitPending())
+ {
+ while(!mAudioCommands.isEmpty()) {
+ AudioCommand *command = mAudioCommands[0];
+ mAudioCommands.removeAt(0);
+ switch (command->mCommand) {
+ case START_TONE: {
+ mLock.unlock();
+ ToneData *data = (ToneData *)command->mParam;
+ LOGV("AudioCommandThread() processing start tone %d on stream %d",
+ data->mType, data->mStream);
+ if (mpToneGenerator != NULL)
+ delete mpToneGenerator;
+ mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
+ mpToneGenerator->startTone(data->mType);
+ delete data;
+ mLock.lock();
+ }break;
+ case STOP_TONE: {
+ mLock.unlock();
+ LOGV("AudioCommandThread() processing stop tone");
+ if (mpToneGenerator != NULL) {
+ mpToneGenerator->stopTone();
+ delete mpToneGenerator;
+ mpToneGenerator = NULL;
+ }
+ mLock.lock();
+ }break;
+ case SET_VOLUME: {
+ VolumeData *data = (VolumeData *)command->mParam;
+ LOGV("AudioCommandThread() processing set volume stream %d, volume %f, output %p", data->mStream, data->mVolume, data->mIO);
+ mCommandStatus = AudioSystem::setStreamVolume(data->mStream, data->mVolume, data->mIO);
+ mCommandCond.signal();
+ mWaitWorkCV.wait(mLock);
+ delete data;
+ }break;
+ case SET_PARAMETERS: {
+ ParametersData *data = (ParametersData *)command->mParam;
+ LOGV("AudioCommandThread() processing set parameters string %s, io %p", data->mKeyValuePairs.string(), data->mIO);
+ mCommandStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
+ mCommandCond.signal();
+ mWaitWorkCV.wait(mLock);
+ delete data;
+ }break;
+ default:
+ LOGW("AudioCommandThread() unknown command %d", command->mCommand);
+ }
+ delete command;
+ }
+ LOGV("AudioCommandThread() going to sleep");
+ mWaitWorkCV.wait(mLock);
+ LOGV("AudioCommandThread() waking up");
+ }
+ mLock.unlock();
+ return false;
+}
+
+void AudioPolicyService::AudioCommandThread::startToneCommand(int type, int stream)
+{
+ Mutex::Autolock _l(mLock);
+ AudioCommand *command = new AudioCommand();
+ command->mCommand = START_TONE;
+ ToneData *data = new ToneData();
+ data->mType = type;
+ data->mStream = stream;
+ command->mParam = (void *)data;
+ mAudioCommands.add(command);
+ LOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
+ mWaitWorkCV.signal();
+}
+
+void AudioPolicyService::AudioCommandThread::stopToneCommand()
+{
+ Mutex::Autolock _l(mLock);
+ AudioCommand *command = new AudioCommand();
+ command->mCommand = STOP_TONE;
+ command->mParam = NULL;
+ mAudioCommands.add(command);
+ LOGV("AudioCommandThread() adding tone stop");
+ mWaitWorkCV.signal();
+}
+
+status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream, float volume, void *output)
+{
+ Mutex::Autolock _l(mLock);
+ AudioCommand *command = new AudioCommand();
+ command->mCommand = SET_VOLUME;
+ VolumeData *data = new VolumeData();
+ data->mStream = stream;
+ data->mVolume = volume;
+ data->mIO = output;
+ command->mParam = data;
+ mAudioCommands.add(command);
+ LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %p", stream, volume, output);
+ mWaitWorkCV.signal();
+ mCommandCond.wait(mLock);
+ status_t status = mCommandStatus;
+ mWaitWorkCV.signal();
+ return status;
+}
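Editor's note: volumeCommand() above and parametersCommand() below are synchronous: the caller queues a command, wakes the worker through mWaitWorkCV, then blocks on mCommandCond until threadLoop() has run the command and stored mCommandStatus; the final mWaitWorkCV.signal() releases the worker, which parked itself right after publishing the status. A condensed sketch of that handshake using standard C++ primitives (illustrative only, not part of the patch; spurious wakeups are ignored, as in the original code):

    #include <condition_variable>
    #include <mutex>
    #include <queue>

    struct CommandHandshake {
        std::mutex lock;
        std::condition_variable work;   // role of mWaitWorkCV
        std::condition_variable done;   // role of mCommandCond
        std::queue<int> commands;
        int status = 0;

        int post(int cmd) {                         // caller side (volumeCommand)
            std::unique_lock<std::mutex> l(lock);
            commands.push(cmd);
            work.notify_one();                      // wake the worker
            done.wait(l);                           // wait for the result
            int s = status;
            work.notify_one();                      // let the worker continue
            return s;
        }

        void worker() {                             // worker side (threadLoop)
            std::unique_lock<std::mutex> l(lock);
            for (;;) {
                while (commands.empty()) work.wait(l);
                commands.pop();                     // "execute" the command here
                status = 0;                         // record its status
                done.notify_one();                  // hand the status back
                work.wait(l);                       // park until the caller has read it
            }
        }
    };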
+
+status_t AudioPolicyService::AudioCommandThread::parametersCommand(void *ioHandle, const String8& keyValuePairs)
+{
+ Mutex::Autolock _l(mLock);
+ AudioCommand *command = new AudioCommand();
+ command->mCommand = SET_PARAMETERS;
+ ParametersData *data = new ParametersData();
+ data->mIO = ioHandle;
+ data->mKeyValuePairs = keyValuePairs;
+ command->mParam = data;
+ mAudioCommands.add(command);
+ LOGV("AudioCommandThread() adding set parameter string %s, io %p", keyValuePairs.string(), ioHandle);
+ mWaitWorkCV.signal();
+ mCommandCond.wait(mLock);
+ status_t status = mCommandStatus;
+ mWaitWorkCV.signal();
+ return status;
+}
+
+void AudioPolicyService::AudioCommandThread::exit()
+{
+ LOGV("AudioCommandThread::exit");
+ {
+ AutoMutex _l(mLock);
+ requestExit();
+ mWaitWorkCV.signal();
+ }
+ requestExitAndWait();
+}
+
+}; // namespace android
diff --git a/libs/audioflinger/AudioPolicyService.h b/libs/audioflinger/AudioPolicyService.h
new file mode 100644
index 0000000..47173dd
--- /dev/null
+++ b/libs/audioflinger/AudioPolicyService.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPOLICYSERVICE_H
+#define ANDROID_AUDIOPOLICYSERVICE_H
+
+#include <media/IAudioPolicyService.h>
+#include <hardware_legacy/AudioPolicyInterface.h>
+#include <media/ToneGenerator.h>
+
+namespace android {
+
+class String8;
+
+// ----------------------------------------------------------------------------
+
+class AudioPolicyService: public BnAudioPolicyService, public AudioPolicyClientInterface, public IBinder::DeathRecipient
+{
+
+public:
+ static void instantiate();
+
+ virtual status_t dump(int fd, const Vector<String16>& args);
+
+ //
+ // BnAudioPolicyService (see AudioPolicyInterface for method descriptions)
+ //
+
+ virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address);
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ const char *device_address);
+ virtual status_t setPhoneState(int state);
+ virtual status_t setRingerMode(uint32_t mode, uint32_t mask);
+ virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config);
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage);
+ virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT);
+ virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ virtual void releaseOutput(audio_io_handle_t output);
+ virtual audio_io_handle_t getInput(int inputSource,
+ uint32_t samplingRate = 0,
+ uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t channels = 0,
+ AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0);
+ virtual status_t startInput(audio_io_handle_t input);
+ virtual status_t stopInput(audio_io_handle_t input);
+ virtual void releaseInput(audio_io_handle_t input);
+ virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax);
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index);
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index);
+
+ virtual status_t onTransact(
+ uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags);
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ //
+ // AudioPolicyClientInterface
+ //
+ virtual audio_io_handle_t openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ AudioSystem::output_flags flags);
+ virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2);
+ virtual status_t closeOutput(audio_io_handle_t output);
+ virtual status_t suspendOutput(audio_io_handle_t output);
+ virtual status_t restoreOutput(audio_io_handle_t output);
+ virtual audio_io_handle_t openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics);
+ virtual status_t closeInput(audio_io_handle_t input);
+ virtual status_t setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output);
+ virtual status_t setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output);
+ virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
+ virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+ virtual status_t startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream);
+ virtual status_t stopTone();
+
+private:
+ AudioPolicyService();
+ virtual ~AudioPolicyService();
+
+ static const char *sAudioPolicyLibrary;
+ static const char *sAudioPolicyGenericLibrary;
+ // Thread used for tone playback and to send audio config commands to audio flinger
+ // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because startTone()
+ // and stopTone() are normally called with mLock locked and requesting a tone start or stop will cause
+ // calls to AudioPolicyService and an attempt to lock mLock.
+ // For audio config commands, it is necessary because audio flinger requires that the calling process (user)
+ // has permission to modify audio settings.
+ class AudioCommandThread : public Thread {
+ public:
+
+ // command codes for AudioCommand
+ enum {
+ START_TONE,
+ STOP_TONE,
+ SET_VOLUME,
+ SET_PARAMETERS
+ };
+
+ AudioCommandThread ();
+ virtual ~AudioCommandThread();
+
+ // Thread virtuals
+ virtual void onFirstRef();
+ virtual bool threadLoop();
+
+ void exit();
+ void startToneCommand(int type = 0, int stream = 0);
+ void stopToneCommand();
+ status_t volumeCommand(int stream, float volume, void *output);
+ status_t parametersCommand(void *ioHandle, const String8& keyValuePairs);
+
+ private:
+ // descriptor for a command to be processed by the thread
+ class AudioCommand {
+ public:
+ int mCommand; // START_TONE, STOP_TONE ...
+ void *mParam;
+ };
+
+ class ToneData {
+ public:
+ int mType; // tone type (START_TONE only)
+ int mStream; // stream type (START_TONE only)
+ };
+
+ class VolumeData {
+ public:
+ int mStream;
+ float mVolume;
+ void *mIO;
+ };
+ class ParametersData {
+ public:
+ void *mIO;
+ String8 mKeyValuePairs;
+ };
+
+
+ Mutex mLock;
+ Condition mWaitWorkCV;
+ Vector<AudioCommand *> mAudioCommands; // list of pending commands
+ Condition mCommandCond;
+ status_t mCommandStatus;
+ ToneGenerator *mpToneGenerator; // the tone generator
+ };
+
+ // Internal dump utilities.
+ status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
+
+
+ Mutex mLock; // prevents concurrent access to AudioPolicy manager functions changing device
+ // connection state or routing
+ AudioPolicyInterface* mpPolicyManager; // the platform specific policy manager
+ sp <AudioCommandThread> mAudioCommandThread; // audio commands thread
+ sp <AudioCommandThread> mTonePlaybacThread; // tone playback thread
+ void *mpPolicyManagerLibHandle;
+};
+
+}; // namespace android
+
+#endif // ANDROID_AUDIOPOLICYSERVICE_H
+
+
+
+
+
+
+
+
diff --git a/media/java/android/media/AudioFormat.java b/media/java/android/media/AudioFormat.java
index 0732b61..500f6a4 100644
--- a/media/java/android/media/AudioFormat.java
+++ b/media/java/android/media/AudioFormat.java
@@ -37,15 +37,58 @@ public class AudioFormat {
public static final int ENCODING_PCM_8BIT = 3; // accessed by native code
/** Invalid audio channel configuration */
- public static final int CHANNEL_CONFIGURATION_INVALID = 0;
+ /** @deprecated use CHANNEL_INVALID instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_INVALID = 0;
/** Default audio channel configuration */
- public static final int CHANNEL_CONFIGURATION_DEFAULT = 1;
+ /** @deprecated use CHANNEL_OUT_DEFAULT or CHANNEL_IN_DEFAULT instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_DEFAULT = 1;
/** Mono audio configuration */
- public static final int CHANNEL_CONFIGURATION_MONO = 2;
+ /** @deprecated use CHANNEL_OUT_MONO or CHANNEL_IN_MONO instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_MONO = 2;
/** Stereo (2 channel) audio configuration */
- public static final int CHANNEL_CONFIGURATION_STEREO = 3;
-
-}
+ /** @deprecated use CHANNEL_OUT_STEREO or CHANNEL_IN_STEREO instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_STEREO = 3;
+ /** Invalid audio channel mask */
+ public static final int CHANNEL_INVALID = -1;
+ /** Default audio channel mask */
+ // Channel mask definitions must be kept in sync with native values in include/media/AudioSystem.h
+ public static final int CHANNEL_OUT_DEFAULT = 0;
+ public static final int CHANNEL_OUT_FRONT_LEFT = 0x1;
+ public static final int CHANNEL_OUT_FRONT_RIGHT = 0x2;
+ public static final int CHANNEL_OUT_FRONT_CENTER = 0x4;
+ public static final int CHANNEL_OUT_LOW_FREQUENCY = 0x8;
+ public static final int CHANNEL_OUT_BACK_LEFT = 0x10;
+ public static final int CHANNEL_OUT_BACK_RIGHT = 0x20;
+ public static final int CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40;
+ public static final int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80;
+ public static final int CHANNEL_OUT_BACK_CENTER = 0x100;
+ public static final int CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT;
+ public static final int CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT);
+ public static final int CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
+ public static final int CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER);
+ public static final int CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
+ public static final int CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
+ public static final int CHANNEL_IN_DEFAULT = 0;
+ public static final int CHANNEL_IN_LEFT = 0x10000;
+ public static final int CHANNEL_IN_RIGHT = 0x20000;
+ public static final int CHANNEL_IN_FRONT = 0x40000;
+ public static final int CHANNEL_IN_BACK = 0x80000;
+ public static final int CHANNEL_IN_LEFT_PROCESSED = 0x100000;
+ public static final int CHANNEL_IN_RIGHT_PROCESSED = 0x200000;
+ public static final int CHANNEL_IN_FRONT_PROCESSED = 0x400000;
+ public static final int CHANNEL_IN_BACK_PROCESSED = 0x800000;
+ public static final int CHANNEL_IN_PRESSURE = 0x1000000;
+ public static final int CHANNEL_IN_X_AXIS = 0x2000000;
+ public static final int CHANNEL_IN_Y_AXIS = 0x4000000;
+ public static final int CHANNEL_IN_Z_AXIS = 0x8000000;
+ public static final int CHANNEL_IN_MONO = CHANNEL_IN_FRONT;
+ public static final int CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT);
+}
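Editor's note: the CHANNEL_OUT_* / CHANNEL_IN_* constants above are bit masks, so a configuration is built by OR-ing positions and the channel count is simply the number of set bits. A small native-side sketch with the bits redefined locally for illustration (the authoritative values are the ones in include/media/AudioSystem.h that the comment above refers to):

    #include <stdint.h>

    static const uint32_t OUT_FRONT_LEFT  = 0x1;   // mirrors CHANNEL_OUT_FRONT_LEFT
    static const uint32_t OUT_FRONT_RIGHT = 0x2;   // mirrors CHANNEL_OUT_FRONT_RIGHT
    static const uint32_t OUT_STEREO = OUT_FRONT_LEFT | OUT_FRONT_RIGHT;

    static int channelCount(uint32_t mask) {
        int n = 0;
        for (; mask != 0; mask &= mask - 1) n++;   // clear the lowest set bit each pass
        return n;
    }
    // channelCount(OUT_STEREO) == 2, channelCount(OUT_FRONT_LEFT) == 1 (mono).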
diff --git a/media/java/android/media/AudioManager.java b/media/java/android/media/AudioManager.java
index a65a417..040d4bc 100644
--- a/media/java/android/media/AudioManager.java
+++ b/media/java/android/media/AudioManager.java
@@ -140,11 +140,17 @@ public class AudioManager {
public static final int STREAM_NOTIFICATION = AudioSystem.STREAM_NOTIFICATION;
/** @hide The audio stream for phone calls when connected to bluetooth */
public static final int STREAM_BLUETOOTH_SCO = AudioSystem.STREAM_BLUETOOTH_SCO;
+ /** @hide The audio stream for enforced system sounds in certain countries (e.g. camera in Japan) */
+ public static final int STREAM_SYSTEM_ENFORCED = AudioSystem.STREAM_SYSTEM_ENFORCED;
+ /** The audio stream for DTMF Tones */
+ public static final int STREAM_DTMF = AudioSystem.STREAM_DTMF;
+ /** @hide The audio stream for text to speech (TTS) */
+ public static final int STREAM_TTS = AudioSystem.STREAM_TTS;
/** Number of audio streams */
/**
* @deprecated Use AudioSystem.getNumStreamTypes() instead
*/
- public static final int NUM_STREAMS = AudioSystem.NUM_STREAMS;
+ @Deprecated public static final int NUM_STREAMS = AudioSystem.NUM_STREAMS;
/** @hide Maximum volume index values for audio streams */
@@ -156,6 +162,9 @@ public class AudioManager {
8, // STREAM_ALARM
8, // STREAM_NOTIFICATION
16, // STREAM_BLUETOOTH_SCO
+ 8, // STREAM_SYSTEM_ENFORCED
+ 16, // STREAM_DTMF
+ 16 // STREAM_TTS
};
/** @hide Default volume index values for audio streams */
@@ -166,7 +175,10 @@ public class AudioManager {
11, // STREAM_MUSIC
6, // STREAM_ALARM
5, // STREAM_NOTIFICATION
- 7 // STREAM_BLUETOOTH_SCO
+ 7, // STREAM_BLUETOOTH_SCO
+ 5, // STREAM_SYSTEM_ENFORCED
+ 11, // STREAM_DTMF
+ 11 // STREAM_TTS
};
/**
@@ -637,9 +649,11 @@ public class AudioManager {
* <var>false</var> to turn it off
*/
public void setSpeakerphoneOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_SPEAKER: 0, ROUTE_SPEAKER);
+ if (on) {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_SPEAKER);
+ } else {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_NONE);
+ }
}
/**
@@ -648,41 +662,47 @@ public class AudioManager {
* @return true if speakerphone is on, false if it's off
*/
public boolean isSpeakerphoneOn() {
- return (getRoutingP(MODE_IN_CALL) & ROUTE_SPEAKER) == 0 ? false : true;
+ if (AudioSystem.getForceUse(AudioSystem.FOR_COMMUNICATION) == AudioSystem.FORCE_SPEAKER) {
+ return true;
+ } else {
+ return false;
+ }
}
/**
- * Sets audio routing to the Bluetooth headset on or off.
+ * Request use of Bluetooth SCO headset for communications.
*
- * @param on set <var>true</var> to route SCO (voice) audio to/from Bluetooth
- * headset; <var>false</var> to route audio to/from phone earpiece
+ * @param on set <var>true</var> to use bluetooth SCO for communications;
+ * <var>false</var> to not use bluetooth SCO for communications
*/
public void setBluetoothScoOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_BLUETOOTH_SCO: 0, ROUTE_BLUETOOTH_SCO);
+ if (on) {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_BT_SCO);
+ } else {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_NONE);
+ }
}
/**
- * Checks whether audio routing to the Bluetooth headset is on or off.
+ * Checks whether communications use Bluetooth SCO.
*
- * @return true if SCO audio is being routed to/from Bluetooth headset;
+ * @return true if SCO is used for communications;
* false if otherwise
*/
public boolean isBluetoothScoOn() {
- return (getRoutingP(MODE_IN_CALL) & ROUTE_BLUETOOTH_SCO) == 0 ? false : true;
+ if (AudioSystem.getForceUse(AudioSystem.FOR_COMMUNICATION) == AudioSystem.FORCE_BT_SCO) {
+ return true;
+ } else {
+ return false;
+ }
}
/**
- * Sets A2DP audio routing to the Bluetooth headset on or off.
- *
* @param on set <var>true</var> to route A2DP audio to/from Bluetooth
* headset; <var>false</var> disable A2DP audio
+ * @deprecated Do not use.
*/
- public void setBluetoothA2dpOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_BLUETOOTH_A2DP: 0, ROUTE_BLUETOOTH_A2DP);
+ @Deprecated public void setBluetoothA2dpOn(boolean on){
}
/**
@@ -692,7 +712,12 @@ public class AudioManager {
* false if otherwise
*/
public boolean isBluetoothA2dpOn() {
- return (getRoutingP(MODE_NORMAL) & ROUTE_BLUETOOTH_A2DP) == 0 ? false : true;
+ if (AudioSystem.getDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP,"")
+ == AudioSystem.DEVICE_STATE_UNAVAILABLE) {
+ return false;
+ } else {
+ return true;
+ }
}
/**
@@ -700,12 +725,9 @@ public class AudioManager {
*
* @param on set <var>true</var> to route audio to/from wired
* headset; <var>false</var> disable wired headset audio
- * @hide
+ * @deprecated Do not use.
*/
- public void setWiredHeadsetOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_HEADSET: 0, ROUTE_HEADSET);
+ @Deprecated public void setWiredHeadsetOn(boolean on){
}
/**
@@ -713,10 +735,14 @@ public class AudioManager {
*
* @return true if audio is being routed to/from wired headset;
* false if otherwise
- * @hide
*/
public boolean isWiredHeadsetOn() {
- return (getRoutingP(MODE_NORMAL) & ROUTE_HEADSET) == 0 ? false : true;
+ if (AudioSystem.getDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADSET,"")
+ == AudioSystem.DEVICE_STATE_UNAVAILABLE) {
+ return false;
+ } else {
+ return true;
+ }
}
/**
@@ -726,12 +752,7 @@ public class AudioManager {
* <var>false</var> to turn mute off
*/
public void setMicrophoneMute(boolean on){
- IAudioService service = getService();
- try {
- service.setMicrophoneMute(on);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setMicrophoneMute", e);
- }
+ AudioSystem.muteMicrophone(on);
}
/**
@@ -740,13 +761,7 @@ public class AudioManager {
* @return true if microphone is muted, false if it's not
*/
public boolean isMicrophoneMute() {
- IAudioService service = getService();
- try {
- return service.isMicrophoneMute();
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in isMicrophoneMute", e);
- return false;
- }
+ return AudioSystem.isMicrophoneMuted();
}
/**
@@ -809,32 +824,39 @@ public class AudioManager {
/* Routing bits for setRouting/getRouting API */
/**
* Routing audio output to earpiece
+ * @deprecated
*/
- public static final int ROUTE_EARPIECE = AudioSystem.ROUTE_EARPIECE;
+ @Deprecated public static final int ROUTE_EARPIECE = AudioSystem.ROUTE_EARPIECE;
/**
* Routing audio output to spaker
+ * @deprecated
*/
- public static final int ROUTE_SPEAKER = AudioSystem.ROUTE_SPEAKER;
+ @Deprecated public static final int ROUTE_SPEAKER = AudioSystem.ROUTE_SPEAKER;
/**
* @deprecated use {@link #ROUTE_BLUETOOTH_SCO}
+ * @deprecated
*/
@Deprecated public static final int ROUTE_BLUETOOTH = AudioSystem.ROUTE_BLUETOOTH_SCO;
/**
* Routing audio output to bluetooth SCO
+ * @deprecated
*/
- public static final int ROUTE_BLUETOOTH_SCO = AudioSystem.ROUTE_BLUETOOTH_SCO;
+ @Deprecated public static final int ROUTE_BLUETOOTH_SCO = AudioSystem.ROUTE_BLUETOOTH_SCO;
/**
* Routing audio output to headset
+ * @deprecated
*/
- public static final int ROUTE_HEADSET = AudioSystem.ROUTE_HEADSET;
+ @Deprecated public static final int ROUTE_HEADSET = AudioSystem.ROUTE_HEADSET;
/**
* Routing audio output to bluetooth A2DP
+ * @deprecated
*/
- public static final int ROUTE_BLUETOOTH_A2DP = AudioSystem.ROUTE_BLUETOOTH_A2DP;
+ @Deprecated public static final int ROUTE_BLUETOOTH_A2DP = AudioSystem.ROUTE_BLUETOOTH_A2DP;
/**
* Used for mask parameter of {@link #setRouting(int,int,int)}.
+ * @deprecated
*/
- public static final int ROUTE_ALL = AudioSystem.ROUTE_ALL;
+ @Deprecated public static final int ROUTE_ALL = AudioSystem.ROUTE_ALL;
/**
* Sets the audio routing for a specified mode
@@ -846,16 +868,10 @@ public class AudioManager {
* ROUTE_xxx types. Unset bits indicate the route should be left unchanged
*
* @deprecated Do not set audio routing directly, use setSpeakerphoneOn(),
- * setBluetoothScoOn(), setBluetoothA2dpOn() and setWiredHeadsetOn() methods instead.
+ * setBluetoothScoOn() methods instead.
*/
-
+ @Deprecated
public void setRouting(int mode, int routes, int mask) {
- IAudioService service = getService();
- try {
- service.setRouting(mode, routes, mask);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setRouting", e);
- }
}
/**
@@ -869,13 +885,7 @@ public class AudioManager {
*/
@Deprecated
public int getRouting(int mode) {
- IAudioService service = getService();
- try {
- return service.getRouting(mode);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in getRouting", e);
- return -1;
- }
+ return -1;
}
/**
@@ -884,13 +894,7 @@ public class AudioManager {
* @return true if any music tracks are active.
*/
public boolean isMusicActive() {
- IAudioService service = getService();
- try {
- return service.isMusicActive();
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in isMusicActive", e);
- return false;
- }
+ return AudioSystem.isMusicActive();
}
/*
@@ -906,14 +910,32 @@ public class AudioManager {
*/
/**
* @hide
+     * @deprecated Use {@link #setParameters(String)} instead
*/
- public void setParameter(String key, String value) {
- IAudioService service = getService();
- try {
- service.setParameter(key, value);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setParameter", e);
- }
+ @Deprecated public void setParameter(String key, String value) {
+ setParameters(key+"="+value);
+ }
+
+ /**
+ * Sets a variable number of parameter values to audio hardware.
+ *
+     * @param keyValuePairs list of parameter key value pairs in the form:
+ * key1=value1;key2=value2;...
+ *
+ */
+ public void setParameters(String keyValuePairs) {
+ AudioSystem.setParameters(keyValuePairs);
+ }
+
+ /**
+     * Gets a variable number of parameter values from the audio hardware.
+     *
+     * @param keys list of parameter keys to query
+     * @return list of parameter key value pairs in the form:
+ * key1=value1;key2=value2;...
+ */
+ public String getParameters(String keys) {
+ return AudioSystem.getParameters(keys);
}
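The parameter strings accepted here are semicolon-separated key=value lists, as the javadoc above describes. A short usage sketch; "some_key" and "other_key" are placeholder names (valid keys are platform dependent, see libaudio) and a valid Context named context is assumed:

    AudioManager am = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);

    // Several pairs can be combined in a single call: key1=value1;key2=value2;...
    am.setParameters("some_key=1;other_key=off");

    // Read the current value back for a given key.
    String value = am.getParameters("some_key");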
/* Sound effect identifiers */
@@ -1082,31 +1104,4 @@ public class AudioManager {
* {@hide}
*/
private IBinder mICallBack = new Binder();
-
- /**
- * {@hide}
- */
- private void setRoutingP(int mode, int routes, int mask) {
- IAudioService service = getService();
- try {
- service.setRouting(mode, routes, mask);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setRouting", e);
- }
- }
-
-
- /**
- * {@hide}
- */
- private int getRoutingP(int mode) {
- IAudioService service = getService();
- try {
- return service.getRouting(mode);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in getRouting", e);
- return -1;
- }
- }
-
}
diff --git a/media/java/android/media/AudioRecord.java b/media/java/android/media/AudioRecord.java
index 4d1535f..d96331f 100644
--- a/media/java/android/media/AudioRecord.java
+++ b/media/java/android/media/AudioRecord.java
@@ -86,7 +86,7 @@ public class AudioRecord
public static final int ERROR_INVALID_OPERATION = -3;
private static final int AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT = -16;
- private static final int AUDIORECORD_ERROR_SETUP_INVALIDCHANNELCOUNT = -17;
+ private static final int AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK = -17;
private static final int AUDIORECORD_ERROR_SETUP_INVALIDFORMAT = -18;
private static final int AUDIORECORD_ERROR_SETUP_INVALIDSOURCE = -19;
private static final int AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED = -20;
@@ -135,7 +135,7 @@ public class AudioRecord
/**
* The current audio channel configuration
*/
- private int mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ private int mChannelConfiguration = AudioFormat.CHANNEL_IN_MONO;
/**
* The encoding of the audio samples.
* @see AudioFormat#ENCODING_PCM_8BIT
@@ -193,8 +193,8 @@ public class AudioRecord
* @param sampleRateInHz the sample rate expressed in Hertz. Examples of rates are (but
* not limited to) 44100, 22050 and 11025.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_IN_MONO} and
+ * {@link AudioFormat#CHANNEL_IN_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
@@ -265,18 +265,18 @@ public class AudioRecord
//--------------
// channel config
switch (channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_DEFAULT:
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_IN_DEFAULT:
+ case AudioFormat.CHANNEL_IN_MONO:
mChannelCount = 1;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ mChannelConfiguration = AudioFormat.CHANNEL_IN_MONO;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_IN_STEREO:
mChannelCount = 2;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ mChannelConfiguration = AudioFormat.CHANNEL_IN_STEREO;
break;
default:
mChannelCount = 0;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_INVALID;
+ mChannelConfiguration = AudioFormat.CHANNEL_INVALID;
throw (new IllegalArgumentException("Unsupported channel configuration."));
}
@@ -368,8 +368,8 @@ public class AudioRecord
/**
* Returns the configured channel configuration.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO}
- * and {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}.
+ * See {@link AudioFormat#CHANNEL_IN_MONO}
+ * and {@link AudioFormat#CHANNEL_IN_STEREO}.
*/
public int getChannelConfiguration() {
return mChannelConfiguration;
@@ -425,8 +425,8 @@ public class AudioRecord
* will be polled for new data.
* @param sampleRateInHz the sample rate expressed in Hertz.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_IN_MONO} and
+ * {@link AudioFormat#CHANNEL_IN_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT}.
* @return {@link #ERROR_BAD_VALUE} if the recording parameters are not supported by the
@@ -438,14 +438,14 @@ public class AudioRecord
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch(channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_DEFAULT:
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_IN_DEFAULT:
+ case AudioFormat.CHANNEL_IN_MONO:
channelCount = 1;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_IN_STEREO:
channelCount = 2;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_INVALID:
+ case AudioFormat.CHANNEL_INVALID:
default:
loge("getMinBufferSize(): Invalid channel configuration.");
return AudioRecord.ERROR_BAD_VALUE;
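Taken together, the AudioRecord changes only rename the channel constants (CHANNEL_CONFIGURATION_* becomes CHANNEL_IN_*); the capture setup flow is unchanged. A minimal sketch using the new names; the 8 kHz rate and MediaRecorder.AudioSource.MIC source are arbitrary example choices, not something this patch mandates:

    import android.media.AudioFormat;
    import android.media.AudioRecord;
    import android.media.MediaRecorder;

    int sampleRate = 8000;
    int channelConfig = AudioFormat.CHANNEL_IN_MONO;   // was CHANNEL_CONFIGURATION_MONO
    int encoding = AudioFormat.ENCODING_PCM_16BIT;

    int minBuf = AudioRecord.getMinBufferSize(sampleRate, channelConfig, encoding);
    if (minBuf == AudioRecord.ERROR_BAD_VALUE) {
        throw new IllegalStateException("Unsupported capture parameters");
    }

    AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            sampleRate, channelConfig, encoding, minBuf);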
diff --git a/media/java/android/media/AudioService.java b/media/java/android/media/AudioService.java
index 58c04f3..177a7cf 100644
--- a/media/java/android/media/AudioService.java
+++ b/media/java/android/media/AudioService.java
@@ -20,6 +20,12 @@ import android.app.ActivityManagerNative;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
+import android.content.IntentFilter;
+import android.bluetooth.BluetoothIntent;
+import android.content.BroadcastReceiver;
+import android.bluetooth.BluetoothHeadset;
+import android.bluetooth.BluetoothA2dp;
+
import android.content.pm.PackageManager;
import android.database.ContentObserver;
import android.media.MediaPlayer.OnCompletionListener;
@@ -94,16 +100,10 @@ public class AudioService extends IAudioService.Stub {
/** @see VolumeStreamState */
private VolumeStreamState[] mStreamStates;
private SettingsObserver mSettingsObserver;
-
- private boolean mMicMute;
+
private int mMode;
- private int[] mRoutes = new int[AudioSystem.NUM_MODES];
private Object mSettingsLock = new Object();
private boolean mMediaServerOk;
- private boolean mSpeakerIsOn;
- private boolean mBluetoothScoIsConnected;
- private boolean mHeadsetIsConnected;
- private boolean mBluetoothA2dpIsConnected;
private SoundPool mSoundPool;
private Object mSoundEffectsLock = new Object();
@@ -135,6 +135,23 @@ public class AudioService extends IAudioService.Stub {
{4, -1} // FX_FOCUS_RETURN
};
+ /* STREAM_VOLUME_ALIAS[] indicates for each stream if it uses the volume settings
+ * of another stream: this avoids duplicating persisted volume settings for hidden
+ * stream types that simply follow another stream's volume behavior.
+ * NOTE: do not create loops in aliases! */
+ private int[] STREAM_VOLUME_ALIAS = new int[] {
+ AudioSystem.STREAM_VOICE_CALL, // STREAM_VOICE_CALL
+ AudioSystem.STREAM_SYSTEM, // STREAM_SYSTEM
+ AudioSystem.STREAM_RING, // STREAM_RING
+ AudioSystem.STREAM_MUSIC, // STREAM_MUSIC
+ AudioSystem.STREAM_ALARM, // STREAM_ALARM
+ AudioSystem.STREAM_NOTIFICATION, // STREAM_NOTIFICATION
+ AudioSystem.STREAM_VOICE_CALL, // STREAM_BLUETOOTH_SCO
+ AudioSystem.STREAM_SYSTEM, // STREAM_SYSTEM_ENFORCED
+ AudioSystem.STREAM_VOICE_CALL, // STREAM_DTMF
+ AudioSystem.STREAM_MUSIC // STREAM_TTS
+ };
+
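Concretely, a volume request on one of the hidden streams is folded into its alias: STREAM_DTMF and STREAM_BLUETOOTH_SCO track STREAM_VOICE_CALL, STREAM_TTS tracks STREAM_MUSIC, and so on. A self-contained sketch of the lookup, with the stream types inlined as the integer values this patch assigns (not literal AudioService code):

    // Stream types 0..9 as numbered in AudioSystem.java by this patch.
    final int STREAM_VOICE_CALL = 0, STREAM_DTMF = 8;
    // One entry per stream type, same order as STREAM_VOLUME_ALIAS above.
    final int[] alias = {0, 1, 2, 3, 4, 5, 0, 1, 0, 3};

    int requested = STREAM_DTMF;
    int target = alias[requested];   // == STREAM_VOICE_CALL, so DTMF follows the voice-call volume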
private AudioSystem.ErrorCallback mAudioSystemCallback = new AudioSystem.ErrorCallback() {
public void onError(int error) {
switch (error) {
@@ -178,6 +195,21 @@ public class AudioService extends IAudioService.Stub {
*/
private int mVibrateSetting;
+ /** @see System#NOTIFICATIONS_USE_RING_VOLUME */
+ private int mNotificationsUseRingVolume;
+
+ // Broadcast receiver for device connections intent broadcasts
+ private final BroadcastReceiver mReceiver = new AudioServiceBroadcastReceiver();
+
+ //TODO: use common definitions with HeadsetObserver
+ private static final int BIT_HEADSET = (1 << 0);
+ private static final int BIT_HEADSET_NO_MIC = (1 << 1);
+ private static final int BIT_TTY = (1 << 2);
+ private static final int BIT_FM_HEADSET = (1 << 3);
+ private static final int BIT_FM_SPEAKER = (1 << 4);
+
+ private int mHeadsetState;
+
///////////////////////////////////////////////////////////////////////////
// Construction
///////////////////////////////////////////////////////////////////////////
@@ -188,18 +220,20 @@ public class AudioService extends IAudioService.Stub {
mContentResolver = context.getContentResolver();
mVolumePanel = new VolumePanel(context, this);
mSettingsObserver = new SettingsObserver();
-
+
createAudioSystemThread();
- createStreamStates();
readPersistedSettings();
- readAudioSettings();
+ createStreamStates();
mMediaServerOk = true;
AudioSystem.setErrorCallback(mAudioSystemCallback);
loadSoundEffects();
- mSpeakerIsOn = false;
- mBluetoothScoIsConnected = false;
- mHeadsetIsConnected = false;
- mBluetoothA2dpIsConnected = false;
+
+ // Register for device connection intent broadcasts.
+ IntentFilter intentFilter =
+ new IntentFilter(Intent.ACTION_HEADSET_PLUG);
+ intentFilter.addAction(BluetoothA2dp.SINK_STATE_CHANGED_ACTION);
+ intentFilter.addAction(BluetoothIntent.HEADSET_AUDIO_STATE_CHANGED_ACTION);
+ context.registerReceiver(mReceiver, intentFilter);
}
private void createAudioSystemThread() {
@@ -223,63 +257,23 @@ public class AudioService extends IAudioService.Stub {
}
private void createStreamStates() {
- final int[] volumeLevelsPhone =
- createVolumeLevels(0, AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_VOICE_CALL]);
- final int[] volumeLevelsCoarse =
- createVolumeLevels(0, AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_SYSTEM]);
- final int[] volumeLevelsFine =
- createVolumeLevels(0, AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_MUSIC]);
- final int[] volumeLevelsBtPhone =
- createVolumeLevels(0,
- AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_BLUETOOTH_SCO]);
-
int numStreamTypes = AudioSystem.getNumStreamTypes();
VolumeStreamState[] streams = mStreamStates = new VolumeStreamState[numStreamTypes];
for (int i = 0; i < numStreamTypes; i++) {
- final int[] levels;
-
- switch (i) {
-
- case AudioSystem.STREAM_MUSIC:
- levels = volumeLevelsFine;
- break;
-
- case AudioSystem.STREAM_VOICE_CALL:
- levels = volumeLevelsPhone;
- break;
-
- case AudioSystem.STREAM_BLUETOOTH_SCO:
- levels = volumeLevelsBtPhone;
- break;
-
- default:
- levels = volumeLevelsCoarse;
- break;
- }
-
- if (i == AudioSystem.STREAM_BLUETOOTH_SCO) {
- streams[i] = new VolumeStreamState(AudioManager.DEFAULT_STREAM_VOLUME[i], i,levels);
- } else {
- streams[i] = new VolumeStreamState(System.VOLUME_SETTINGS[i], i, levels);
- }
+ streams[i] = new VolumeStreamState(System.VOLUME_SETTINGS[STREAM_VOLUME_ALIAS[i]], i);
}
- }
- private static int[] createVolumeLevels(int offset, int numlevels) {
- double curve = 1.0f; // 1.4f
- int [] volumes = new int[numlevels + offset];
- for (int i = 0; i < offset; i++) {
- volumes[i] = 0;
- }
-
- double val = 0;
- double max = Math.pow(numlevels - 1, curve);
- for (int i = 0; i < numlevels; i++) {
- val = Math.pow(i, curve) / max;
- volumes[offset + i] = (int) (val * 100.0f);
+ // Correct stream index values for streams with aliases
+ for (int i = 0; i < numStreamTypes; i++) {
+ if (STREAM_VOLUME_ALIAS[i] != i) {
+ int index = rescaleIndex(streams[i].mIndex, STREAM_VOLUME_ALIAS[i], i);
+ streams[i].mIndex = streams[i].getValidIndex(index);
+ setStreamVolumeIndex(i, index);
+ index = rescaleIndex(streams[i].mLastAudibleIndex, STREAM_VOLUME_ALIAS[i], i);
+ streams[i].mLastAudibleIndex = streams[i].getValidIndex(index);
+ }
}
- return volumes;
}
private void readPersistedSettings() {
@@ -291,12 +285,19 @@ public class AudioService extends IAudioService.Stub {
mRingerModeAffectedStreams = Settings.System.getInt(cr,
Settings.System.MODE_RINGER_STREAMS_AFFECTED,
- ((1 << AudioManager.STREAM_RING)|(1 << AudioManager.STREAM_NOTIFICATION)|(1 << AudioManager.STREAM_SYSTEM)));
+ ((1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_NOTIFICATION)|
+ (1 << AudioSystem.STREAM_SYSTEM)|(1 << AudioSystem.STREAM_SYSTEM_ENFORCED)));
mMuteAffectedStreams = System.getInt(cr,
System.MUTE_STREAMS_AFFECTED,
((1 << AudioSystem.STREAM_MUSIC)|(1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_SYSTEM)));
+ mNotificationsUseRingVolume = System.getInt(cr,
+ Settings.System.NOTIFICATIONS_USE_RING_VOLUME, 1);
+
+ if (mNotificationsUseRingVolume == 1) {
+ STREAM_VOLUME_ALIAS[AudioSystem.STREAM_NOTIFICATION] = AudioSystem.STREAM_RING;
+ }
// Each stream will read its own persisted settings
// Broadcast the sticky intent
@@ -307,25 +308,13 @@ public class AudioService extends IAudioService.Stub {
broadcastVibrateSetting(AudioManager.VIBRATE_TYPE_NOTIFICATION);
}
- private void readAudioSettings() {
- synchronized (mSettingsLock) {
- mMicMute = AudioSystem.isMicrophoneMuted();
- mMode = AudioSystem.getMode();
- for (int mode = 0; mode < AudioSystem.NUM_MODES; mode++) {
- mRoutes[mode] = AudioSystem.getRouting(mode);
- }
- }
+ private void setStreamVolumeIndex(int stream, int index) {
+ AudioSystem.setStreamVolumeIndex(stream, (index + 5)/10);
}
- private void applyAudioSettings() {
- synchronized (mSettingsLock) {
- AudioSystem.muteMicrophone(mMicMute);
- AudioSystem.setMode(mMode);
- for (int mode = 0; mode < AudioSystem.NUM_MODES; mode++) {
- AudioSystem.setRouting(mode, mRoutes[mode], AudioSystem.ROUTE_ALL);
- }
- }
- }
+ private int rescaleIndex(int index, int srcStream, int dstStream) {
+ return (index * mStreamStates[dstStream].getMaxIndex() + mStreamStates[srcStream].getMaxIndex() / 2) / mStreamStates[srcStream].getMaxIndex();
+ }
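The service now keeps each stream index at ten times the granularity shown in the UI, which is why reads and persisted values round with (index + 5) / 10, and rescaleIndex() converts an index between streams with different ranges by proportional scaling with rounding. A self-contained worked example; the maxima of 7 for ring and 15 for music are typical framework defaults used only to make the arithmetic concrete, not values taken from this hunk:

    int maxRing = 7 * 10;     // internal maximum for STREAM_RING
    int maxMusic = 15 * 10;   // internal maximum for STREAM_MUSIC
    int ringIndex = 4 * 10;   // UI level 4 on the ring scale, in internal units

    // rescaleIndex(index, src, dst) == (index * maxDst + maxSrc / 2) / maxSrc
    int musicIndex = (ringIndex * maxMusic + maxRing / 2) / maxRing;   // = 86
    int uiLevel = (musicIndex + 5) / 10;                               // = 9 on the music scale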
///////////////////////////////////////////////////////////////////////////
// IPC methods
@@ -354,44 +343,26 @@ public class AudioService extends IAudioService.Stub {
ensureValidDirection(direction);
ensureValidStreamType(streamType);
- boolean notificationsUseRingVolume = Settings.System.getInt(mContentResolver,
- Settings.System.NOTIFICATIONS_USE_RING_VOLUME, 1) == 1;
- if (notificationsUseRingVolume && streamType == AudioManager.STREAM_NOTIFICATION) {
- // Redirect the volume change to the ring stream
- streamType = AudioManager.STREAM_RING;
- }
- VolumeStreamState streamState = mStreamStates[streamType];
+ VolumeStreamState streamState = mStreamStates[STREAM_VOLUME_ALIAS[streamType]];
final int oldIndex = streamState.mIndex;
boolean adjustVolume = true;
// If either the client forces allowing ringer modes for this adjustment,
// or the stream type is one that is affected by ringer modes
if ((flags & AudioManager.FLAG_ALLOW_RINGER_MODES) != 0
- || streamType == AudioManager.STREAM_RING) {
+ || streamType == AudioSystem.STREAM_RING) {
// Check if the ringer mode changes with this volume adjustment. If
// it does, it will handle adjusting the volume, so we won't below
adjustVolume = checkForRingerModeChange(oldIndex, direction);
}
if (adjustVolume && streamState.adjustIndex(direction)) {
-
- boolean alsoUpdateNotificationVolume = notificationsUseRingVolume &&
- streamType == AudioManager.STREAM_RING;
- if (alsoUpdateNotificationVolume) {
- mStreamStates[AudioManager.STREAM_NOTIFICATION].adjustIndex(direction);
- }
-
// Post message to set system volume (it in turn will post a message
// to persist). Do not change volume if stream is muted.
if (streamState.muteCount() == 0) {
- sendMsg(mAudioHandler, MSG_SET_SYSTEM_VOLUME, streamType, SENDMSG_NOOP, 0, 0,
+ sendMsg(mAudioHandler, MSG_SET_SYSTEM_VOLUME, STREAM_VOLUME_ALIAS[streamType], SENDMSG_NOOP, 0, 0,
streamState, 0);
-
- if (alsoUpdateNotificationVolume) {
- sendMsg(mAudioHandler, MSG_SET_SYSTEM_VOLUME, AudioManager.STREAM_NOTIFICATION,
- SENDMSG_NOOP, 0, 0, mStreamStates[AudioManager.STREAM_NOTIFICATION], 0);
- }
}
}
@@ -404,9 +375,8 @@ public class AudioService extends IAudioService.Stub {
/** @see AudioManager#setStreamVolume(int, int, int) */
public void setStreamVolume(int streamType, int index, int flags) {
ensureValidStreamType(streamType);
- syncRingerAndNotificationStreamVolume(streamType, index, false);
-
- setStreamVolumeInt(streamType, index, false, true);
+ index = rescaleIndex(index * 10, streamType, STREAM_VOLUME_ALIAS[streamType]);
+ setStreamVolumeInt(STREAM_VOLUME_ALIAS[streamType], index, false, true);
// UI, etc.
mVolumePanel.postVolumeChanged(streamType, flags);
@@ -420,37 +390,12 @@ public class AudioService extends IAudioService.Stub {
intent.putExtra(AudioManager.EXTRA_VOLUME_STREAM_VALUE, getStreamVolume(streamType));
// Currently, sending the intent only when the stream is BLUETOOTH_SCO
- if (streamType == AudioManager.STREAM_BLUETOOTH_SCO) {
+ if (streamType == AudioSystem.STREAM_BLUETOOTH_SCO) {
mContext.sendBroadcast(intent);
}
}
/**
- * Sync the STREAM_RING and STREAM_NOTIFICATION volumes if mandated by the
- * value in Settings.
- *
- * @param streamType Type of the stream
- * @param index Volume index for the stream
- * @param force If true, set the volume even if the current and desired
- * volume as same
- */
- private void syncRingerAndNotificationStreamVolume(int streamType, int index, boolean force) {
- boolean notificationsUseRingVolume = Settings.System.getInt(mContentResolver,
- Settings.System.NOTIFICATIONS_USE_RING_VOLUME, 1) == 1;
- if (notificationsUseRingVolume) {
- if (streamType == AudioManager.STREAM_NOTIFICATION) {
- // Redirect the volume change to the ring stream
- streamType = AudioManager.STREAM_RING;
- }
- if (streamType == AudioManager.STREAM_RING) {
- // One-off to sync notification volume to ringer volume
- setStreamVolumeInt(AudioManager.STREAM_NOTIFICATION, index, force, true);
- }
- }
- }
-
-
- /**
* Sets the stream state's index, and posts a message to set system volume.
* This will not call out to the UI. Assumes a valid stream type.
*
@@ -491,13 +436,13 @@ public class AudioService extends IAudioService.Stub {
/** @see AudioManager#getStreamVolume(int) */
public int getStreamVolume(int streamType) {
ensureValidStreamType(streamType);
- return mStreamStates[streamType].mIndex;
+ return (mStreamStates[streamType].mIndex + 5) / 10;
}
/** @see AudioManager#getStreamMaxVolume(int) */
public int getStreamMaxVolume(int streamType) {
ensureValidStreamType(streamType);
- return mStreamStates[streamType].getMaxIndex();
+ return (mStreamStates[streamType].getMaxIndex() + 5) / 10;
}
/** @see AudioManager#getRingerMode() */
@@ -507,11 +452,12 @@ public class AudioService extends IAudioService.Stub {
/** @see AudioManager#setRingerMode(int) */
public void setRingerMode(int ringerMode) {
- if (ringerMode != mRingerMode) {
- setRingerModeInt(ringerMode, true);
-
- // Send sticky broadcast
- broadcastRingerMode();
+ synchronized (mSettingsLock) {
+ if (ringerMode != mRingerMode) {
+ setRingerModeInt(ringerMode, true);
+ // Send sticky broadcast
+ broadcastRingerMode();
+ }
}
}
@@ -541,7 +487,7 @@ public class AudioService extends IAudioService.Stub {
}
}
}
-
+
// Post a persist ringer mode msg
if (persist) {
sendMsg(mAudioHandler, MSG_PERSIST_RINGER_MODE, SHARED_MSG,
@@ -606,39 +552,28 @@ public class AudioService extends IAudioService.Stub {
return existingValue;
}
- /** @see AudioManager#setMicrophoneMute(boolean) */
- public void setMicrophoneMute(boolean on) {
- if (!checkAudioSettingsPermission("setMicrophoneMute()")) {
- return;
- }
- synchronized (mSettingsLock) {
- if (on != mMicMute) {
- AudioSystem.muteMicrophone(on);
- mMicMute = on;
- }
- }
- }
-
- /** @see AudioManager#isMicrophoneMute() */
- public boolean isMicrophoneMute() {
- return mMicMute;
- }
-
/** @see AudioManager#setMode(int) */
public void setMode(int mode) {
if (!checkAudioSettingsPermission("setMode()")) {
return;
}
+
+ if (mode < AudioSystem.MODE_CURRENT || mode > AudioSystem.MODE_IN_CALL) {
+ return;
+ }
+
synchronized (mSettingsLock) {
+ if (mode == AudioSystem.MODE_CURRENT) {
+ mode = mMode;
+ }
if (mode != mMode) {
- if (AudioSystem.setMode(mode) == AudioSystem.AUDIO_STATUS_OK) {
+ if (AudioSystem.setPhoneState(mode) == AudioSystem.AUDIO_STATUS_OK) {
mMode = mode;
}
}
int streamType = getActiveStreamType(AudioManager.USE_DEFAULT_STREAM_TYPE);
- int index = mStreamStates[streamType].mIndex;
- syncRingerAndNotificationStreamVolume(streamType, index, true);
- setStreamVolumeInt(streamType, index, true, true);
+ int index = mStreamStates[STREAM_VOLUME_ALIAS[streamType]].mIndex;
+ setStreamVolumeInt(STREAM_VOLUME_ALIAS[streamType], index, true, true);
}
}
@@ -647,187 +582,6 @@ public class AudioService extends IAudioService.Stub {
return mMode;
}
- /** @see AudioManager#setRouting(int, int, int) */
- public void setRouting(int mode, int routes, int mask) {
- int incallMask = 0;
- int ringtoneMask = 0;
- int normalMask = 0;
-
- if (!checkAudioSettingsPermission("setRouting()")) {
- return;
- }
- synchronized (mSettingsLock) {
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // mode AudioSystem.MODE_INVALID is used only by the following AudioManager methods:
- // setWiredHeadsetOn(), setBluetoothA2dpOn(), setBluetoothScoOn() and setSpeakerphoneOn().
- // If applications are using AudioManager.setRouting() that is now deprecated, the routing
- // command will be ignored.
- if (mode == AudioSystem.MODE_INVALID) {
- switch (mask) {
- case AudioSystem.ROUTE_SPEAKER:
- // handle setSpeakerphoneOn()
- if (routes != 0 && !mSpeakerIsOn) {
- mSpeakerIsOn = true;
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_SPEAKER;
- incallMask = AudioSystem.ROUTE_ALL;
- } else if (routes == 0 && mSpeakerIsOn) {
- mSpeakerIsOn = false;
- if (mBluetoothScoIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_BLUETOOTH_SCO;
- } else if (mHeadsetIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_HEADSET;
- } else {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_EARPIECE;
- }
- incallMask = AudioSystem.ROUTE_ALL;
- }
- break;
-
- case AudioSystem.ROUTE_BLUETOOTH_SCO:
- // handle setBluetoothScoOn()
- if (routes != 0 && !mBluetoothScoIsConnected) {
- mBluetoothScoIsConnected = true;
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_BLUETOOTH_SCO;
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_BLUETOOTH_SCO;
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_BLUETOOTH_SCO;
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than SCO headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- } else if (routes == 0 && mBluetoothScoIsConnected) {
- mBluetoothScoIsConnected = false;
- if (mHeadsetIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_HEADSET;
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- (AudioSystem.ROUTE_HEADSET|AudioSystem.ROUTE_SPEAKER);
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_HEADSET;
- } else {
- if (mSpeakerIsOn) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_SPEAKER;
- } else {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_EARPIECE;
- }
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
- }
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than SCO headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- break;
-
- case AudioSystem.ROUTE_HEADSET:
- // handle setWiredHeadsetOn()
- if (routes != 0 && !mHeadsetIsConnected) {
- mHeadsetIsConnected = true;
- // do not act upon headset connection if bluetooth SCO is connected to match phone app behavior
- if (!mBluetoothScoIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_HEADSET;
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- (AudioSystem.ROUTE_HEADSET|AudioSystem.ROUTE_SPEAKER);
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_HEADSET;
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than wired headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- } else if (routes == 0 && mHeadsetIsConnected) {
- mHeadsetIsConnected = false;
- // do not act upon headset disconnection if bluetooth SCO is connected to match phone app behavior
- if (!mBluetoothScoIsConnected) {
- if (mSpeakerIsOn) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_SPEAKER;
- } else {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_EARPIECE;
- }
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
-
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than wired headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- }
- break;
-
- case AudioSystem.ROUTE_BLUETOOTH_A2DP:
- // handle setBluetoothA2dpOn()
- if (routes != 0 && !mBluetoothA2dpIsConnected) {
- mBluetoothA2dpIsConnected = true;
- mRoutes[AudioSystem.MODE_RINGTONE] |= AudioSystem.ROUTE_BLUETOOTH_A2DP;
- mRoutes[AudioSystem.MODE_NORMAL] |= AudioSystem.ROUTE_BLUETOOTH_A2DP;
- // the audio flinger chooses A2DP as a higher priority,
- // so there is no need to disable other routes.
- ringtoneMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- } else if (routes == 0 && mBluetoothA2dpIsConnected) {
- mBluetoothA2dpIsConnected = false;
- mRoutes[AudioSystem.MODE_RINGTONE] &= ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- mRoutes[AudioSystem.MODE_NORMAL] &= ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- // the audio flinger chooses A2DP as a higher priority,
- // so there is no need to disable other routes.
- ringtoneMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- break;
- }
-
- // incallMask is != 0 means we must apply ne routing to MODE_IN_CALL mode
- if (incallMask != 0) {
- AudioSystem.setRouting(AudioSystem.MODE_IN_CALL,
- mRoutes[AudioSystem.MODE_IN_CALL],
- incallMask);
- }
- // ringtoneMask is != 0 means we must apply ne routing to MODE_RINGTONE mode
- if (ringtoneMask != 0) {
- AudioSystem.setRouting(AudioSystem.MODE_RINGTONE,
- mRoutes[AudioSystem.MODE_RINGTONE],
- ringtoneMask);
- }
- // normalMask is != 0 means we must apply ne routing to MODE_NORMAL mode
- if (normalMask != 0) {
- AudioSystem.setRouting(AudioSystem.MODE_NORMAL,
- mRoutes[AudioSystem.MODE_NORMAL],
- normalMask);
- }
-
- int streamType = getActiveStreamType(AudioManager.USE_DEFAULT_STREAM_TYPE);
- int index = mStreamStates[streamType].mIndex;
- syncRingerAndNotificationStreamVolume(streamType, index, true);
- setStreamVolumeInt(streamType, index, true, true);
- }
- }
- }
-
- /** @see AudioManager#getRouting(int) */
- public int getRouting(int mode) {
- return mRoutes[mode];
- }
-
- /** @see AudioManager#isMusicActive() */
- public boolean isMusicActive() {
- return AudioSystem.isMusicActive();
- }
-
- /** @see AudioManager#setParameter(String, String) */
- public void setParameter(String key, String value) {
- AudioSystem.setParameter(key, value);
- }
-
/** @see AudioManager#playSoundEffect(int) */
public void playSoundEffect(int effectType) {
sendMsg(mAudioHandler, MSG_PLAY_SOUND_EFFECT, SHARED_MSG, SENDMSG_NOOP,
@@ -930,14 +684,28 @@ public class AudioService extends IAudioService.Stub {
if (streamType != AudioSystem.STREAM_BLUETOOTH_SCO) {
String settingName = System.VOLUME_SETTINGS[streamType];
String lastAudibleSettingName = settingName + System.APPEND_FOR_LAST_AUDIBLE;
-
- streamState.mIndex = streamState.getValidIndex(Settings.System.getInt(mContentResolver,
- settingName,
- AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
- streamState.mLastAudibleIndex = streamState.getValidIndex(Settings.System.getInt(mContentResolver,
- lastAudibleSettingName,
- streamState.mIndex > 0 ? streamState.mIndex : AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
+ int index = Settings.System.getInt(mContentResolver,
+ settingName,
+ AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ if (STREAM_VOLUME_ALIAS[streamType] != streamType) {
+ index = rescaleIndex(index * 10, STREAM_VOLUME_ALIAS[streamType], streamType);
+ } else {
+ index *= 10;
+ }
+ streamState.mIndex = streamState.getValidIndex(index);
+
+ index = (index + 5) / 10;
+ index = Settings.System.getInt(mContentResolver,
+ lastAudibleSettingName,
+ (index > 0) ? index : AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ if (STREAM_VOLUME_ALIAS[streamType] != streamType) {
+ index = rescaleIndex(index * 10, STREAM_VOLUME_ALIAS[streamType], streamType);
+ } else {
+ index *= 10;
+ }
+ streamState.mLastAudibleIndex = streamState.getValidIndex(index);
}
+
// unmute stream that whas muted but is not affect by mute anymore
if (streamState.muteCount() != 0 && !isStreamAffectedByMute(streamType)) {
int size = streamState.mDeathHandlers.size();
@@ -948,7 +716,7 @@ public class AudioService extends IAudioService.Stub {
}
// apply stream volume
if (streamState.muteCount() == 0) {
- AudioSystem.setVolume(streamType, streamState.mVolumes[streamState.mIndex]);
+ setStreamVolumeIndex(streamType, streamState.mIndex);
}
}
@@ -969,7 +737,7 @@ public class AudioService extends IAudioService.Stub {
boolean adjustVolumeIndex = true;
int newRingerMode = mRingerMode;
- if (mRingerMode == AudioManager.RINGER_MODE_NORMAL && oldIndex == 1
+ if (mRingerMode == AudioManager.RINGER_MODE_NORMAL && (oldIndex + 5) / 10 == 1
&& direction == AudioManager.ADJUST_LOWER) {
newRingerMode = AudioManager.RINGER_MODE_VIBRATE;
} else if (mRingerMode == AudioManager.RINGER_MODE_VIBRATE) {
@@ -1026,7 +794,7 @@ public class AudioService extends IAudioService.Stub {
Log.w(TAG, "Couldn't connect to phone service", e);
}
- if ((getRouting(AudioSystem.MODE_IN_CALL) & AudioSystem.ROUTE_BLUETOOTH_SCO) != 0) {
+ if (AudioSystem.getForceUse(AudioSystem.FOR_COMMUNICATION) == AudioSystem.FORCE_BT_SCO) {
// Log.v(TAG, "getActiveStreamType: Forcing STREAM_BLUETOOTH_SCO...");
return AudioSystem.STREAM_BLUETOOTH_SCO;
} else if (isOffhook) {
@@ -1110,47 +878,36 @@ public class AudioService extends IAudioService.Stub {
private final String mLastAudibleVolumeIndexSettingName;
private final int mStreamType;
- private final int[] mVolumes;
+ private int mIndexMax;
private int mIndex;
private int mLastAudibleIndex;
private ArrayList<VolumeDeathHandler> mDeathHandlers; //handles mute/solo requests client death
- private VolumeStreamState(String settingName, int streamType, int[] volumes) {
+ private VolumeStreamState(String settingName, int streamType) {
mVolumeIndexSettingName = settingName;
mLastAudibleVolumeIndexSettingName = settingName + System.APPEND_FOR_LAST_AUDIBLE;
mStreamType = streamType;
- mVolumes = volumes;
final ContentResolver cr = mContentResolver;
- mIndex = getValidIndex(Settings.System.getInt(cr, mVolumeIndexSettingName, AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
- mLastAudibleIndex = getValidIndex(Settings.System.getInt(cr,
- mLastAudibleVolumeIndexSettingName, mIndex > 0 ? mIndex : AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
-
- AudioSystem.setVolume(streamType, volumes[mIndex]);
- mDeathHandlers = new ArrayList<VolumeDeathHandler>();
- }
-
- /**
- * Constructor to be used when there is no setting associated with the VolumeStreamState.
- *
- * @param defaultVolume Default volume of the stream to use.
- * @param streamType Type of the stream.
- * @param volumes Volumes levels associated with this stream.
- */
- private VolumeStreamState(int defaultVolume, int streamType, int[] volumes) {
- mVolumeIndexSettingName = null;
- mLastAudibleVolumeIndexSettingName = null;
- mIndex = mLastAudibleIndex = defaultVolume;
- mStreamType = streamType;
- mVolumes = volumes;
- AudioSystem.setVolume(mStreamType, defaultVolume);
+ mIndexMax = AudioManager.MAX_STREAM_VOLUME[streamType];
+ mIndex = Settings.System.getInt(cr,
+ mVolumeIndexSettingName,
+ AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ mLastAudibleIndex = Settings.System.getInt(cr,
+ mLastAudibleVolumeIndexSettingName,
+ (mIndex > 0) ? mIndex : AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ AudioSystem.initStreamVolume(streamType, 0, mIndexMax);
+ mIndexMax *= 10;
+ mIndex = getValidIndex(10 * mIndex);
+ mLastAudibleIndex = getValidIndex(10 * mLastAudibleIndex);
+ setStreamVolumeIndex(streamType, mIndex);
mDeathHandlers = new ArrayList<VolumeDeathHandler>();
}
public boolean adjustIndex(int deltaIndex) {
- return setIndex(mIndex + deltaIndex, true);
+ return setIndex(mIndex + deltaIndex * 10, true);
}
public boolean setIndex(int index, boolean lastAudible) {
@@ -1161,6 +918,13 @@ public class AudioService extends IAudioService.Stub {
if (lastAudible) {
mLastAudibleIndex = mIndex;
}
+ // Apply change to all streams using this one as alias
+ int numStreamTypes = AudioSystem.getNumStreamTypes();
+ for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
+ if (streamType != mStreamType && STREAM_VOLUME_ALIAS[streamType] == mStreamType) {
+ mStreamStates[streamType].setIndex(rescaleIndex(mIndex, mStreamType, streamType), lastAudible);
+ }
+ }
return true;
} else {
return false;
@@ -1168,7 +932,7 @@ public class AudioService extends IAudioService.Stub {
}
public int getMaxIndex() {
- return mVolumes.length - 1;
+ return mIndexMax;
}
public void mute(IBinder cb, boolean state) {
@@ -1183,8 +947,8 @@ public class AudioService extends IAudioService.Stub {
private int getValidIndex(int index) {
if (index < 0) {
return 0;
- } else if (index >= mVolumes.length) {
- return mVolumes.length - 1;
+ } else if (index > mIndexMax) {
+ return mIndexMax;
}
return index;
@@ -1318,8 +1082,16 @@ public class AudioService extends IAudioService.Stub {
private void setSystemVolume(VolumeStreamState streamState) {
// Adjust volume
- AudioSystem
- .setVolume(streamState.mStreamType, streamState.mVolumes[streamState.mIndex]);
+ setStreamVolumeIndex(streamState.mStreamType, streamState.mIndex);
+
+ // Apply change to all streams using this one as alias
+ int numStreamTypes = AudioSystem.getNumStreamTypes();
+ for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
+ if (streamType != streamState.mStreamType &&
+ STREAM_VOLUME_ALIAS[streamType] == streamState.mStreamType) {
+ setStreamVolumeIndex(streamType, mStreamStates[streamType].mIndex);
+ }
+ }
// Post a persist volume msg
sendMsg(mAudioHandler, MSG_PERSIST_VOLUME, streamState.mStreamType,
@@ -1327,12 +1099,10 @@ public class AudioService extends IAudioService.Stub {
}
private void persistVolume(VolumeStreamState streamState) {
- if (streamState.mStreamType != AudioManager.STREAM_BLUETOOTH_SCO) {
- System.putInt(mContentResolver, streamState.mVolumeIndexSettingName,
- streamState.mIndex);
- System.putInt(mContentResolver, streamState.mLastAudibleVolumeIndexSettingName,
- streamState.mLastAudibleIndex);
- }
+ System.putInt(mContentResolver, streamState.mVolumeIndexSettingName,
+ (streamState.mIndex + 5)/ 10);
+ System.putInt(mContentResolver, streamState.mLastAudibleVolumeIndexSettingName,
+ (streamState.mLastAudibleIndex + 5) / 10);
}
private void persistRingerMode() {
@@ -1426,18 +1196,17 @@ public class AudioService extends IAudioService.Stub {
case MSG_MEDIA_SERVER_STARTED:
Log.e(TAG, "Media server started.");
- // Restore audio routing and stream volumes
- applyAudioSettings();
+ // Restore stream volumes
int numStreamTypes = AudioSystem.getNumStreamTypes();
for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
- int volume;
+ int index;
VolumeStreamState streamState = mStreamStates[streamType];
if (streamState.muteCount() == 0) {
- volume = streamState.mVolumes[streamState.mIndex];
+ index = streamState.mIndex;
} else {
- volume = streamState.mVolumes[0];
+ index = 0;
}
- AudioSystem.setVolume(streamType, volume);
+ setStreamVolumeIndex(streamType, index);
}
setRingerMode(mRingerMode);
mMediaServerOk = true;
@@ -1451,28 +1220,144 @@ public class AudioService extends IAudioService.Stub {
}
private class SettingsObserver extends ContentObserver {
-
+
SettingsObserver() {
super(new Handler());
mContentResolver.registerContentObserver(Settings.System.getUriFor(
Settings.System.MODE_RINGER_STREAMS_AFFECTED), false, this);
+ mContentResolver.registerContentObserver(Settings.System.getUriFor(
+ Settings.System.NOTIFICATIONS_USE_RING_VOLUME), false, this);
}
@Override
public void onChange(boolean selfChange) {
super.onChange(selfChange);
-
- mRingerModeAffectedStreams = Settings.System.getInt(mContentResolver,
- Settings.System.MODE_RINGER_STREAMS_AFFECTED,
- 0);
+ synchronized (mSettingsLock) {
+ int ringerModeAffectedStreams = Settings.System.getInt(mContentResolver,
+ Settings.System.MODE_RINGER_STREAMS_AFFECTED,
+ 0);
+ if (ringerModeAffectedStreams != mRingerModeAffectedStreams) {
+ /*
+ * Ensure all stream types that should be affected by ringer mode
+ * are in the proper state.
+ */
+ mRingerModeAffectedStreams = ringerModeAffectedStreams;
+ setRingerModeInt(getRingerMode(), false);
+ }
- /*
- * Ensure all stream types that should be affected by ringer mode
- * are in the proper state.
- */
- setRingerModeInt(getRingerMode(), false);
+ int notificationsUseRingVolume = Settings.System.getInt(mContentResolver,
+ Settings.System.NOTIFICATIONS_USE_RING_VOLUME,
+ 1);
+ if (notificationsUseRingVolume != mNotificationsUseRingVolume) {
+ mNotificationsUseRingVolume = notificationsUseRingVolume;
+ if (mNotificationsUseRingVolume == 1) {
+ STREAM_VOLUME_ALIAS[AudioSystem.STREAM_NOTIFICATION] = AudioSystem.STREAM_RING;
+ } else {
+ STREAM_VOLUME_ALIAS[AudioSystem.STREAM_NOTIFICATION] = AudioSystem.STREAM_NOTIFICATION;
+ // Persist notification volume as it was not persisted while aliased to ring volume
+ sendMsg(mAudioHandler, MSG_PERSIST_VOLUME, AudioSystem.STREAM_NOTIFICATION,
+ SENDMSG_REPLACE, 0, 0, mStreamStates[AudioSystem.STREAM_NOTIFICATION], PERSIST_DELAY);
+ }
+ }
+ }
}
-
}
-
+
+ /**
+ * Receiver for misc intent broadcasts the Phone app cares about.
+ */
+ private class AudioServiceBroadcastReceiver extends BroadcastReceiver {
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ String action = intent.getAction();
+
+ if (action.equals(BluetoothA2dp.SINK_STATE_CHANGED_ACTION)) {
+ int state = intent.getIntExtra(BluetoothA2dp.SINK_STATE,
+ BluetoothA2dp.STATE_DISCONNECTED);
+ String address = intent.getStringExtra(BluetoothIntent.ADDRESS);
+ if (state == BluetoothA2dp.STATE_DISCONNECTED) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ address);
+ } else if (state == BluetoothA2dp.STATE_CONNECTED){
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ address);
+ }
+ } else if (action.equals(BluetoothIntent.HEADSET_AUDIO_STATE_CHANGED_ACTION)) {
+ int state = intent.getIntExtra(BluetoothIntent.HEADSET_AUDIO_STATE,
+ BluetoothHeadset.STATE_ERROR);
+ String address = intent.getStringExtra(BluetoothIntent.ADDRESS);
+ if (state == BluetoothHeadset.AUDIO_STATE_DISCONNECTED) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_SCO,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ address);
+ } else if (state == BluetoothHeadset.AUDIO_STATE_CONNECTED) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_SCO,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ address);
+ }
+ } else if (action.equals(Intent.ACTION_HEADSET_PLUG)) {
+ int state = intent.getIntExtra("state", 0);
+ if ((state & BIT_HEADSET) == 0 &&
+ (mHeadsetState & BIT_HEADSET) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADSET,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_HEADSET) != 0 &&
+ (mHeadsetState & BIT_HEADSET) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADSET,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_HEADSET_NO_MIC) == 0 &&
+ (mHeadsetState & BIT_HEADSET_NO_MIC) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADPHONE,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_HEADSET_NO_MIC) != 0 &&
+ (mHeadsetState & BIT_HEADSET_NO_MIC) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADPHONE,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_TTY) == 0 &&
+ (mHeadsetState & BIT_TTY) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_TTY,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_TTY) != 0 &&
+ (mHeadsetState & BIT_TTY) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_TTY,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_FM_HEADSET) == 0 &&
+ (mHeadsetState & BIT_FM_HEADSET) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_HEADPHONE,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_FM_HEADSET) != 0 &&
+ (mHeadsetState & BIT_FM_HEADSET) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_HEADPHONE,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_FM_SPEAKER) == 0 &&
+ (mHeadsetState & BIT_FM_SPEAKER) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_SPEAKER,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_FM_SPEAKER) != 0 &&
+ (mHeadsetState & BIT_FM_SPEAKER) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_SPEAKER,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ mHeadsetState = state;
+ }
+ }
+ }
+
+
}
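The receiver above is what replaces the old setRouting() bookkeeping: every connect or disconnect intent is reduced to one AudioSystem.setDeviceConnectionState() call, and the policy manager picks the output device from there. A condensed sketch of the wired-headset branch, written as a hypothetical helper inside AudioService (BIT_HEADSET and mHeadsetState are the fields added earlier in this file; the empty address string matches the patch):

    // Hypothetical helper condensing the ACTION_HEADSET_PLUG handling above;
    // only the plain wired-headset bit is considered here.
    private void handleWiredHeadsetBit(int newState) {
        boolean plugged = (newState & BIT_HEADSET) != 0;
        boolean wasPlugged = (mHeadsetState & BIT_HEADSET) != 0;
        if (plugged != wasPlugged) {
            AudioSystem.setDeviceConnectionState(
                    AudioSystem.DEVICE_OUT_WIRED_HEADSET,
                    plugged ? AudioSystem.DEVICE_STATE_AVAILABLE
                            : AudioSystem.DEVICE_STATE_UNAVAILABLE,
                    "");   // wired devices carry no address
        }
    }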
diff --git a/media/java/android/media/AudioSystem.java b/media/java/android/media/AudioSystem.java
index 5917ab9..d587f65 100644
--- a/media/java/android/media/AudioSystem.java
+++ b/media/java/android/media/AudioSystem.java
@@ -45,38 +45,21 @@ public class AudioSystem
public static final int STREAM_NOTIFICATION = 5;
/* @hide The audio stream for phone calls when connected on bluetooth */
public static final int STREAM_BLUETOOTH_SCO = 6;
+ /* @hide The audio stream for enforced system sounds in certain countries (e.g. camera in Japan) */
+ public static final int STREAM_SYSTEM_ENFORCED = 7;
+ /* @hide The audio stream for DTMF tones */
+ public static final int STREAM_DTMF = 8;
+ /* @hide The audio stream for text to speech (TTS) */
+ public static final int STREAM_TTS = 9;
/**
* @deprecated Use {@link #numStreamTypes() instead}
*/
public static final int NUM_STREAMS = 5;
// Expose only the getter method publicly so we can change it in the future
- private static final int NUM_STREAM_TYPES = 7;
+ private static final int NUM_STREAM_TYPES = 10;
public static final int getNumStreamTypes() { return NUM_STREAM_TYPES; }
- /* max and min volume levels */
- /* Maximum volume setting, for use with setVolume(int,int) */
- public static final int MAX_VOLUME = 100;
- /* Minimum volume setting, for use with setVolume(int,int) */
- public static final int MIN_VOLUME = 0;
-
- /*
- * Sets the volume of a specified audio stream.
- *
- * param type the stream type to set the volume of (e.g. STREAM_MUSIC)
- * param volume the volume level to set (0-100)
- * return command completion status see AUDIO_STATUS_OK, see AUDIO_STATUS_ERROR
- */
- public static native int setVolume(int type, int volume);
-
- /*
- * Returns the volume of a specified audio stream.
- *
- * param type the stream type to get the volume of (e.g. STREAM_MUSIC)
- * return the current volume (0-100)
- */
- public static native int getVolume(int type);
-
/*
* Sets the microphone mute on or off.
*
@@ -101,17 +84,22 @@ public class AudioSystem
* it can route the audio appropriately.
* return command completion status see AUDIO_STATUS_OK, see AUDIO_STATUS_ERROR
*/
- public static native int setMode(int mode);
-
+ /** @deprecated use {@link #setPhoneState(int)} */
+ public static int setMode(int mode) {
+ return AUDIO_STATUS_ERROR;
+ }
/*
* Returns the current audio mode.
*
* return the current audio mode (NORMAL, RINGTONE, or IN_CALL).
* Returns the current current audio state from the HAL.
*/
- public static native int getMode();
+ /** @deprecated */
+ public static int getMode() {
+ return MODE_INVALID;
+ }
- /* modes for setMode/getMode/setRoute/getRoute */
+ /* modes for setPhoneState */
public static final int MODE_INVALID = -2;
public static final int MODE_CURRENT = -1;
public static final int MODE_NORMAL = 0;
@@ -121,15 +109,20 @@ public class AudioSystem
/* Routing bits for setRouting/getRouting API */
- public static final int ROUTE_EARPIECE = (1 << 0);
- public static final int ROUTE_SPEAKER = (1 << 1);
-
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_EARPIECE = (1 << 0);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_SPEAKER = (1 << 1);
/** @deprecated use {@link #ROUTE_BLUETOOTH_SCO} */
@Deprecated public static final int ROUTE_BLUETOOTH = (1 << 2);
- public static final int ROUTE_BLUETOOTH_SCO = (1 << 2);
- public static final int ROUTE_HEADSET = (1 << 3);
- public static final int ROUTE_BLUETOOTH_A2DP = (1 << 4);
- public static final int ROUTE_ALL = 0xFFFFFFFF;
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_BLUETOOTH_SCO = (1 << 2);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_HEADSET = (1 << 3);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_BLUETOOTH_A2DP = (1 << 4);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_ALL = 0xFFFFFFFF;
/*
* Sets the audio routing for a specified mode
@@ -141,7 +134,10 @@ public class AudioSystem
* ROUTE_xxx types. Unset bits indicate the route should be left unchanged
* return command completion status see AUDIO_STATUS_OK, see AUDIO_STATUS_ERROR
*/
- public static native int setRouting(int mode, int routes, int mask);
+ /** @deprecated use {@link #setDeviceConnectionState(int,int,String)} */
+ public static int setRouting(int mode, int routes, int mask) {
+ return AUDIO_STATUS_ERROR;
+ }
/*
* Returns the current audio routing bit vector for a specified mode.
@@ -150,7 +146,10 @@ public class AudioSystem
* return an audio route bit vector that can be compared with ROUTE_xxx
* bits
*/
- public static native int getRouting(int mode);
+ /** @deprecated use {@link #getDeviceConnectionState(int,String)} */
+ public static int getRouting(int mode) {
+ return 0;
+ }
/*
* Checks whether any music is active.
@@ -160,17 +159,23 @@ public class AudioSystem
public static native boolean isMusicActive();
/*
- * Sets a generic audio configuration parameter. The use of these parameters
+ * Sets a group of generic audio configuration parameters. The use of these parameters
* are platform dependant, see libaudio
*
- * ** Temporary interface - DO NOT USE
- *
- * TODO: Replace with a more generic key:value get/set mechanism
+ * param keyValuePairs list of parameters key value pairs in the form:
+ * key1=value1;key2=value2;...
+ */
+ public static native int setParameters(String keyValuePairs);
+
+ /*
+ * Gets a group of generic audio configuration parameters. The use of these parameters
+ * is platform dependent, see libaudio
*
- * param key name of parameter to set. Must not be null.
- * param value value of parameter. Must not be null.
+ * param keys list of parameters
+ * return value: list of parameters key value pairs in the form:
+ * key1=value1;key2=value2;...
*/
- public static native void setParameter(String key, String value);
+ public static native String getParameters(String keys);
/*
private final static String TAG = "audio";
@@ -220,4 +225,68 @@ public class AudioSystem
mErrorCallback.onError(error);
}
}
+
+ /*
+ * AudioPolicyService methods
+ */
+
+ // output devices
+ public static final int DEVICE_OUT_EARPIECE = 0x1;
+ public static final int DEVICE_OUT_SPEAKER = 0x2;
+ public static final int DEVICE_OUT_WIRED_HEADSET = 0x4;
+ public static final int DEVICE_OUT_WIRED_HEADPHONE = 0x8;
+ public static final int DEVICE_OUT_BLUETOOTH_SCO = 0x10;
+ public static final int DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20;
+ public static final int DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40;
+ public static final int DEVICE_OUT_BLUETOOTH_A2DP = 0x80;
+ public static final int DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100;
+ public static final int DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200;
+ public static final int DEVICE_OUT_AUX_DIGITAL = 0x400;
+ public static final int DEVICE_OUT_FM_HEADPHONE = 0x800;
+ public static final int DEVICE_OUT_FM_SPEAKER = 0x1000;
+ public static final int DEVICE_OUT_TTY = 0x2000;
+ public static final int DEVICE_OUT_DEFAULT = 0x8000;
+ // input devices
+ public static final int DEVICE_IN_COMMUNICATION = 0x10000;
+ public static final int DEVICE_IN_AMBIENT = 0x20000;
+ public static final int DEVICE_IN_BUILTIN_MIC1 = 0x40000;
+ public static final int DEVICE_IN_BUILTIN_MIC2 = 0x80000;
+ public static final int DEVICE_IN_MIC_ARRAY = 0x100000;
+ public static final int DEVICE_IN_BLUETOOTH_SCO_HEADSET = 0x200000;
+ public static final int DEVICE_IN_WIRED_HEADSET = 0x400000;
+ public static final int DEVICE_IN_AUX_DIGITAL = 0x800000;
+ public static final int DEVICE_IN_DEFAULT = 0x80000000;
+
+ // device states
+ public static final int DEVICE_STATE_UNAVAILABLE = 0;
+ public static final int DEVICE_STATE_AVAILABLE = 1;
+
+ // phone state
+ public static final int PHONE_STATE_OFFCALL = 0;
+ public static final int PHONE_STATE_RINGING = 1;
+ public static final int PHONE_STATE_INCALL = 2;
+
+ // config for setForceUse
+ public static final int FORCE_NONE = 0;
+ public static final int FORCE_SPEAKER = 1;
+ public static final int FORCE_HEADPHONES = 2;
+ public static final int FORCE_BT_SCO = 3;
+ public static final int FORCE_BT_A2DP = 4;
+ public static final int FORCE_WIRED_ACCESSORY = 5;
+ public static final int FORCE_DEFAULT = FORCE_NONE;
+
+ // usage for setForceUse
+ public static final int FOR_COMMUNICATION = 0;
+ public static final int FOR_MEDIA = 1;
+ public static final int FOR_RECORD = 2;
+
+ public static native int setDeviceConnectionState(int device, int state, String device_address);
+ public static native int getDeviceConnectionState(int device, String device_address);
+ public static native int setPhoneState(int state);
+ public static native int setRingerMode(int mode, int mask);
+ public static native int setForceUse(int usage, int config);
+ public static native int getForceUse(int usage);
+ public static native int initStreamVolume(int stream, int indexMin, int indexMax);
+ public static native int setStreamVolumeIndex(int stream, int index);
+ public static native int getStreamVolumeIndex(int stream);
}
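
For context, a rough native-side sketch of how these new policy entry points are meant to be driven; the C++ counterparts appear later in this change (media/libmedia/AudioSystem.cpp), and the enum spellings are assumed to mirror the Java constants above. Illustrative only, not part of the patch:

    #include <media/AudioSystem.h>
    using namespace android;

    // Tell the policy manager a wired headset is now available, then force
    // media to the speaker anyway (a speakerphone-style use case).
    void routeMediaToSpeakerSketch() {
        AudioSystem::setDeviceConnectionState(AudioSystem::DEVICE_OUT_WIRED_HEADSET,
                                              AudioSystem::DEVICE_STATE_AVAILABLE,
                                              "");   // device address, empty here
        AudioSystem::setForceUse(AudioSystem::FOR_MEDIA, AudioSystem::FORCE_SPEAKER);
    }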
diff --git a/media/java/android/media/AudioTrack.java b/media/java/android/media/AudioTrack.java
index 5f1be9d..7fbe965 100644
--- a/media/java/android/media/AudioTrack.java
+++ b/media/java/android/media/AudioTrack.java
@@ -120,7 +120,7 @@ public class AudioTrack
public static final int ERROR_INVALID_OPERATION = -3;
private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16;
- private static final int ERROR_NATIVESETUP_INVALIDCHANNELCOUNT = -17;
+ private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17;
private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18;
private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19;
private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20;
@@ -198,7 +198,7 @@ public class AudioTrack
/**
* The current audio channel configuration.
*/
- private int mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
/**
* The encoding of the audio samples.
* @see AudioFormat#ENCODING_PCM_8BIT
@@ -235,8 +235,8 @@ public class AudioTrack
* @param sampleRateInHz the sample rate expressed in Hertz. Examples of rates are (but
* not limited to) 44100, 22050 and 11025.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_OUT_MONO} and
+ * {@link AudioFormat#CHANNEL_OUT_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
@@ -298,7 +298,8 @@ public class AudioTrack
&& (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
&& (streamType != AudioManager.STREAM_VOICE_CALL)
&& (streamType != AudioManager.STREAM_NOTIFICATION)
- && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)) {
+ && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
+ && (streamType != AudioManager.STREAM_DTMF)) {
throw (new IllegalArgumentException("Invalid stream type."));
} else {
mStreamType = streamType;
@@ -316,18 +317,18 @@ public class AudioTrack
//--------------
// channel config
switch (channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_DEFAULT:
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_OUT_DEFAULT:
+ case AudioFormat.CHANNEL_OUT_MONO:
mChannelCount = 1;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_OUT_STEREO:
mChannelCount = 2;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ mChannelConfiguration = AudioFormat.CHANNEL_OUT_STEREO;
break;
default:
mChannelCount = 0;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_INVALID;
+ mChannelConfiguration = AudioFormat.CHANNEL_INVALID;
throw(new IllegalArgumentException("Unsupported channel configuration."));
}
@@ -452,8 +453,8 @@ public class AudioTrack
/**
* Returns the configured channel configuration.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO}
- * and {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}.
+ * See {@link AudioFormat#CHANNEL_OUT_MONO}
+ * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
*/
public int getChannelConfiguration() {
return mChannelConfiguration;
@@ -531,8 +532,8 @@ public class AudioTrack
* the expected frequency at which the buffer will be refilled with additional data to play.
* @param sampleRateInHz the sample rate expressed in Hertz.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_OUT_MONO} and
+ * {@link AudioFormat#CHANNEL_OUT_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
@@ -544,10 +545,10 @@ public class AudioTrack
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch(channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_OUT_MONO:
channelCount = 1;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_OUT_STEREO:
channelCount = 2;
break;
default:
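
(Illustrative, not part of the patch.) The same shift from channel counts to channel masks applies on the native side; a small sketch showing how a count is derived from a CHANNEL_OUT_* mask with the popCount() helper added later in this change:

    #include <media/AudioSystem.h>
    using namespace android;

    uint32_t channels     = AudioSystem::CHANNEL_OUT_STEREO;   // a mask, not a count
    int      channelCount = AudioSystem::popCount(channels);   // yields 2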
diff --git a/media/java/android/media/IAudioService.aidl b/media/java/android/media/IAudioService.aidl
index 9a8264f..bb4252b 100644
--- a/media/java/android/media/IAudioService.aidl
+++ b/media/java/android/media/IAudioService.aidl
@@ -29,9 +29,9 @@ interface IAudioService {
void setStreamVolume(int streamType, int index, int flags);
- void setStreamSolo(int streamType, boolean state, IBinder cb);
+ void setStreamSolo(int streamType, boolean state, IBinder cb);
- void setStreamMute(int streamType, boolean state, IBinder cb);
+ void setStreamMute(int streamType, boolean state, IBinder cb);
int getStreamVolume(int streamType);
@@ -46,23 +46,11 @@ interface IAudioService {
int getVibrateSetting(int vibrateType);
boolean shouldVibrate(int vibrateType);
-
- void setMicrophoneMute(boolean on);
-
- boolean isMicrophoneMute();
void setMode(int mode);
int getMode();
- void setRouting(int mode, int routes, int mask);
-
- int getRouting(int mode);
-
- boolean isMusicActive();
-
- void setParameter(String key, String value);
-
oneway void playSoundEffect(int effectType);
oneway void playSoundEffectVolume(int effectType, float volume);
diff --git a/media/java/android/media/JetPlayer.java b/media/java/android/media/JetPlayer.java
index 4fb0ead..2263605 100644
--- a/media/java/android/media/JetPlayer.java
+++ b/media/java/android/media/JetPlayer.java
@@ -89,7 +89,7 @@ public class JetPlayer
// Jet rendering audio parameters
private static final int JET_OUTPUT_RATE = 22050; // _SAMPLE_RATE_22050 in Android.mk
private static final int JET_OUTPUT_CHANNEL_CONFIG =
- AudioFormat.CHANNEL_CONFIGURATION_STEREO; // NUM_OUTPUT_CHANNELS=2 in Android.mk
+ AudioFormat.CHANNEL_OUT_STEREO; // NUM_OUTPUT_CHANNELS=2 in Android.mk
//--------------------------------------------
diff --git a/media/java/android/media/ToneGenerator.java b/media/java/android/media/ToneGenerator.java
index e5ee9a3..c60a1ac 100644
--- a/media/java/android/media/ToneGenerator.java
+++ b/media/java/android/media/ToneGenerator.java
@@ -724,9 +724,9 @@ public class ToneGenerator
public static final int TONE_CDMA_SIGNAL_OFF = 98;
/** Maximum volume, for use with {@link #ToneGenerator(int,int)} */
- public static final int MAX_VOLUME = AudioSystem.MAX_VOLUME;
+ public static final int MAX_VOLUME = 100;
/** Minimum volume setting, for use with {@link #ToneGenerator(int,int)} */
- public static final int MIN_VOLUME = AudioSystem.MIN_VOLUME;
+ public static final int MIN_VOLUME = 0;
/**
diff --git a/media/jni/soundpool/SoundPool.cpp b/media/jni/soundpool/SoundPool.cpp
index 1fdecdd..0d07abe 100644
--- a/media/jni/soundpool/SoundPool.cpp
+++ b/media/jni/soundpool/SoundPool.cpp
@@ -527,13 +527,14 @@ void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftV
    // wrong audio buffer size (mAudioBufferSize)
unsigned long toggle = mToggle ^ 1;
void *userData = (void *)((unsigned long)this | toggle);
+ uint32_t channels = (numChannels == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO;
#ifdef USE_SHARED_MEM_BUFFER
newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
- numChannels, sample->getIMemory(), 0, callback, userData);
+ channels, sample->getIMemory(), 0, callback, userData);
#else
newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
- numChannels, frameCount, 0, callback, userData, bufferFrames);
+ channels, frameCount, 0, callback, userData, bufferFrames);
#endif
if (newTrack->initCheck() != NO_ERROR) {
LOGE("Error creating AudioTrack");
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 07c81f7..9d442c3 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -20,7 +20,8 @@ LOCAL_SRC_FILES:= \
mediametadataretriever.cpp \
ToneGenerator.cpp \
JetPlayer.cpp \
- IOMX.cpp
+ IOMX.cpp \
+ IAudioPolicyService.cpp
LOCAL_SHARED_LIBRARIES := \
libui libcutils libutils libbinder libsonivox
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 0a6f4f7..5e35564 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -28,6 +28,7 @@
#include <media/AudioSystem.h>
#include <media/AudioRecord.h>
+#include <media/mediarecorder.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
@@ -45,7 +46,7 @@ namespace android {
// ---------------------------------------------------------------------------
AudioRecord::AudioRecord()
- : mStatus(NO_INIT)
+ : mStatus(NO_INIT), mInput(0)
{
}
@@ -53,15 +54,15 @@ AudioRecord::AudioRecord(
int inputSource,
uint32_t sampleRate,
int format,
- int channelCount,
+ uint32_t channels,
int frameCount,
uint32_t flags,
callback_t cbf,
void* user,
int notificationFrames)
- : mStatus(NO_INIT)
+ : mStatus(NO_INIT), mInput(0)
{
- mStatus = set(inputSource, sampleRate, format, channelCount,
+ mStatus = set(inputSource, sampleRate, format, channels,
frameCount, flags, cbf, user, notificationFrames);
}
@@ -78,6 +79,7 @@ AudioRecord::~AudioRecord()
}
mAudioRecord.clear();
IPCThreadState::self()->flushCommands();
+ AudioSystem::releaseInput(mInput);
}
}
@@ -85,7 +87,7 @@ status_t AudioRecord::set(
int inputSource,
uint32_t sampleRate,
int format,
- int channelCount,
+ uint32_t channels,
int frameCount,
uint32_t flags,
callback_t cbf,
@@ -94,7 +96,7 @@ status_t AudioRecord::set(
bool threadCanCallJava)
{
- LOGV("set(): sampleRate %d, channelCount %d, frameCount %d",sampleRate, channelCount, frameCount);
+ LOGV("set(): sampleRate %d, channels %d, frameCount %d",sampleRate, channels, frameCount);
if (mAudioRecord != 0) {
return INVALID_OPERATION;
}
@@ -104,8 +106,8 @@ status_t AudioRecord::set(
return NO_INIT;
}
- if (inputSource == DEFAULT_INPUT) {
- inputSource = MIC_INPUT;
+ if (inputSource == AUDIO_SOURCE_DEFAULT) {
+ inputSource = AUDIO_SOURCE_MIC;
}
if (sampleRate == 0) {
@@ -115,15 +117,21 @@ status_t AudioRecord::set(
if (format == 0) {
format = AudioSystem::PCM_16_BIT;
}
- if (channelCount == 0) {
- channelCount = 1;
+ // validate parameters
+ if (!AudioSystem::isValidFormat(format)) {
+ LOGE("Invalid format");
+ return BAD_VALUE;
}
- // validate parameters
- if (format != AudioSystem::PCM_16_BIT) {
+ if (!AudioSystem::isInputChannel(channels)) {
return BAD_VALUE;
}
- if (channelCount != 1 && channelCount != 2) {
+ int channelCount = AudioSystem::popCount(channels);
+
+ mInput = AudioSystem::getInput(inputSource,
+ sampleRate, format, channels, (AudioSystem::audio_in_acoustics)flags);
+ if (mInput == 0) {
+ LOGE("Could not get audio output for stream type %d", inputSource);
return BAD_VALUE;
}
@@ -132,14 +140,22 @@ status_t AudioRecord::set(
if (AudioSystem::getInputBufferSize(sampleRate, format, channelCount, &inputBuffSizeInBytes)
!= NO_ERROR) {
LOGE("AudioSystem could not query the input buffer size.");
- return NO_INIT;
+ return NO_INIT;
}
+
if (inputBuffSizeInBytes == 0) {
LOGE("Recording parameters are not supported: sampleRate %d, channelCount %d, format %d",
sampleRate, channelCount, format);
return BAD_VALUE;
}
+
int frameSizeInBytes = channelCount * (format == AudioSystem::PCM_16_BIT ? 2 : 1);
+ if (AudioSystem::isLinearPCM(format)) {
+ frameSizeInBytes = channelCount * (format == AudioSystem::PCM_16_BIT ? sizeof(int16_t) : sizeof(int8_t));
+ } else {
+ frameSizeInBytes = sizeof(int8_t);
+ }
+
// We use 2* size of input buffer for ping pong use of record buffer.
int minFrameCount = 2 * inputBuffSizeInBytes / frameSizeInBytes;
@@ -157,11 +173,11 @@ status_t AudioRecord::set(
// open record channel
status_t status;
- sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), inputSource,
+ sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), mInput,
sampleRate, format,
channelCount,
frameCount,
- ((uint16_t)flags) << 16,
+ ((uint16_t)flags) << 16,
&status);
if (record == 0) {
LOGE("AudioFlinger could not create record track, status: %d", status);
@@ -188,7 +204,7 @@ status_t AudioRecord::set(
mFormat = format;
// Update buffer size in case it has been limited by AudioFlinger during track creation
mFrameCount = mCblk->frameCount;
- mChannelCount = channelCount;
+ mChannelCount = (uint8_t)channelCount;
mActive = 0;
mCbf = cbf;
mNotificationFrames = notificationFrames;
@@ -234,7 +250,11 @@ uint32_t AudioRecord::frameCount() const
int AudioRecord::frameSize() const
{
- return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ if (AudioSystem::isLinearPCM(mFormat)) {
+ return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ } else {
+ return sizeof(uint8_t);
+ }
}
int AudioRecord::inputSource() const
@@ -262,15 +282,18 @@ status_t AudioRecord::start()
}
if (android_atomic_or(1, &mActive) == 0) {
- mNewPosition = mCblk->user + mUpdatePeriod;
- mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
- mCblk->waitTimeMs = 0;
- if (t != 0) {
- t->run("ClientRecordThread", THREAD_PRIORITY_AUDIO_CLIENT);
- } else {
- setpriority(PRIO_PROCESS, 0, THREAD_PRIORITY_AUDIO_CLIENT);
+ ret = AudioSystem::startInput(mInput);
+ if (ret == NO_ERROR) {
+ mNewPosition = mCblk->user + mUpdatePeriod;
+ mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
+ mCblk->waitTimeMs = 0;
+ if (t != 0) {
+ t->run("ClientRecordThread", THREAD_PRIORITY_AUDIO_CLIENT);
+ } else {
+ setpriority(PRIO_PROCESS, 0, THREAD_PRIORITY_AUDIO_CLIENT);
+ }
+ ret = mAudioRecord->start();
}
- ret = mAudioRecord->start();
}
if (t != 0) {
@@ -301,6 +324,7 @@ status_t AudioRecord::stop()
} else {
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
}
+ AudioSystem::stopInput(mInput);
}
if (t != 0) {
@@ -421,7 +445,7 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
"this shouldn't happen (user=%08x, server=%08x)", cblk->user, cblk->server);
cblk->waitTimeMs = 0;
-
+
if (framesReq > framesReady) {
framesReq = framesReady;
}
@@ -437,7 +461,7 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
audioBuffer->channelCount= mChannelCount;
audioBuffer->format = mFormat;
audioBuffer->frameCount = framesReq;
- audioBuffer->size = framesReq*mChannelCount*sizeof(int16_t);
+ audioBuffer->size = framesReq*cblk->frameSize;
audioBuffer->raw = (int8_t*)cblk->buffer(u);
active = mActive;
return active ? status_t(NO_ERROR) : status_t(STOPPED);
@@ -468,7 +492,7 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize)
do {
- audioBuffer.frameCount = userSize/mChannelCount/sizeof(int16_t);
+ audioBuffer.frameCount = userSize/frameSize();
// Calling obtainBuffer() with a negative wait count causes
// an (almost) infinite wait time.
@@ -519,8 +543,8 @@ bool AudioRecord::processAudioBuffer(const sp<ClientRecordThread>& thread)
do {
audioBuffer.frameCount = frames;
- // Calling obtainBuffer() with a wait count of 1
- // limits wait time to WAIT_PERIOD_MS. This prevents from being
+ // Calling obtainBuffer() with a wait count of 1
+ // limits wait time to WAIT_PERIOD_MS. This prevents from being
// stuck here not being able to handle timed events (position, markers).
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
@@ -548,14 +572,14 @@ bool AudioRecord::processAudioBuffer(const sp<ClientRecordThread>& thread)
if (readSize > reqSize) readSize = reqSize;
audioBuffer.size = readSize;
- audioBuffer.frameCount = readSize/mChannelCount/sizeof(int16_t);
+ audioBuffer.frameCount = readSize/frameSize();
frames -= audioBuffer.frameCount;
releaseBuffer(&audioBuffer);
} while (frames);
-
+
// Manage overrun callback
if (mActive && (mCblk->framesAvailable_l() == 0)) {
LOGV("Overrun user: %x, server: %x, flowControlFlag %d", mCblk->user, mCblk->server, mCblk->flowControlFlag);
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 86d0542..1fc1024 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -20,8 +20,18 @@
#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <media/AudioSystem.h>
+#include <media/IAudioPolicyService.h>
#include <math.h>
+// ----------------------------------------------------------------------------
+// the sim build doesn't have gettid
+
+#ifndef HAVE_GETTID
+# define gettid getpid
+#endif
+
+// ----------------------------------------------------------------------------
+
namespace android {
// client singleton for AudioFlinger binder interface
@@ -30,10 +40,9 @@ sp<IAudioFlinger> AudioSystem::gAudioFlinger;
sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
// Cached values
-int AudioSystem::gOutSamplingRate[NUM_AUDIO_OUTPUT_TYPES];
-int AudioSystem::gOutFrameCount[NUM_AUDIO_OUTPUT_TYPES];
-uint32_t AudioSystem::gOutLatency[NUM_AUDIO_OUTPUT_TYPES];
-bool AudioSystem::gA2dpEnabled;
+DefaultKeyedVector<int, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
+DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
+
// Cached values for recording queries
uint32_t AudioSystem::gPrevInSamplingRate = 16000;
int AudioSystem::gPrevInFormat = AudioSystem::PCM_16_BIT;
@@ -65,42 +74,10 @@ const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
binder->linkToDeath(gAudioFlingerClient);
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
gAudioFlinger->registerClient(gAudioFlingerClient);
- // Cache frequently accessed parameters
- for (int output = 0; output < NUM_AUDIO_OUTPUT_TYPES; output++) {
- gOutFrameCount[output] = (int)gAudioFlinger->frameCount(output);
- gOutSamplingRate[output] = (int)gAudioFlinger->sampleRate(output);
- gOutLatency[output] = gAudioFlinger->latency(output);
- }
- gA2dpEnabled = gAudioFlinger->isA2dpEnabled();
}
LOGE_IF(gAudioFlinger==0, "no AudioFlinger!?");
- return gAudioFlinger;
-}
-// routing helper functions
-status_t AudioSystem::speakerphone(bool state) {
- uint32_t routes = state ? ROUTE_SPEAKER : ROUTE_EARPIECE;
- return setRouting(MODE_IN_CALL, routes, ROUTE_ALL);
-}
-
-status_t AudioSystem::isSpeakerphoneOn(bool* state) {
- uint32_t routes = 0;
- status_t s = getRouting(MODE_IN_CALL, &routes);
- *state = !!(routes & ROUTE_SPEAKER);
- return s;
-}
-
-status_t AudioSystem::bluetoothSco(bool state) {
- uint32_t mask = ROUTE_BLUETOOTH_SCO;
- uint32_t routes = state ? mask : ROUTE_EARPIECE;
- return setRouting(MODE_IN_CALL, routes, ROUTE_ALL);
-}
-
-status_t AudioSystem::isBluetoothScoOn(bool* state) {
- uint32_t routes = 0;
- status_t s = getRouting(MODE_IN_CALL, &routes);
- *state = !!(routes & ROUTE_BLUETOOTH_SCO);
- return s;
+ return gAudioFlinger;
}
status_t AudioSystem::muteMicrophone(bool state) {
@@ -148,12 +125,12 @@ status_t AudioSystem::getMasterMute(bool* mute)
return NO_ERROR;
}
-status_t AudioSystem::setStreamVolume(int stream, float value)
+status_t AudioSystem::setStreamVolume(int stream, float value, void *output)
{
if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- af->setStreamVolume(stream, value);
+ af->setStreamVolume(stream, value, output);
return NO_ERROR;
}
@@ -166,12 +143,12 @@ status_t AudioSystem::setStreamMute(int stream, bool mute)
return NO_ERROR;
}
-status_t AudioSystem::getStreamVolume(int stream, float* volume)
+status_t AudioSystem::getStreamVolume(int stream, float* volume, void *output)
{
if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- *volume = af->streamVolume(stream);
+ *volume = af->streamVolume(stream, output);
return NO_ERROR;
}
@@ -192,43 +169,28 @@ status_t AudioSystem::setMode(int mode)
return af->setMode(mode);
}
-status_t AudioSystem::getMode(int* mode)
-{
+
+status_t AudioSystem::isMusicActive(bool* state) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- *mode = af->getMode();
+ *state = af->isMusicActive();
return NO_ERROR;
}
-status_t AudioSystem::setRouting(int mode, uint32_t routes, uint32_t mask)
-{
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
- return af->setRouting(mode, routes, mask);
-}
-status_t AudioSystem::getRouting(int mode, uint32_t* routes)
-{
+status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- uint32_t r = af->getRouting(mode);
- *routes = r;
- return NO_ERROR;
+ return af->setParameters(ioHandle, keyValuePairs);
}
-status_t AudioSystem::isMusicActive(bool* state) {
+String8 AudioSystem::getParameters(audio_io_handle_t ioHandle, const String8& keys) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
- *state = af->isMusicActive();
- return NO_ERROR;
-}
+ String8 result = String8("");
+ if (af == 0) return result;
-// Temporary interface, do not use
-// TODO: Replace with a more generic key:value get/set mechanism
-status_t AudioSystem::setParameter(const char* key, const char* value) {
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
- return af->setParameter(key, value);
+ result = af->getParameters(ioHandle, keys);
+ return result;
}
// convert volume steps to natural log scale
@@ -257,55 +219,108 @@ int AudioSystem::logToLinear(float volume)
status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType)
{
- int output = getOutput(streamType);
-
- if (output == NUM_AUDIO_OUTPUT_TYPES) return PERMISSION_DENIED;
+ OutputDescriptor *outputDesc;
+ audio_io_handle_t output;
+
+ if (streamType == DEFAULT) {
+ streamType = MUSIC;
+ }
+
+ output = getOutput((stream_type)streamType);
+ if (output == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ gLock.lock();
+ outputDesc = AudioSystem::gOutputs.valueFor(output);
+ if (outputDesc == 0) {
+ LOGV("getOutputSamplingRate() no output descriptor for output %p in gOutputs", output);
+ gLock.unlock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *samplingRate = af->sampleRate(output);
+ } else {
+ LOGV("getOutputSamplingRate() reading from output desc");
+ *samplingRate = outputDesc->samplingRate;
+ gLock.unlock();
+ }
+
+ LOGV("getOutputSamplingRate() streamType %d, output %p, sampling rate %d", streamType, output, *samplingRate);
- // gOutSamplingRate[] is updated by getOutput() which calls get_audio_flinger()
- LOGV("getOutputSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, gOutSamplingRate[output]);
-
- *samplingRate = gOutSamplingRate[output];
-
return NO_ERROR;
}
status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType)
{
- int output = getOutput(streamType);
+ OutputDescriptor *outputDesc;
+ audio_io_handle_t output;
+
+ if (streamType == DEFAULT) {
+ streamType = MUSIC;
+ }
- if (output == NUM_AUDIO_OUTPUT_TYPES) return PERMISSION_DENIED;
+ output = getOutput((stream_type)streamType);
+ if (output == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ gLock.lock();
+ outputDesc = AudioSystem::gOutputs.valueFor(output);
+ if (outputDesc == 0) {
+ gLock.unlock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *frameCount = af->frameCount(output);
+ } else {
+ *frameCount = outputDesc->frameCount;
+ gLock.unlock();
+ }
- // gOutFrameCount[] is updated by getOutput() which calls get_audio_flinger()
- LOGV("getOutputFrameCount() streamType %d, output %d, frame count %d", streamType, output, gOutFrameCount[output]);
+ LOGV("getOutputFrameCount() streamType %d, output %p, frameCount %d", streamType, output, *frameCount);
- *frameCount = gOutFrameCount[output];
-
return NO_ERROR;
}
status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType)
{
- int output = getOutput(streamType);
+ OutputDescriptor *outputDesc;
+ audio_io_handle_t output;
+
+ if (streamType == DEFAULT) {
+ streamType = MUSIC;
+ }
- if (output == NUM_AUDIO_OUTPUT_TYPES) return PERMISSION_DENIED;
+ output = getOutput((stream_type)streamType);
+ if (output == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ gLock.lock();
+ outputDesc = AudioSystem::gOutputs.valueFor(output);
+ if (outputDesc == 0) {
+ gLock.unlock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *latency = af->latency(output);
+ } else {
+ *latency = outputDesc->latency;
+ gLock.unlock();
+ }
- // gOutLatency[] is updated by getOutput() which calls get_audio_flinger()
- LOGV("getOutputLatency() streamType %d, output %d, latency %d", streamType, output, gOutLatency[output]);
+ LOGV("getOutputLatency() streamType %d, output %p, latency %d", streamType, output, *latency);
- *latency = gOutLatency[output];
-
return NO_ERROR;
}
-status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
+status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
size_t* buffSize)
{
// Do we have a stale gInBufferSize or are we requesting the input buffer size for new values
- if ((gInBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
+ if ((gInBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
|| (channelCount != gPrevInChannelCount)) {
// save the request params
gPrevInSamplingRate = sampleRate;
- gPrevInFormat = format;
+ gPrevInFormat = format;
gPrevInChannelCount = channelCount;
gInBuffSize = 0;
@@ -314,24 +329,18 @@ status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int ch
return PERMISSION_DENIED;
}
gInBuffSize = af->getInputBufferSize(sampleRate, format, channelCount);
- }
+ }
*buffSize = gInBuffSize;
-
+
return NO_ERROR;
}
// ---------------------------------------------------------------------------
-void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
+void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
Mutex::Autolock _l(AudioSystem::gLock);
- AudioSystem::gAudioFlinger.clear();
- for (int output = 0; output < NUM_AUDIO_OUTPUT_TYPES; output++) {
- gOutFrameCount[output] = 0;
- gOutSamplingRate[output] = 0;
- gOutLatency[output] = 0;
- }
- AudioSystem::gInBuffSize = 0;
+ AudioSystem::gAudioFlinger.clear();
if (gAudioErrorCallback) {
gAudioErrorCallback(DEAD_OBJECT);
@@ -339,33 +348,83 @@ void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
LOGW("AudioFlinger server died!");
}
-void AudioSystem::AudioFlingerClient::a2dpEnabledChanged(bool enabled) {
- gA2dpEnabled = enabled;
- LOGV("AudioFlinger A2DP enabled status changed! %d", enabled);
-}
+void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, void *param1, void *param2) {
+ LOGV("ioConfigChanged() event %d", event);
+ audio_io_handle_t ioHandle = (audio_io_handle_t)param1;
+ OutputDescriptor *desc;
+ uint32_t stream;
+
+ if (param1 == 0) return;
-void AudioSystem::setErrorCallback(audio_error_callback cb) {
Mutex::Autolock _l(AudioSystem::gLock);
- gAudioErrorCallback = cb;
-}
-int AudioSystem::getOutput(int streamType)
-{
- // make sure that gA2dpEnabled is valid by calling get_audio_flinger() which in turn
- // will call gAudioFlinger->isA2dpEnabled()
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return NUM_AUDIO_OUTPUT_TYPES;
+ switch (event) {
+ case STREAM_CONFIG_CHANGED:
+ if (param2 == 0) break;
+ stream = *(uint32_t *)param2;
+ LOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %p", stream, ioHandle);
+ if (gStreamOutputMap.indexOfKey(stream) >= 0) {
+ gStreamOutputMap.replaceValueFor(stream, ioHandle);
+ }
+ break;
+ case OUTPUT_OPENED: {
+ if (gOutputs.indexOfKey(ioHandle) >= 0) {
+ LOGV("ioConfigChanged() opening already existing output! %p", ioHandle);
+ break;
+ }
+ if (param2 == 0) break;
+ desc = (OutputDescriptor *)param2;
+
+ OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
+ gOutputs.add(ioHandle, outputDesc);
+ LOGV("ioConfigChanged() new output samplingRate %d, format %d channels %d frameCount %d latency %d",
+ outputDesc->samplingRate, outputDesc->format, outputDesc->channels, outputDesc->frameCount, outputDesc->latency);
+ } break;
+ case OUTPUT_CLOSED: {
+ if (gOutputs.indexOfKey(ioHandle) < 0) {
+ LOGW("ioConfigChanged() closing unknow output! %p", ioHandle);
+ break;
+ }
+ LOGV("ioConfigChanged() output %p closed", ioHandle);
+
+ gOutputs.removeItem(ioHandle);
+ for (int i = gStreamOutputMap.size() - 1; i >= 0 ; i--) {
+ if (gStreamOutputMap.valueAt(i) == ioHandle) {
+ gStreamOutputMap.removeItemsAt(i);
+ }
+ }
+ } break;
+
+ case OUTPUT_CONFIG_CHANGED: {
+ int index = gOutputs.indexOfKey(ioHandle);
+ if (index < 0) {
+ LOGW("ioConfigChanged() modifying unknow output! %p", ioHandle);
+ break;
+ }
+ if (param2 == 0) break;
+ desc = (OutputDescriptor *)param2;
+
+ LOGV("ioConfigChanged() new config for output %p samplingRate %d, format %d channels %d frameCount %d latency %d",
+ ioHandle, desc->samplingRate, desc->format,
+ desc->channels, desc->frameCount, desc->latency);
+ OutputDescriptor *outputDesc = gOutputs.valueAt(index);
+ delete outputDesc;
+ outputDesc = new OutputDescriptor(*desc);
+ gOutputs.replaceValueFor(ioHandle, outputDesc);
+ } break;
+ case INPUT_OPENED:
+ case INPUT_CLOSED:
+ case INPUT_CONFIG_CHANGED:
+ break;
- if (streamType == DEFAULT) {
- streamType = MUSIC;
- }
- if (gA2dpEnabled && routedToA2dpOutput(streamType)) {
- return AUDIO_OUTPUT_A2DP;
- } else {
- return AUDIO_OUTPUT_HARDWARE;
}
}
+void AudioSystem::setErrorCallback(audio_error_callback cb) {
+ Mutex::Autolock _l(gLock);
+ gAudioErrorCallback = cb;
+}
+
bool AudioSystem::routedToA2dpOutput(int streamType) {
switch(streamType) {
case MUSIC:
@@ -379,6 +438,451 @@ bool AudioSystem::routedToA2dpOutput(int streamType) {
}
+// client singleton for AudioPolicyService binder interface
+sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
+sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient;
+
+
+// establish binder interface to AudioFlinger service
+const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
+{
+ gLock.lock();
+ if (gAudioPolicyService.get() == 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_policy"));
+ if (binder != 0)
+ break;
+ LOGW("AudioPolicyService not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while(true);
+ if (gAudioPolicyServiceClient == NULL) {
+ gAudioPolicyServiceClient = new AudioPolicyServiceClient();
+ }
+ binder->linkToDeath(gAudioPolicyServiceClient);
+ gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
+ gLock.unlock();
+ } else {
+ gLock.unlock();
+ }
+ return gAudioPolicyService;
+}
+
+status_t AudioSystem::setDeviceConnectionState(audio_devices device,
+ device_connection_state state,
+ const char *device_address)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ return aps->setDeviceConnectionState(device, state, device_address);
+}
+
+AudioSystem::device_connection_state AudioSystem::getDeviceConnectionState(audio_devices device,
+ const char *device_address)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return DEVICE_STATE_UNAVAILABLE;
+
+ return aps->getDeviceConnectionState(device, device_address);
+}
+
+status_t AudioSystem::setPhoneState(int state)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ return aps->setPhoneState(state);
+}
+
+status_t AudioSystem::setRingerMode(uint32_t mode, uint32_t mask)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setRingerMode(mode, mask);
+}
+
+status_t AudioSystem::setForceUse(force_use usage, forced_config config)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setForceUse(usage, config);
+}
+
+AudioSystem::forced_config AudioSystem::getForceUse(force_use usage)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return FORCE_NONE;
+ return aps->getForceUse(usage);
+}
+
+
+audio_io_handle_t AudioSystem::getOutput(stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ output_flags flags)
+{
+ audio_io_handle_t output = NULL;
+ if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
+ Mutex::Autolock _l(gLock);
+ output = AudioSystem::gStreamOutputMap.valueFor(stream);
+ LOGV_IF((output != NULL), "getOutput() read %p from cache for stream %d", output, stream);
+ }
+ if (output == NULL) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return NULL;
+ output = aps->getOutput(stream, samplingRate, format, channels, flags);
+ if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
+ Mutex::Autolock _l(gLock);
+ AudioSystem::gStreamOutputMap.add(stream, output);
+ }
+ }
+ return output;
+}
+
+status_t AudioSystem::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->startOutput(output, stream);
+}
+
+status_t AudioSystem::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->stopOutput(output, stream);
+}
+
+void AudioSystem::releaseOutput(audio_io_handle_t output)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return;
+ aps->releaseOutput(output);
+}
+
+audio_io_handle_t AudioSystem::getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ audio_in_acoustics acoustics)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return NULL;
+ return aps->getInput(inputSource, samplingRate, format, channels, acoustics);
+}
+
+status_t AudioSystem::startInput(audio_io_handle_t input)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->startInput(input);
+}
+
+status_t AudioSystem::stopInput(audio_io_handle_t input)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->stopInput(input);
+}
+
+void AudioSystem::releaseInput(audio_io_handle_t input)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return;
+ aps->releaseInput(input);
+}
+
+status_t AudioSystem::initStreamVolume(stream_type stream,
+ int indexMin,
+ int indexMax)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->initStreamVolume(stream, indexMin, indexMax);
+}
+
+status_t AudioSystem::setStreamVolumeIndex(stream_type stream, int index)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setStreamVolumeIndex(stream, index);
+}
+
+status_t AudioSystem::getStreamVolumeIndex(stream_type stream, int *index)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getStreamVolumeIndex(stream, index);
+}
+
+// ---------------------------------------------------------------------------
+
+void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
+ Mutex::Autolock _l(AudioSystem::gLock);
+ AudioSystem::gAudioPolicyService.clear();
+
+ LOGW("AudioPolicyService server died!");
+}
+
+// ---------------------------------------------------------------------------
+
+
+// use emulated popcount optimization
+// http://www.df.lth.se/~john_e/gems/gem002d.html
+uint32_t AudioSystem::popCount(uint32_t u)
+{
+ u = ((u&0x55555555) + ((u>>1)&0x55555555));
+ u = ((u&0x33333333) + ((u>>2)&0x33333333));
+ u = ((u&0x0f0f0f0f) + ((u>>4)&0x0f0f0f0f));
+ u = ((u&0x00ff00ff) + ((u>>8)&0x00ff00ff));
+ u = ( u&0x0000ffff) + (u>>16);
+ return u;
+}
+
+bool AudioSystem::isOutputDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ ((device & ~AudioSystem::DEVICE_OUT_ALL) == 0)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isInputDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ ((device & ~AudioSystem::DEVICE_IN_ALL) == 0)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isA2dpDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ (device & (AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isBluetoothScoDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ (device & (AudioSystem::DEVICE_OUT_BLUETOOTH_SCO |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isLowVisibility(stream_type stream)
+{
+ if (stream == AudioSystem::SYSTEM || stream == AudioSystem::NOTIFICATION) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isInputChannel(uint32_t channel)
+{
+ if ((channel & ~AudioSystem::CHANNEL_IN_ALL) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isOutputChannel(uint32_t channel)
+{
+ if ((channel & ~AudioSystem::CHANNEL_OUT_ALL) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isValidFormat(uint32_t format)
+{
+ switch (format & MAIN_FORMAT_MASK) {
+ case PCM:
+ case MP3:
+ case AMR_NB:
+ case AMR_WB:
+ case AAC:
+ case HE_AAC_V1:
+ case HE_AAC_V2:
+ case VORBIS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool AudioSystem::isLinearPCM(uint32_t format)
+{
+ switch (format) {
+ case PCM_16_BIT:
+ case PCM_8_BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+//------------------------- AudioParameter class implementation ---------------
+
+const char *AudioParameter::keyRouting = "routing";
+const char *AudioParameter::keySamplingRate = "sampling_rate";
+const char *AudioParameter::keyFormat = "format";
+const char *AudioParameter::keyChannels = "channels";
+const char *AudioParameter::keyFrameCount = "frame_count";
+
+AudioParameter::AudioParameter(const String8& keyValuePairs)
+{
+ char *str = new char[keyValuePairs.length()+1];
+ mKeyValuePairs = keyValuePairs;
+
+ strcpy(str, keyValuePairs.string());
+ char *pair = strtok(str, ";");
+ while (pair != NULL) {
+ if (strlen(pair) != 0) {
+ size_t eqIdx = strcspn(pair, "=");
+ String8 key = String8(pair, eqIdx);
+ String8 value;
+ if (eqIdx == strlen(pair)) {
+ value = String8("");
+ } else {
+ value = String8(pair + eqIdx + 1);
+ }
+ if (mParameters.indexOfKey(key) < 0) {
+ mParameters.add(key, value);
+ } else {
+ mParameters.replaceValueFor(key, value);
+ }
+ } else {
+ LOGV("AudioParameter() cstor empty key value pair");
+ }
+ pair = strtok(NULL, ";");
+ }
+
+ delete[] str;
+}
+
+AudioParameter::~AudioParameter()
+{
+ mParameters.clear();
+}
+
+String8 AudioParameter::toString()
+{
+ String8 str = String8("");
+
+ size_t size = mParameters.size();
+ for (size_t i = 0; i < size; i++) {
+ str += mParameters.keyAt(i);
+ str += "=";
+ str += mParameters.valueAt(i);
+ if (i < (size - 1)) str += ";";
+ }
+ return str;
+}
+
+status_t AudioParameter::add(const String8& key, const String8& value)
+{
+ if (mParameters.indexOfKey(key) < 0) {
+ mParameters.add(key, value);
+ return NO_ERROR;
+ } else {
+ mParameters.replaceValueFor(key, value);
+ return ALREADY_EXISTS;
+ }
+}
+
+status_t AudioParameter::addInt(const String8& key, const int value)
+{
+ char str[12];
+ if (snprintf(str, 12, "%d", value) > 0) {
+ String8 str8 = String8(str);
+ return add(key, str8);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::addFloat(const String8& key, const float value)
+{
+ char str[23];
+ if (snprintf(str, 23, "%.10f", value) > 0) {
+ String8 str8 = String8(str);
+ return add(key, str8);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::remove(const String8& key)
+{
+ if (mParameters.indexOfKey(key) >= 0) {
+ mParameters.removeItem(key);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::get(const String8& key, String8& value)
+{
+ if (mParameters.indexOfKey(key) >= 0) {
+ value = mParameters.valueFor(key);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::getInt(const String8& key, int& value)
+{
+ String8 str8;
+ status_t result = get(key, str8);
+ value = 0;
+ if (result == NO_ERROR) {
+ int val;
+ if (sscanf(str8.string(), "%d", &val) == 1) {
+ value = val;
+ } else {
+ result = INVALID_OPERATION;
+ }
+ }
+ return result;
+}
+
+status_t AudioParameter::getFloat(const String8& key, float& value)
+{
+ String8 str8;
+ status_t result = get(key, str8);
+ value = 0;
+ if (result == NO_ERROR) {
+ float val;
+ if (sscanf(str8.string(), "%f", &val) == 1) {
+ value = val;
+ } else {
+ result = INVALID_OPERATION;
+ }
+ }
+ return result;
+}
}; // namespace android
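
A minimal usage sketch for the AudioParameter helper above, paired with the new setParameters()/getParameters() calls (the output handle and the values are illustrative, not part of the patch):

    #include <media/AudioSystem.h>
    using namespace android;

    void parameterSketch(audio_io_handle_t output) {
        AudioParameter param(String8(""));                              // start empty
        param.addInt(String8(AudioParameter::keySamplingRate), 44100);
        param.addInt(String8(AudioParameter::keyRouting), 0x2);         // e.g. DEVICE_OUT_SPEAKER
        AudioSystem::setParameters(output, param.toString());           // e.g. "routing=2;sampling_rate=44100"

        String8 reply = AudioSystem::getParameters(output, String8(AudioParameter::keyRouting));
        int device;
        if (AudioParameter(reply).getInt(String8(AudioParameter::keyRouting), device) == NO_ERROR) {
            // device now holds the current routing of this output
        }
    }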
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 7b9eda7..b147d25 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -54,7 +54,7 @@ AudioTrack::AudioTrack(
int streamType,
uint32_t sampleRate,
int format,
- int channelCount,
+ int channels,
int frameCount,
uint32_t flags,
callback_t cbf,
@@ -62,7 +62,7 @@ AudioTrack::AudioTrack(
int notificationFrames)
: mStatus(NO_INIT)
{
- mStatus = set(streamType, sampleRate, format, channelCount,
+ mStatus = set(streamType, sampleRate, format, channels,
frameCount, flags, cbf, user, notificationFrames, 0);
}
@@ -70,7 +70,7 @@ AudioTrack::AudioTrack(
int streamType,
uint32_t sampleRate,
int format,
- int channelCount,
+ int channels,
const sp<IMemory>& sharedBuffer,
uint32_t flags,
callback_t cbf,
@@ -78,7 +78,7 @@ AudioTrack::AudioTrack(
int notificationFrames)
: mStatus(NO_INIT)
{
- mStatus = set(streamType, sampleRate, format, channelCount,
+ mStatus = set(streamType, sampleRate, format, channels,
0, flags, cbf, user, notificationFrames, sharedBuffer);
}
@@ -97,6 +97,7 @@ AudioTrack::~AudioTrack()
}
mAudioTrack.clear();
IPCThreadState::self()->flushCommands();
+ AudioSystem::releaseOutput(getOutput());
}
}
@@ -104,7 +105,7 @@ status_t AudioTrack::set(
int streamType,
uint32_t sampleRate,
int format,
- int channelCount,
+ int channels,
int frameCount,
uint32_t flags,
callback_t cbf,
@@ -150,63 +151,84 @@ status_t AudioTrack::set(
if (format == 0) {
format = AudioSystem::PCM_16_BIT;
}
- if (channelCount == 0) {
- channelCount = 2;
+ if (channels == 0) {
+ channels = AudioSystem::CHANNEL_OUT_STEREO;
}
// validate parameters
- if (((format != AudioSystem::PCM_8_BIT) || sharedBuffer != 0) &&
- (format != AudioSystem::PCM_16_BIT)) {
+ if (!AudioSystem::isValidFormat(format)) {
LOGE("Invalid format");
return BAD_VALUE;
}
- if (channelCount != 1 && channelCount != 2) {
- LOGE("Invalid channel number");
+
+ // force direct flag if format is not linear PCM
+ if (!AudioSystem::isLinearPCM(format)) {
+ flags |= AudioSystem::OUTPUT_FLAG_DIRECT;
+ }
+
+ if (!AudioSystem::isOutputChannel(channels)) {
+ LOGE("Invalid channel mask");
return BAD_VALUE;
}
+ uint32_t channelCount = AudioSystem::popCount(channels);
- // Ensure that buffer depth covers at least audio hardware latency
- uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
- if (minBufCount < 2) minBufCount = 2;
+ audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
+ sampleRate, format, channels, (AudioSystem::output_flags)flags);
- // When playing from shared buffer, playback will start even if last audioflinger
- // block is partly filled.
- if (sharedBuffer != 0 && minBufCount > 1) {
- minBufCount--;
+ if (output == 0) {
+ LOGE("Could not get audio output for stream type %d", streamType);
+ return BAD_VALUE;
}
- int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
-
- if (sharedBuffer == 0) {
- if (frameCount == 0) {
- frameCount = minFrameCount;
- }
- if (notificationFrames == 0) {
- notificationFrames = frameCount/2;
- }
- // Make sure that application is notified with sufficient margin
- // before underrun
- if (notificationFrames > frameCount/2) {
- notificationFrames = frameCount/2;
+ if (!AudioSystem::isLinearPCM(format)) {
+ if (sharedBuffer != 0) {
+ frameCount = sharedBuffer->size();
}
} else {
- // Ensure that buffer alignment matches channelcount
- if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
- LOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
- return BAD_VALUE;
- }
- frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
- }
+ // Ensure that buffer depth covers at least audio hardware latency
+ uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
+ if (minBufCount < 2) minBufCount = 2;
- if (frameCount < minFrameCount) {
- LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
- return BAD_VALUE;
+ int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+
+ if (sharedBuffer == 0) {
+ if (frameCount == 0) {
+ frameCount = minFrameCount;
+ }
+ if (notificationFrames == 0) {
+ notificationFrames = frameCount/2;
+ }
+ // Make sure that application is notified with sufficient margin
+ // before underrun
+ if (notificationFrames > frameCount/2) {
+ notificationFrames = frameCount/2;
+ }
+ if (frameCount < minFrameCount) {
+ LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
+ return BAD_VALUE;
+ }
+ } else {
+ // Ensure that buffer alignment matches channelcount
+ if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
+ LOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
+ return BAD_VALUE;
+ }
+ frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
+ }
}
// create the track
status_t status;
sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
- streamType, sampleRate, format, channelCount, frameCount, flags, sharedBuffer, &status);
+ streamType,
+ sampleRate,
+ format,
+ channelCount,
+ frameCount,
+ ((uint16_t)flags) << 16,
+ sharedBuffer,
+ output,
+ &status);
if (track == 0) {
LOGE("AudioFlinger could not create track, status: %d", status);
@@ -245,6 +267,7 @@ status_t AudioTrack::set(
mVolume[RIGHT] = 1.0f;
mStreamType = streamType;
mFormat = format;
+ mChannels = channels;
mChannelCount = channelCount;
mSharedBuffer = sharedBuffer;
mMuted = false;
@@ -259,6 +282,7 @@ status_t AudioTrack::set(
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
+ mFlags = flags;
return NO_ERROR;
}
@@ -297,7 +321,11 @@ uint32_t AudioTrack::frameCount() const
int AudioTrack::frameSize() const
{
- return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ if (AudioSystem::isLinearPCM(mFormat)) {
+ return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ } else {
+ return sizeof(uint8_t);
+ }
}
sp<IMemory>& AudioTrack::sharedBuffer()
@@ -323,6 +351,7 @@ void AudioTrack::start()
}
if (android_atomic_or(1, &mActive) == 0) {
+ AudioSystem::startOutput(getOutput(), (AudioSystem::stream_type)mStreamType);
mNewPosition = mCblk->server + mUpdatePeriod;
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
mCblk->waitTimeMs = 0;
@@ -367,6 +396,7 @@ void AudioTrack::stop()
} else {
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
}
+ AudioSystem::stopOutput(getOutput(), (AudioSystem::stream_type)mStreamType);
}
if (t != 0) {
@@ -382,12 +412,12 @@ bool AudioTrack::stopped() const
void AudioTrack::flush()
{
LOGV("flush");
-
+
// clear playback marker and periodic update counter
mMarkerPosition = 0;
mMarkerReached = false;
mUpdatePeriod = 0;
-
+
if (!mActive) {
mAudioTrack->flush();
@@ -403,6 +433,7 @@ void AudioTrack::pause()
if (android_atomic_and(~1, &mActive) == 1) {
mActive = 0;
mAudioTrack->pause();
+ AudioSystem::stopOutput(getOutput(), (AudioSystem::stream_type)mStreamType);
}
}
@@ -455,7 +486,6 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount
{
audio_track_cblk_t* cblk = mCblk;
-
Mutex::Autolock _l(cblk->lock);
if (loopCount == 0) {
@@ -476,7 +506,7 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount
LOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, framecount %d",
loopStart, loopEnd, mFrameCount);
return BAD_VALUE;
- }
+ }
cblk->loopStart = loopStart;
cblk->loopEnd = loopEnd;
@@ -555,7 +585,7 @@ status_t AudioTrack::setPosition(uint32_t position)
mCblk->server = position;
mCblk->forceReady = 1;
-
+
return NO_ERROR;
}
@@ -571,7 +601,7 @@ status_t AudioTrack::getPosition(uint32_t *position)
status_t AudioTrack::reload()
{
if (!stopped()) return INVALID_OPERATION;
-
+
flush();
mCblk->stepUser(mFrameCount);
@@ -579,6 +609,12 @@ status_t AudioTrack::reload()
return NO_ERROR;
}
+audio_io_handle_t AudioTrack::getOutput()
+{
+ return AudioSystem::getOutput((AudioSystem::stream_type)mStreamType,
+ mCblk->sampleRate, mFormat, mChannels, (AudioSystem::output_flags)mFlags);
+}
+
// -------------------------------------------------------------------------
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
@@ -608,7 +644,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
return WOULD_BLOCK;
timeout = 0;
result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
- if (__builtin_expect(result!=NO_ERROR, false)) {
+ if (__builtin_expect(result!=NO_ERROR, false)) {
cblk->waitTimeMs += waitTimeMs;
if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
// timing out when a loop has been set and we have already written upto loop end
@@ -616,7 +652,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
if (cblk->user < cblk->loopEnd) {
LOGW( "obtainBuffer timed out (is the CPU pegged?) %p "
"user=%08x, server=%08x", this, cblk->user, cblk->server);
- //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
+ //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
cblk->lock.unlock();
mAudioTrack->start();
cblk->lock.lock();
@@ -624,7 +660,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
}
cblk->waitTimeMs = 0;
}
-
+
if (--waitCount == 0) {
return TIMED_OUT;
}
@@ -636,7 +672,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
}
cblk->waitTimeMs = 0;
-
+
if (framesReq > framesAvail) {
framesReq = framesAvail;
}
@@ -653,12 +689,16 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
"but didn't need to be locked. We recovered, but "
"this shouldn't happen (user=%08x, server=%08x)", cblk->user, cblk->server);
- audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
- audioBuffer->channelCount= mChannelCount;
- audioBuffer->format = AudioSystem::PCM_16_BIT;
- audioBuffer->frameCount = framesReq;
- audioBuffer->size = framesReq*mChannelCount*sizeof(int16_t);
- audioBuffer->raw = (int8_t *)cblk->buffer(u);
+ audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
+ audioBuffer->channelCount = mChannelCount;
+ audioBuffer->frameCount = framesReq;
+ audioBuffer->size = framesReq * cblk->frameSize;
+ if (AudioSystem::isLinearPCM(mFormat)) {
+ audioBuffer->format = AudioSystem::PCM_16_BIT;
+ } else {
+ audioBuffer->format = mFormat;
+ }
+ audioBuffer->raw = (int8_t *)cblk->buffer(u);
active = mActive;
return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
@@ -690,10 +730,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
Buffer audioBuffer;
do {
- audioBuffer.frameCount = userSize/mChannelCount;
- if (mFormat == AudioSystem::PCM_16_BIT) {
- audioBuffer.frameCount >>= 1;
- }
+ audioBuffer.frameCount = userSize/frameSize();
+
// Calling obtainBuffer() with a negative wait count causes
// an (almost) infinite wait time.
status_t err = obtainBuffer(&audioBuffer, -1);
@@ -705,6 +743,7 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
}
size_t toWrite;
+
if (mFormat == AudioSystem::PCM_8_BIT) {
// Divide capacity by 2 to take expansion into account
toWrite = audioBuffer.size>>1;
@@ -742,13 +781,13 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
if (mCblk->flowControlFlag == 0) {
mCbf(EVENT_UNDERRUN, mUserData, 0);
if (mCblk->server == mCblk->frameCount) {
- mCbf(EVENT_BUFFER_END, mUserData, 0);
+ mCbf(EVENT_BUFFER_END, mUserData, 0);
}
mCblk->flowControlFlag = 1;
if (mSharedBuffer != 0) return false;
}
}
-
+
// Manage loop end callback
while (mLoopCount > mCblk->loopCount) {
int loopCount = -1;
@@ -767,7 +806,7 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
}
// Manage new position callback
- if(mUpdatePeriod > 0) {
+ if (mUpdatePeriod > 0) {
while (mCblk->server >= mNewPosition) {
mCbf(EVENT_NEW_POS, mUserData, (void *)&mNewPosition);
mNewPosition += mUpdatePeriod;
@@ -784,10 +823,10 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
do {
audioBuffer.frameCount = frames;
-
- // Calling obtainBuffer() with a wait count of 1
- // limits wait time to WAIT_PERIOD_MS. This prevents from being
- // stuck here not being able to handle timed events (position, markers, loops).
+
+ // Calling obtainBuffer() with a wait count of 1
+ // limits wait time to WAIT_PERIOD_MS. This prevents from being
+ // stuck here not being able to handle timed events (position, markers, loops).
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
@@ -832,7 +871,11 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
}
audioBuffer.size = writtenSize;
- audioBuffer.frameCount = writtenSize/mChannelCount/sizeof(int16_t);
+ // NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for
+        // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
+ // 16 bit.
+ audioBuffer.frameCount = writtenSize/mCblk->frameSize;
+
frames -= audioBuffer.frameCount;
releaseBuffer(&audioBuffer);
@@ -949,7 +992,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount)
// we switch to normal obtainBuffer() timeout period
if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
bufferTimeoutMs = MAX_RUN_TIMEOUT_MS - 1;
- }
+ }
// It is possible that we receive a flush()
// while the mixer is processing a block: in this case,
// stepServer() is called After the flush() has reset u & s and
@@ -981,7 +1024,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount)
void* audio_track_cblk_t::buffer(uint32_t offset) const
{
- return (int16_t *)this->buffers + (offset-userBase)*this->channels;
+ return (int8_t *)this->buffers + (offset - userBase) * this->frameSize;
}
uint32_t audio_track_cblk_t::framesAvailable()
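
Mirroring the capture side, playback now obtains its output handle from AudioPolicyService and brackets activity with startOutput()/stopOutput(), as AudioTrack::start()/stop()/pause() do above. A condensed sketch (stream, rate and format values are illustrative):

    #include <media/AudioSystem.h>
    using namespace android;

    void playbackLifecycleSketch() {
        audio_io_handle_t output = AudioSystem::getOutput(AudioSystem::MUSIC,
                44100, AudioSystem::PCM_16_BIT, AudioSystem::CHANNEL_OUT_STEREO,
                (AudioSystem::output_flags)0);                 // no OUTPUT_FLAG_DIRECT
        if (output == 0) return;                               // no output available for this stream
        AudioSystem::startOutput(output, AudioSystem::MUSIC);  // done in AudioTrack::start()
        // ... write PCM through the IAudioTrack created on this output ...
        AudioSystem::stopOutput(output, AudioSystem::MUSIC);   // done in AudioTrack::stop()/pause()
        AudioSystem::releaseOutput(output);                    // done in ~AudioTrack()
    }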
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 6fc0cb7..9385367 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -16,6 +16,7 @@
*/
#define LOG_TAG "IAudioFlinger"
+//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <stdint.h>
@@ -44,17 +45,21 @@ enum {
STREAM_VOLUME,
STREAM_MUTE,
SET_MODE,
- GET_MODE,
- SET_ROUTING,
- GET_ROUTING,
SET_MIC_MUTE,
GET_MIC_MUTE,
IS_MUSIC_ACTIVE,
- SET_PARAMETER,
+ SET_PARAMETERS,
+ GET_PARAMETERS,
REGISTER_CLIENT,
GET_INPUTBUFFERSIZE,
- WAKE_UP,
- IS_A2DP_ENABLED
+ OPEN_OUTPUT,
+ OPEN_DUPLICATE_OUTPUT,
+ CLOSE_OUTPUT,
+ SUSPEND_OUTPUT,
+ RESTORE_OUTPUT,
+ OPEN_INPUT,
+ CLOSE_INPUT,
+ SET_STREAM_OUTPUT
};
class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -74,6 +79,7 @@ public:
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
+ void *output,
status_t *status)
{
Parcel data, reply;
@@ -86,6 +92,7 @@ public:
data.writeInt32(frameCount);
data.writeInt32(flags);
data.writeStrongBinder(sharedBuffer->asBinder());
+ data.write(&output, sizeof(void *));
status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
if (lStatus != NO_ERROR) {
LOGE("createTrack error: %s", strerror(-lStatus));
@@ -99,7 +106,7 @@ public:
virtual sp<IAudioRecord> openRecord(
pid_t pid,
- int inputSource,
+ void *input,
uint32_t sampleRate,
int format,
int channelCount,
@@ -110,7 +117,7 @@ public:
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(pid);
- data.writeInt32(inputSource);
+ data.write(&input, sizeof(void *));
data.writeInt32(sampleRate);
data.writeInt32(format);
data.writeInt32(channelCount);
@@ -124,47 +131,47 @@ public:
return interface_cast<IAudioRecord>(reply.readStrongBinder());
}
- virtual uint32_t sampleRate(int output) const
+ virtual uint32_t sampleRate(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(SAMPLE_RATE, data, &reply);
return reply.readInt32();
}
- virtual int channelCount(int output) const
+ virtual int channelCount(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(CHANNEL_COUNT, data, &reply);
return reply.readInt32();
}
- virtual int format(int output) const
+ virtual int format(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(FORMAT, data, &reply);
return reply.readInt32();
}
- virtual size_t frameCount(int output) const
+ virtual size_t frameCount(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(FRAME_COUNT, data, &reply);
return reply.readInt32();
}
- virtual uint32_t latency(int output) const
+ virtual uint32_t latency(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(LATENCY, data, &reply);
return reply.readInt32();
}
@@ -203,12 +210,13 @@ public:
return reply.readInt32();
}
- virtual status_t setStreamVolume(int stream, float value)
+ virtual status_t setStreamVolume(int stream, float value, void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(stream);
data.writeFloat(value);
+ data.write(&output, sizeof(void *));
remote()->transact(SET_STREAM_VOLUME, data, &reply);
return reply.readInt32();
}
@@ -223,11 +231,12 @@ public:
return reply.readInt32();
}
- virtual float streamVolume(int stream) const
+ virtual float streamVolume(int stream, void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(stream);
+ data.write(&output, sizeof(void *));
remote()->transact(STREAM_VOLUME, data, &reply);
return reply.readFloat();
}
@@ -241,111 +250,205 @@ public:
return reply.readInt32();
}
- virtual status_t setRouting(int mode, uint32_t routes, uint32_t mask)
+ virtual status_t setMode(int mode)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(mode);
- data.writeInt32(routes);
- data.writeInt32(mask);
- remote()->transact(SET_ROUTING, data, &reply);
+ remote()->transact(SET_MODE, data, &reply);
return reply.readInt32();
}
- virtual uint32_t getRouting(int mode) const
+ virtual status_t setMicMute(bool state)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(mode);
- remote()->transact(GET_ROUTING, data, &reply);
+ data.writeInt32(state);
+ remote()->transact(SET_MIC_MUTE, data, &reply);
return reply.readInt32();
}
- virtual status_t setMode(int mode)
+ virtual bool getMicMute() const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(mode);
- remote()->transact(SET_MODE, data, &reply);
+ remote()->transact(GET_MIC_MUTE, data, &reply);
return reply.readInt32();
}
- virtual int getMode() const
+ virtual bool isMusicActive() const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_MODE, data, &reply);
+ remote()->transact(IS_MUSIC_ACTIVE, data, &reply);
return reply.readInt32();
}
- virtual status_t setMicMute(bool state)
+ virtual status_t setParameters(void *ioHandle, const String8& keyValuePairs)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(state);
- remote()->transact(SET_MIC_MUTE, data, &reply);
+ data.write(&ioHandle, sizeof(void *));
+ data.writeString8(keyValuePairs);
+ remote()->transact(SET_PARAMETERS, data, &reply);
return reply.readInt32();
}
- virtual bool getMicMute() const
+ virtual String8 getParameters(void *ioHandle, const String8& keys)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_MIC_MUTE, data, &reply);
+ data.write(&ioHandle, sizeof(void *));
+ data.writeString8(keys);
+ remote()->transact(GET_PARAMETERS, data, &reply);
+ return reply.readString8();
+ }
+
+ virtual void registerClient(const sp<IAudioFlingerClient>& client)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeStrongBinder(client->asBinder());
+ remote()->transact(REGISTER_CLIENT, data, &reply);
+ }
+
+ virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(sampleRate);
+ data.writeInt32(format);
+ data.writeInt32(channelCount);
+ remote()->transact(GET_INPUTBUFFERSIZE, data, &reply);
return reply.readInt32();
}
- virtual bool isMusicActive() const
+ virtual void *openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ uint32_t flags)
{
Parcel data, reply;
+ uint32_t devices = pDevices ? *pDevices : 0;
+ uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
+ uint32_t format = pFormat ? *pFormat : 0;
+ uint32_t channels = pChannels ? *pChannels : 0;
+ uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
+
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(IS_MUSIC_ACTIVE, data, &reply);
+ data.writeInt32(devices);
+ data.writeInt32(samplingRate);
+ data.writeInt32(format);
+ data.writeInt32(channels);
+ data.writeInt32(latency);
+ data.writeInt32(flags);
+ remote()->transact(OPEN_OUTPUT, data, &reply);
+ void *output;
+ reply.read(&output, sizeof(void *));
+ LOGV("openOutput() returned output, %p", output);
+ devices = reply.readInt32();
+ if (pDevices) *pDevices = devices;
+ samplingRate = reply.readInt32();
+ if (pSamplingRate) *pSamplingRate = samplingRate;
+ format = reply.readInt32();
+ if (pFormat) *pFormat = format;
+ channels = reply.readInt32();
+ if (pChannels) *pChannels = channels;
+ latency = reply.readInt32();
+ if (pLatencyMs) *pLatencyMs = latency;
+ return output;
+ }
+
+ virtual void *openDuplicateOutput(void *output1, void *output2)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.write(&output1, sizeof(void *));
+ data.write(&output2, sizeof(void *));
+ remote()->transact(OPEN_DUPLICATE_OUTPUT, data, &reply);
+ void *output;
+ reply.read(&output, sizeof(void *));
+ return output;
+ }
+
+ virtual status_t closeOutput(void *output)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.write(&output, sizeof(void *));
+ remote()->transact(CLOSE_OUTPUT, data, &reply);
return reply.readInt32();
}
- virtual status_t setParameter(const char* key, const char* value)
+ virtual status_t suspendOutput(void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeCString(key);
- data.writeCString(value);
- remote()->transact(SET_PARAMETER, data, &reply);
+ data.write(&output, sizeof(void *));
+ remote()->transact(SUSPEND_OUTPUT, data, &reply);
return reply.readInt32();
}
-
- virtual void registerClient(const sp<IAudioFlingerClient>& client)
+
+ virtual status_t restoreOutput(void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeStrongBinder(client->asBinder());
- remote()->transact(REGISTER_CLIENT, data, &reply);
+ data.write(&output, sizeof(void *));
+ remote()->transact(RESTORE_OUTPUT, data, &reply);
+ return reply.readInt32();
}
-
- virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
+
+ virtual void *openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics)
{
Parcel data, reply;
+ uint32_t devices = pDevices ? *pDevices : 0;
+ uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
+ uint32_t format = pFormat ? *pFormat : 0;
+ uint32_t channels = pChannels ? *pChannels : 0;
+
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(sampleRate);
+ data.writeInt32(devices);
+ data.writeInt32(samplingRate);
data.writeInt32(format);
- data.writeInt32(channelCount);
- remote()->transact(GET_INPUTBUFFERSIZE, data, &reply);
- return reply.readInt32();
+ data.writeInt32(channels);
+ data.writeInt32(acoustics);
+ remote()->transact(OPEN_INPUT, data, &reply);
+ void *input;
+ reply.read(&input, sizeof(void *));
+ devices = reply.readInt32();
+ if (pDevices) *pDevices = devices;
+ samplingRate = reply.readInt32();
+ if (pSamplingRate) *pSamplingRate = samplingRate;
+ format = reply.readInt32();
+ if (pFormat) *pFormat = format;
+ channels = reply.readInt32();
+ if (pChannels) *pChannels = channels;
+ return input;
}
-
- virtual void wakeUp()
+
+ virtual status_t closeInput(void *input)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(WAKE_UP, data, &reply, IBinder::FLAG_ONEWAY);
- return;
+ data.write(&input, sizeof(void *));
+ remote()->transact(CLOSE_INPUT, data, &reply);
+ return reply.readInt32();
}
- virtual bool isA2dpEnabled() const
+ virtual status_t setStreamOutput(uint32_t stream, void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(IS_A2DP_ENABLED, data, &reply);
- return (bool)reply.readInt32();
+ data.writeInt32(stream);
+ data.write(&output, sizeof(void *));
+ remote()->transact(SET_STREAM_OUTPUT, data, &reply);
+ return reply.readInt32();
}
};
@@ -367,10 +470,12 @@ status_t BnAudioFlinger::onTransact(
size_t bufferCount = data.readInt32();
uint32_t flags = data.readInt32();
sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
+ void *output;
+ data.read(&output, sizeof(void *));
status_t status;
sp<IAudioTrack> track = createTrack(pid,
streamType, sampleRate, format,
- channelCount, bufferCount, flags, buffer, &status);
+ channelCount, bufferCount, flags, buffer, output, &status);
reply->writeInt32(status);
reply->writeStrongBinder(track->asBinder());
return NO_ERROR;
@@ -378,14 +483,15 @@ status_t BnAudioFlinger::onTransact(
case OPEN_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
pid_t pid = data.readInt32();
- int inputSource = data.readInt32();
+ void *input;
+ data.read(&input, sizeof(void *));
uint32_t sampleRate = data.readInt32();
int format = data.readInt32();
int channelCount = data.readInt32();
size_t bufferCount = data.readInt32();
uint32_t flags = data.readInt32();
status_t status;
- sp<IAudioRecord> record = openRecord(pid, inputSource,
+ sp<IAudioRecord> record = openRecord(pid, input,
sampleRate, format, channelCount, bufferCount, flags, &status);
reply->writeInt32(status);
reply->writeStrongBinder(record->asBinder());
@@ -393,31 +499,36 @@ status_t BnAudioFlinger::onTransact(
} break;
case SAMPLE_RATE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( sampleRate(output) );
return NO_ERROR;
} break;
case CHANNEL_COUNT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( channelCount(output) );
return NO_ERROR;
} break;
case FORMAT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( format(output) );
return NO_ERROR;
} break;
case FRAME_COUNT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( frameCount(output) );
return NO_ERROR;
} break;
case LATENCY: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( latency(output) );
return NO_ERROR;
} break;
@@ -444,7 +555,10 @@ status_t BnAudioFlinger::onTransact(
case SET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int stream = data.readInt32();
- reply->writeInt32( setStreamVolume(stream, data.readFloat()) );
+ float volume = data.readFloat();
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32( setStreamVolume(stream, volume, output) );
return NO_ERROR;
} break;
case SET_STREAM_MUTE: {
@@ -456,7 +570,9 @@ status_t BnAudioFlinger::onTransact(
case STREAM_VOLUME: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int stream = data.readInt32();
- reply->writeFloat( streamVolume(stream) );
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeFloat( streamVolume(stream, output) );
return NO_ERROR;
} break;
case STREAM_MUTE: {
@@ -465,31 +581,12 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( streamMute(stream) );
return NO_ERROR;
} break;
- case SET_ROUTING: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int mode = data.readInt32();
- uint32_t routes = data.readInt32();
- uint32_t mask = data.readInt32();
- reply->writeInt32( setRouting(mode, routes, mask) );
- return NO_ERROR;
- } break;
- case GET_ROUTING: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int mode = data.readInt32();
- reply->writeInt32( getRouting(mode) );
- return NO_ERROR;
- } break;
case SET_MODE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int mode = data.readInt32();
reply->writeInt32( setMode(mode) );
return NO_ERROR;
} break;
- case GET_MODE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( getMode() );
- return NO_ERROR;
- } break;
case SET_MIC_MUTE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int state = data.readInt32();
@@ -506,13 +603,23 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( isMusicActive() );
return NO_ERROR;
} break;
- case SET_PARAMETER: {
+ case SET_PARAMETERS: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- const char *key = data.readCString();
- const char *value = data.readCString();
- reply->writeInt32( setParameter(key, value) );
+ void *ioHandle;
+ data.read(&ioHandle, sizeof(void *));
+ String8 keyValuePairs(data.readString8());
+ reply->writeInt32(setParameters(ioHandle, keyValuePairs));
return NO_ERROR;
- } break;
+ } break;
+ case GET_PARAMETERS: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *ioHandle;
+ data.read(&ioHandle, sizeof(void *));
+ String8 keys(data.readString8());
+ reply->writeString8(getParameters(ioHandle, keys));
+ return NO_ERROR;
+ } break;
+
case REGISTER_CLIENT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(data.readStrongBinder());
@@ -527,14 +634,93 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( getInputBufferSize(sampleRate, format, channelCount) );
return NO_ERROR;
} break;
- case WAKE_UP: {
+ case OPEN_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ uint32_t devices = data.readInt32();
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ uint32_t latency = data.readInt32();
+ uint32_t flags = data.readInt32();
+ void *output = openOutput(&devices,
+ &samplingRate,
+ &format,
+ &channels,
+ &latency,
+ flags);
+ LOGV("OPEN_OUTPUT output, %p", output);
+ reply->write(&output, sizeof(void *));
+ reply->writeInt32(devices);
+ reply->writeInt32(samplingRate);
+ reply->writeInt32(format);
+ reply->writeInt32(channels);
+ reply->writeInt32(latency);
+ return NO_ERROR;
+ } break;
+ case OPEN_DUPLICATE_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output1;
+ void *output2;
+ data.read(&output1, sizeof(void *));
+ data.read(&output2, sizeof(void *));
+ void *output = openDuplicateOutput(output1, output2);
+ reply->write(&output, sizeof(void *));
+ return NO_ERROR;
+ } break;
+ case CLOSE_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(closeOutput(output));
+ return NO_ERROR;
+ } break;
+ case SUSPEND_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(suspendOutput(output));
+ return NO_ERROR;
+ } break;
+ case RESTORE_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(restoreOutput(output));
+ return NO_ERROR;
+ } break;
+ case OPEN_INPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ uint32_t devices = data.readInt32();
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ uint32_t acoustics = data.readInt32();
+
+ void *input = openInput(&devices,
+ &samplingRate,
+ &format,
+ &channels,
+ acoustics);
+ reply->write(&input, sizeof(void *));
+ reply->writeInt32(devices);
+ reply->writeInt32(samplingRate);
+ reply->writeInt32(format);
+ reply->writeInt32(channels);
+ return NO_ERROR;
+ } break;
+ case CLOSE_INPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- wakeUp();
+ void *input;
+ data.read(&input, sizeof(void *));
+ reply->writeInt32(closeInput(input));
return NO_ERROR;
} break;
- case IS_A2DP_ENABLED: {
+ case SET_STREAM_OUTPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( (int)isA2dpEnabled() );
+ void *output;
+ uint32_t stream = data.readInt32();
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(setStreamOutput(stream, output));
return NO_ERROR;
} break;
default:
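The reworked IAudioFlinger methods above replace integer output indexes with opaque void * I/O handles: the proxy marshals the handle with data.write(&output, sizeof(void *)) and the stub reads it back symmetrically before dispatching. As a hedged illustration only, the snippet below shows how a caller might use the new openOutput(), where every non-NULL pointer argument is both a request and a place to receive the value actually configured; the audioFlinger variable is an assumed sp<IAudioFlinger> obtained elsewhere and is not taken from the patch.

    // Illustrative only: open a hardware output via the reworked interface.
    uint32_t devices      = 0;      // 0 lets AudioFlinger/policy choose the device
    uint32_t samplingRate = 44100;  // requested; overwritten with the actual rate
    uint32_t format       = 0;      // 0 = default format
    uint32_t channels     = 0;      // 0 = default channel mask
    uint32_t latencyMs    = 0;      // filled in from the reply
    void *output = audioFlinger->openOutput(&devices, &samplingRate, &format,
                                            &channels, &latencyMs, 0 /* flags */);
    if (output == NULL) {
        LOGE("openOutput() failed");
    } else {
        audioFlinger->setStreamVolume(AudioSystem::MUSIC, 1.0f, output);
        audioFlinger->closeOutput(output);
    }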
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 75699b4..eaae977 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -23,11 +23,12 @@
#include <binder/Parcel.h>
#include <media/IAudioFlingerClient.h>
+#include <media/AudioSystem.h>
namespace android {
enum {
- AUDIO_OUTPUT_CHANGED = IBinder::FIRST_CALL_TRANSACTION
+ IO_CONFIG_CHANGED = IBinder::FIRST_CALL_TRANSACTION
};
class BpAudioFlingerClient : public BpInterface<IAudioFlingerClient>
@@ -38,12 +39,25 @@ public:
{
}
- void a2dpEnabledChanged(bool enabled)
+ void ioConfigChanged(int event, void *param1, void *param2)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlingerClient::getInterfaceDescriptor());
- data.writeInt32((int)enabled);
- remote()->transact(AUDIO_OUTPUT_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
+ data.writeInt32(event);
+ data.write(&param1, sizeof(void *));
+ if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
+ uint32_t stream = *(uint32_t *)param2;
+ LOGV("ioConfigChanged stream %d", stream);
+ data.writeInt32(stream);
+ } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
+ AudioSystem::OutputDescriptor *desc = (AudioSystem::OutputDescriptor *)param2;
+ data.writeInt32(desc->samplingRate);
+ data.writeInt32(desc->format);
+ data.writeInt32(desc->channels);
+ data.writeInt32(desc->frameCount);
+ data.writeInt32(desc->latency);
+ }
+ remote()->transact(IO_CONFIG_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
}
};
@@ -55,10 +69,27 @@ status_t BnAudioFlingerClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
- case AUDIO_OUTPUT_CHANGED: {
+ case IO_CONFIG_CHANGED: {
CHECK_INTERFACE(IAudioFlingerClient, data, reply);
- bool enabled = (bool)data.readInt32();
- a2dpEnabledChanged(enabled);
+ int event = data.readInt32();
+ void *param1;
+ void *param2 = 0;
+ AudioSystem::OutputDescriptor desc;
+ uint32_t stream;
+ data.read(&param1, sizeof(void *));
+ if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
+ stream = data.readInt32();
+ param2 = &stream;
+ LOGV("STREAM_CONFIG_CHANGED stream %d", stream);
+ } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
+ desc.samplingRate = data.readInt32();
+ desc.format = data.readInt32();
+ desc.channels = data.readInt32();
+ desc.frameCount = data.readInt32();
+ desc.latency = data.readInt32();
+ param2 = &desc;
+ }
+ ioConfigChanged(event, param1, param2);
return NO_ERROR;
} break;
default:
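The client-side counterpart of the ioConfigChanged() serialization above is left to implementers; the sketch below (the class name MyAudioFlingerClient is assumed, not from the patch) shows how the event-dependent param2 payload might be consumed, mirroring the three cases handled in BnAudioFlingerClient::onTransact().

    // Hypothetical listener implementation, illustrative only.
    void MyAudioFlingerClient::ioConfigChanged(int event, void *param1, void *param2)
    {
        switch (event) {
        case AudioSystem::STREAM_CONFIG_CHANGED: {
            uint32_t stream = *(uint32_t *)param2;   // stream type that was moved
            LOGV("stream %d now on io handle %p", stream, param1);
        } break;
        case AudioSystem::OUTPUT_CLOSED:
        case AudioSystem::INPUT_CLOSED:
            // param2 carries no payload for close events
            break;
        default: {
            // output/input opened or reconfigured: param2 is an OutputDescriptor
            AudioSystem::OutputDescriptor *desc = (AudioSystem::OutputDescriptor *)param2;
            LOGV("io %p: %d Hz, format %d, channels %d", param1,
                 desc->samplingRate, desc->format, desc->channels);
        } break;
        }
    }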
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
new file mode 100644
index 0000000..0d8a329
--- /dev/null
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -0,0 +1,423 @@
+/*
+**
+** Copyright 2009, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IAudioPolicyService"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+
+#include <media/IAudioPolicyService.h>
+
+namespace android {
+
+enum {
+ SET_DEVICE_CONNECTION_STATE = IBinder::FIRST_CALL_TRANSACTION,
+ GET_DEVICE_CONNECTION_STATE,
+ SET_PHONE_STATE,
+ SET_RINGER_MODE,
+ SET_FORCE_USE,
+ GET_FORCE_USE,
+ GET_OUTPUT,
+ START_OUTPUT,
+ STOP_OUTPUT,
+ RELEASE_OUTPUT,
+ GET_INPUT,
+ START_INPUT,
+ STOP_INPUT,
+ RELEASE_INPUT,
+ INIT_STREAM_VOLUME,
+ SET_STREAM_VOLUME,
+ GET_STREAM_VOLUME
+};
+
+class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
+{
+public:
+ BpAudioPolicyService(const sp<IBinder>& impl)
+ : BpInterface<IAudioPolicyService>(impl)
+ {
+ }
+
+ virtual status_t setDeviceConnectionState(
+ AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(device));
+ data.writeInt32(static_cast <uint32_t>(state));
+ data.writeCString(device_address);
+ remote()->transact(SET_DEVICE_CONNECTION_STATE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(
+ AudioSystem::audio_devices device,
+ const char *device_address)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(device));
+ data.writeCString(device_address);
+ remote()->transact(GET_DEVICE_CONNECTION_STATE, data, &reply);
+ return static_cast <AudioSystem::device_connection_state>(reply.readInt32());
+ }
+
+ virtual status_t setPhoneState(int state)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(state);
+ remote()->transact(SET_PHONE_STATE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t setRingerMode(uint32_t mode, uint32_t mask)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(mode);
+ data.writeInt32(mask);
+ remote()->transact(SET_RINGER_MODE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(usage));
+ data.writeInt32(static_cast <uint32_t>(config));
+ remote()->transact(SET_FORCE_USE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(usage));
+ remote()->transact(GET_FORCE_USE, data, &reply);
+ return static_cast <AudioSystem::forced_config> (reply.readInt32());
+ }
+
+ virtual audio_io_handle_t getOutput(
+ AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ data.writeInt32(samplingRate);
+ data.writeInt32(static_cast <uint32_t>(format));
+ data.writeInt32(channels);
+ data.writeInt32(static_cast <uint32_t>(flags));
+ remote()->transact(GET_OUTPUT, data, &reply);
+ audio_io_handle_t output;
+ reply.read(&output, sizeof(audio_io_handle_t));
+ return output;
+ }
+
+ virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&output, sizeof(audio_io_handle_t));
+ data.writeInt32(stream);
+ remote()->transact(START_OUTPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&output, sizeof(audio_io_handle_t));
+ data.writeInt32(stream);
+ remote()->transact(STOP_OUTPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual void releaseOutput(audio_io_handle_t output)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&output, sizeof(audio_io_handle_t));
+ remote()->transact(RELEASE_OUTPUT, data, &reply);
+ }
+
+ virtual audio_io_handle_t getInput(
+ int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(inputSource);
+ data.writeInt32(samplingRate);
+ data.writeInt32(static_cast <uint32_t>(format));
+ data.writeInt32(channels);
+ data.writeInt32(static_cast <uint32_t>(acoustics));
+ remote()->transact(GET_INPUT, data, &reply);
+ audio_io_handle_t input;
+ reply.read(&input, sizeof(audio_io_handle_t));
+ return input;
+ }
+
+ virtual status_t startInput(audio_io_handle_t input)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&input, sizeof(audio_io_handle_t));
+ remote()->transact(START_INPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t stopInput(audio_io_handle_t input)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&input, sizeof(audio_io_handle_t));
+ remote()->transact(STOP_INPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual void releaseInput(audio_io_handle_t input)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&input, sizeof(audio_io_handle_t));
+ remote()->transact(RELEASE_INPUT, data, &reply);
+ }
+
+ virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ data.writeInt32(indexMin);
+ data.writeInt32(indexMax);
+ remote()->transact(INIT_STREAM_VOLUME, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ data.writeInt32(index);
+ remote()->transact(SET_STREAM_VOLUME, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ remote()->transact(GET_STREAM_VOLUME, data, &reply);
+ int lIndex = reply.readInt32();
+ if (index) *index = lIndex;
+ return static_cast <status_t> (reply.readInt32());
+ }
+};
+
+IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
+
+// ----------------------------------------------------------------------
+
+
+status_t BnAudioPolicyService::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch(code) {
+ case SET_DEVICE_CONNECTION_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32());
+ AudioSystem::device_connection_state state = static_cast <AudioSystem::device_connection_state>(data.readInt32());
+ const char *device_address = data.readCString();
+ reply->writeInt32(static_cast <uint32_t>(setDeviceConnectionState(device, state, device_address)));
+ return NO_ERROR;
+ } break;
+
+ case GET_DEVICE_CONNECTION_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32());
+ const char *device_address = data.readCString();
+ reply->writeInt32(static_cast <uint32_t>(getDeviceConnectionState(device, device_address)));
+ return NO_ERROR;
+ } break;
+
+ case SET_PHONE_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ reply->writeInt32(static_cast <uint32_t>(setPhoneState(data.readInt32())));
+ return NO_ERROR;
+ } break;
+
+ case SET_RINGER_MODE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ uint32_t mode = data.readInt32();
+ uint32_t mask = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(setRingerMode(mode, mask)));
+ return NO_ERROR;
+ } break;
+
+ case SET_FORCE_USE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
+ AudioSystem::forced_config config = static_cast <AudioSystem::forced_config>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config)));
+ return NO_ERROR;
+ } break;
+
+ case GET_FORCE_USE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(getForceUse(usage)));
+ return NO_ERROR;
+ } break;
+
+ case GET_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ AudioSystem::output_flags flags = static_cast <AudioSystem::output_flags>(data.readInt32());
+
+ audio_io_handle_t output = getOutput(stream,
+ samplingRate,
+ format,
+ channels,
+ flags);
+ reply->write(&output, sizeof(audio_io_handle_t));
+ return NO_ERROR;
+ } break;
+
+ case START_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t output;
+ data.read(&output, sizeof(audio_io_handle_t));
+ uint32_t stream = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(startOutput(output, (AudioSystem::stream_type)stream)));
+ return NO_ERROR;
+ } break;
+
+ case STOP_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t output;
+ data.read(&output, sizeof(audio_io_handle_t));
+ uint32_t stream = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(stopOutput(output, (AudioSystem::stream_type)stream)));
+ return NO_ERROR;
+ } break;
+
+ case RELEASE_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t output;
+ data.read(&output, sizeof(audio_io_handle_t));
+ releaseOutput(output);
+ return NO_ERROR;
+ } break;
+
+ case GET_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ int inputSource = data.readInt32();
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ AudioSystem::audio_in_acoustics acoustics = static_cast <AudioSystem::audio_in_acoustics>(data.readInt32());
+ audio_io_handle_t input = getInput(inputSource,
+ samplingRate,
+ format,
+ channels,
+ acoustics);
+ reply->write(&input, sizeof(audio_io_handle_t));
+ return NO_ERROR;
+ } break;
+
+ case START_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t input;
+ data.read(&input, sizeof(audio_io_handle_t));
+ reply->writeInt32(static_cast <uint32_t>(startInput(input)));
+ return NO_ERROR;
+ } break;
+
+ case STOP_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t input;
+ data.read(&input, sizeof(audio_io_handle_t));
+ reply->writeInt32(static_cast <uint32_t>(stopInput(input)));
+ return NO_ERROR;
+ } break;
+
+ case RELEASE_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t input;
+ data.read(&input, sizeof(audio_io_handle_t));
+ releaseInput(input);
+ return NO_ERROR;
+ } break;
+
+ case INIT_STREAM_VOLUME: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ int indexMin = data.readInt32();
+ int indexMax = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(initStreamVolume(stream, indexMin, indexMax)));
+ return NO_ERROR;
+ } break;
+
+ case SET_STREAM_VOLUME: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ int index = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index)));
+ return NO_ERROR;
+ } break;
+
+ case GET_STREAM_VOLUME: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ int index;
+ status_t status = getStreamVolumeIndex(stream, &index);
+ reply->writeInt32(index);
+ reply->writeInt32(static_cast <uint32_t>(status));
+ return NO_ERROR;
+ } break;
+
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
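Taken together, the new policy calls define an output lifecycle of getOutput() / startOutput() / stopOutput() / releaseOutput(). The following is a hedged usage sketch only: the function name and the way the IAudioPolicyService handle is obtained are assumptions, while the argument constants (AudioSystem::MUSIC, PCM_16_BIT, CHANNEL_OUT_STEREO) all appear elsewhere in this patch.

    // Illustrative only: drive the output lifecycle implied by the new interface.
    static void playOnMusicOutput(const sp<IAudioPolicyService>& aps)
    {
        audio_io_handle_t output = aps->getOutput(AudioSystem::MUSIC,
                                                  44100,                           // sampling rate
                                                  AudioSystem::PCM_16_BIT,         // format
                                                  AudioSystem::CHANNEL_OUT_STEREO, // channel mask
                                                  (AudioSystem::output_flags)0);   // no special flags
        if (output == 0) {
            LOGE("getOutput() failed");
            return;
        }
        aps->startOutput(output, AudioSystem::MUSIC);
        // ... write audio to the output while it is started ...
        aps->stopOutput(output, AudioSystem::MUSIC);
        aps->releaseOutput(output);
    }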
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 586aacb..ee9e1d8 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -99,7 +99,7 @@ int JetPlayer::init()
mAudioTrack->set(AudioSystem::MUSIC, //TODO parametrize this
pLibConfig->sampleRate,
1, // format = PCM 16bits per sample,
- pLibConfig->numChannels,
+ (pLibConfig->numChannels == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
mTrackBufferSize,
0);
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 5435da7..3ea64ae 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1001,7 +1001,7 @@ bool ToneGenerator::initAudioTrack() {
// Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
mpAudioTrack
- = new AudioTrack(mStreamType, 0, AudioSystem::PCM_16_BIT, 1, 0, 0, audioCallback, this, 0);
+ = new AudioTrack(mStreamType, 0, AudioSystem::PCM_16_BIT, AudioSystem::CHANNEL_OUT_MONO, 0, 0, audioCallback, this, 0);
if (mpAudioTrack == 0) {
LOGE("AudioTrack allocation failed");
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 77f7434..1d960c5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1310,11 +1310,21 @@ status_t MediaPlayerService::AudioOutput::open(
AudioTrack *t;
if (mCallback != NULL) {
t = new AudioTrack(
- mStreamType, sampleRate, format, channelCount, frameCount,
- 0 /* flags */, CallbackWrapper, this);
+ mStreamType,
+ sampleRate,
+ format,
+ (channelCount == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ frameCount,
+ 0 /* flags */,
+ CallbackWrapper,
+ this);
} else {
t = new AudioTrack(
- mStreamType, sampleRate, format, channelCount, frameCount);
+ mStreamType,
+ sampleRate,
+ format,
+ (channelCount == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ frameCount);
}
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index fbea0d4..7094cfa 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -28,6 +28,7 @@
#include <AudioFlinger.h>
#include <CameraService.h>
#include <MediaPlayerService.h>
+#include <AudioPolicyService.h>
#include <private/android_filesystem_config.h>
using namespace android;
@@ -40,6 +41,7 @@ int main(int argc, char** argv)
AudioFlinger::instantiate();
MediaPlayerService::instantiate();
CameraService::instantiate();
+ AudioPolicyService::instantiate();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
diff --git a/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java b/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java
index aefedc3..cea3a5a 100644
--- a/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java
+++ b/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java
@@ -140,7 +140,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorMono16MusicStream: " + res.mResultLog, res.mResult);
@@ -153,7 +153,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorStereo16MusicStream: " + res.mResultLog, res.mResult);
@@ -166,7 +166,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorMono16MusicStatic: " + res.mResultLog, res.mResult);
@@ -179,7 +179,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorStereo16MusicStatic: " + res.mResultLog, res.mResult);
@@ -196,7 +196,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorMono8MusicStream: " + res.mResultLog, res.mResult);
@@ -208,7 +208,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorStereo8MusicStream: " + res.mResultLog, res.mResult);
@@ -220,7 +220,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorMono8MusicStatic: " + res.mResultLog, res.mResult);
@@ -232,7 +232,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorStereo8MusicStatic: " + res.mResultLog, res.mResult);
@@ -248,15 +248,15 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
public void testConstructorStreamType() throws Exception {
// constants for test
final int TYPE_TEST_SR = 22050;
- final int TYPE_TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TYPE_TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TYPE_TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TYPE_TEST_MODE = AudioTrack.MODE_STREAM;
final int[] STREAM_TYPES = { AudioManager.STREAM_ALARM, AudioManager.STREAM_BLUETOOTH_SCO,
AudioManager.STREAM_MUSIC, AudioManager.STREAM_NOTIFICATION,
AudioManager.STREAM_RING, AudioManager.STREAM_SYSTEM,
- AudioManager.STREAM_VOICE_CALL };
+ AudioManager.STREAM_VOICE_CALL, AudioManager.STREAM_DTMF, };
final String[] STREAM_NAMES = { "STREAM_ALARM", "STREAM_BLUETOOTH_SCO", "STREAM_MUSIC",
- "STREAM_NOTIFICATION", "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL" };
+ "STREAM_NOTIFICATION", "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL", "STREAM_DTMF" };
boolean localTestRes = true;
AudioTrack track = null;
@@ -303,7 +303,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterInit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -324,7 +324,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionIncrease";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -352,7 +352,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterFlush";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -382,7 +382,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterStop";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -413,7 +413,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterPause";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -448,7 +448,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetStereoVolumeMax";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -474,7 +474,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetStereoVolumeMin";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -500,7 +500,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetStereoVolumeMid";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -526,7 +526,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRate";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -552,7 +552,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRateZero";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -574,7 +574,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRateTwiceOutputSR";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -601,7 +601,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetGetPlaybackRate";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -628,7 +628,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRateUninit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -655,7 +655,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionPlaying";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -682,7 +682,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionStopped";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -710,7 +710,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionPaused";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -738,7 +738,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionTooFar";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -770,7 +770,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsStream";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -794,7 +794,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsStartAfterEnd";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -818,7 +818,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsSuccess";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -842,7 +842,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsLoopTooLong";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -868,7 +868,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsStartTooFar";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -896,7 +896,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsEndTooFar";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -929,7 +929,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteOffsetTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -953,7 +953,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortOffsetTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -977,7 +977,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteSizeTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1001,7 +1001,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortSizeTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1025,7 +1025,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteNegativeOffset";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1049,7 +1049,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortNegativeOffset";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1073,7 +1073,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteNegativeSize";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1097,7 +1097,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortNegativeSize";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1121,7 +1121,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByte";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1145,7 +1145,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShort";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1169,7 +1169,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByte8bit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1193,7 +1193,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShort8bit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1221,7 +1221,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constant for test
final String TEST_NAME = "testGetMinBufferSizeTooLowSR";
final int TEST_SR = 3999;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1238,7 +1238,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constant for test
final String TEST_NAME = "testGetMinBufferSizeTooHighSR";
final int TEST_SR = 48001;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
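
The hunks above all make the same substitution: the tests now pass the output channel mask AudioFormat.CHANNEL_OUT_MONO instead of the older CHANNEL_CONFIGURATION_MONO constant. A minimal sketch of how the updated tests combine these constants (illustrative only, not taken from the patch; variable names are made up):

    int sr = 22050;                                  // TEST_SR
    int conf = AudioFormat.CHANNEL_OUT_MONO;         // was CHANNEL_CONFIGURATION_MONO
    int fmt = AudioFormat.ENCODING_PCM_16BIT;        // TEST_FORMAT
    int minBuf = AudioTrack.getMinBufferSize(sr, conf, fmt);
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sr, conf, fmt,
            minBuf, AudioTrack.MODE_STREAM);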
diff --git a/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java b/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java
index 2b888e4..f00fd39 100644
--- a/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java
+++ b/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java
@@ -64,7 +64,7 @@ public class DatabaseHelper extends SQLiteOpenHelper {
private static final String TAG = "SettingsProvider";
private static final String DATABASE_NAME = "settings.db";
- private static final int DATABASE_VERSION = 36;
+ private static final int DATABASE_VERSION = 37;
private Context mContext;
@@ -414,7 +414,27 @@ public class DatabaseHelper extends SQLiteOpenHelper {
}
upgradeVersion = 36;
}
-
+ if (upgradeVersion == 36) {
+ // This upgrade adds the STREAM_SYSTEM_ENFORCED type to the list of
+ // types affected by ringer modes (silent, vibrate, etc.)
+ db.beginTransaction();
+ try {
+ db.execSQL("DELETE FROM system WHERE name='"
+ + Settings.System.MODE_RINGER_STREAMS_AFFECTED + "'");
+ int newValue = (1 << AudioManager.STREAM_RING)
+ | (1 << AudioManager.STREAM_NOTIFICATION)
+ | (1 << AudioManager.STREAM_SYSTEM)
+ | (1 << AudioManager.STREAM_SYSTEM_ENFORCED);
+ db.execSQL("INSERT INTO system ('name', 'value') values ('"
+ + Settings.System.MODE_RINGER_STREAMS_AFFECTED + "', '"
+ + String.valueOf(newValue) + "')");
+ db.setTransactionSuccessful();
+ } finally {
+ db.endTransaction();
+ }
+ upgradeVersion = 37;
+ }
+
if (upgradeVersion != currentVersion) {
Log.w(TAG, "Got stuck trying to upgrade from version " + upgradeVersion
+ ", must wipe the settings provider");
@@ -575,7 +595,7 @@ public class DatabaseHelper extends SQLiteOpenHelper {
// By default, only the ring/notification and system streams are affected
loadSetting(stmt, Settings.System.MODE_RINGER_STREAMS_AFFECTED,
(1 << AudioManager.STREAM_RING) | (1 << AudioManager.STREAM_NOTIFICATION) |
- (1 << AudioManager.STREAM_SYSTEM));
+ (1 << AudioManager.STREAM_SYSTEM) | (1 << AudioManager.STREAM_SYSTEM_ENFORCED));
loadSetting(stmt, Settings.System.MUTE_STREAMS_AFFECTED,
((1 << AudioManager.STREAM_MUSIC) |
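
Both the version-37 upgrade and the new default encode the streams affected by ringer mode as a bitmask keyed by stream type, now including AudioManager.STREAM_SYSTEM_ENFORCED. A minimal sketch of how such a mask is built and queried (the isAffectedByRingerMode helper is hypothetical, not part of the patch):

    int mask = (1 << AudioManager.STREAM_RING)
             | (1 << AudioManager.STREAM_NOTIFICATION)
             | (1 << AudioManager.STREAM_SYSTEM)
             | (1 << AudioManager.STREAM_SYSTEM_ENFORCED);

    // Hypothetical helper: true when the given stream follows ringer mode changes.
    static boolean isAffectedByRingerMode(int streamType, int mask) {
        return (mask & (1 << streamType)) != 0;
    }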
diff --git a/packages/TtsService/jni/android_tts_SynthProxy.cpp b/packages/TtsService/jni/android_tts_SynthProxy.cpp
index 99d7723..a4090cf 100644
--- a/packages/TtsService/jni/android_tts_SynthProxy.cpp
+++ b/packages/TtsService/jni/android_tts_SynthProxy.cpp
@@ -71,7 +71,7 @@ class SynthProxyJniStorage {
AudioTrack* mAudioOut;
AudioSystem::stream_type mStreamType;
uint32_t mSampleRate;
- AudioSystem::audio_format mAudFormat;
+ uint32_t mAudFormat;
int mNbChannels;
int8_t * mBuffer;
size_t mBufferSize;
@@ -118,7 +118,6 @@ class SynthProxyJniStorage {
mSampleRate = rate;
mAudFormat = format;
mNbChannels = channel;
-
mStreamType = streamType;
// retrieve system properties to ensure successful creation of the
@@ -139,7 +138,8 @@ class SynthProxyJniStorage {
if (minBufCount < 2) minBufCount = 2;
int minFrameCount = (afFrameCount * rate * minBufCount)/afSampleRate;
- mAudioOut = new AudioTrack(mStreamType, rate, format, channel,
+ mAudioOut = new AudioTrack(mStreamType, rate, format,
+ (channel == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
minFrameCount > 4096 ? minFrameCount : 4096,
0, 0, 0, 0); // not using an AudioTrack callback
@@ -182,7 +182,7 @@ void prepAudioTrack(SynthProxyJniStorage* pJniData, AudioSystem::stream_type str
* Directly speaks using AudioTrack or write to file
*/
static tts_callback_status ttsSynthDoneCB(void *& userdata, uint32_t rate,
- AudioSystem::audio_format format, int channel,
+ uint32_t format, int channel,
int8_t *&wav, size_t &bufferSize, tts_synth_status status) {
//LOGV("ttsSynthDoneCallback: %d bytes", bufferSize);
@@ -202,7 +202,7 @@ static tts_callback_status ttsSynthDoneCB(void *& userdata, uint32_t rate,
}
if (bufferSize > 0) {
- prepAudioTrack(pJniData, pForAfter->streamType, rate, format, channel);
+ prepAudioTrack(pJniData, pForAfter->streamType, rate, (AudioSystem::audio_format)format, channel);
if (pJniData->mAudioOut) {
pJniData->mAudioOut->write(wav, bufferSize);
memset(wav, 0, bufferSize);
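
The native TTS glue now carries the sample format as a plain uint32_t and derives the AudioTrack channel mask from the channel count instead of passing the count through. A Java-side sketch of the same mapping (hypothetical helper, not part of the patch):

    // Hypothetical: pick an output channel mask from a raw channel count, mirroring
    // the (channel == 2) ? CHANNEL_OUT_STEREO : CHANNEL_OUT_MONO choice above.
    static int channelMaskFromCount(int channelCount) {
        return (channelCount == 2) ? AudioFormat.CHANNEL_OUT_STEREO
                                   : AudioFormat.CHANNEL_OUT_MONO;
    }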
diff --git a/services/java/com/android/server/HeadsetObserver.java b/services/java/com/android/server/HeadsetObserver.java
index 9b0a2d4..bee3108 100644
--- a/services/java/com/android/server/HeadsetObserver.java
+++ b/services/java/com/android/server/HeadsetObserver.java
@@ -41,10 +41,16 @@ class HeadsetObserver extends UEventObserver {
private static final String HEADSET_STATE_PATH = "/sys/class/switch/h2w/state";
private static final String HEADSET_NAME_PATH = "/sys/class/switch/h2w/name";
+ private static final int BIT_HEADSET = (1 << 0);
+ private static final int BIT_HEADSET_NO_MIC = (1 << 1);
+ private static final int BIT_TTY = (1 << 2);
+ private static final int BIT_FM_HEADSET = (1 << 3);
+ private static final int BIT_FM_SPEAKER = (1 << 4);
+
private int mHeadsetState;
+ private int mPrevHeadsetState;
private String mHeadsetName;
- private boolean mAudioRouteNeedsUpdate;
- private AudioManager mAudioManager;
+ private boolean mPendingIntent;
private final Context mContext;
private final WakeLock mWakeLock; // held while there is a pending route change
@@ -76,6 +82,7 @@ class HeadsetObserver extends UEventObserver {
String newName = mHeadsetName;
int newState = mHeadsetState;
+ mPrevHeadsetState = mHeadsetState;
try {
FileReader file = new FileReader(HEADSET_STATE_PATH);
int len = file.read(buffer, 0, 1024);
@@ -91,20 +98,25 @@ class HeadsetObserver extends UEventObserver {
Log.e(TAG, "" , e);
}
- mAudioManager = (AudioManager) mContext.getSystemService(Context.AUDIO_SERVICE);
update(newName, newState);
}
private synchronized final void update(String newName, int newState) {
if (newName != mHeadsetName || newState != mHeadsetState) {
- boolean isUnplug = (newState == 0 && mHeadsetState == 1);
+ boolean isUnplug = false;
+ if ( (mHeadsetState & BIT_HEADSET) > 0 || (mHeadsetState & BIT_HEADSET_NO_MIC) > 0) {
+ if ((newState & BIT_HEADSET) == 0 && (newState & BIT_HEADSET_NO_MIC) == 0)
+ isUnplug = true;
+ }
mHeadsetName = newName;
+ mPrevHeadsetState = mHeadsetState;
mHeadsetState = newState;
- mAudioRouteNeedsUpdate = true;
-
- sendIntent(isUnplug);
+ mPendingIntent = true;
if (isUnplug) {
+ Intent intent = new Intent(AudioManager.ACTION_AUDIO_BECOMING_NOISY);
+ mContext.sendBroadcast(intent);
+
// It can take hundreds of ms to flush the audio pipeline after
// apps pause audio playback, but audio route changes are
// immediate, so delay the route change by 1000ms.
@@ -113,12 +125,13 @@ class HeadsetObserver extends UEventObserver {
mWakeLock.acquire();
mHandler.sendEmptyMessageDelayed(0, 1000);
} else {
- updateAudioRoute();
+ sendIntent();
+ mPendingIntent = false;
}
}
}
- private synchronized final void sendIntent(boolean isUnplug) {
+ private synchronized final void sendIntent() {
// Pack up the values and broadcast them to everyone
Intent intent = new Intent(Intent.ACTION_HEADSET_PLUG);
intent.addFlags(Intent.FLAG_RECEIVER_REGISTERED_ONLY);
@@ -128,24 +141,15 @@ class HeadsetObserver extends UEventObserver {
// TODO: Should we require a permission?
ActivityManagerNative.broadcastStickyIntent(intent, null);
-
- if (isUnplug) {
- intent = new Intent(AudioManager.ACTION_AUDIO_BECOMING_NOISY);
- mContext.sendBroadcast(intent);
- }
- }
-
- private synchronized final void updateAudioRoute() {
- if (mAudioRouteNeedsUpdate) {
- mAudioManager.setWiredHeadsetOn(mHeadsetState == 1);
- mAudioRouteNeedsUpdate = false;
- }
}
private final Handler mHandler = new Handler() {
@Override
public void handleMessage(Message msg) {
- updateAudioRoute();
+ if (mPendingIntent) {
+ sendIntent();
+ mPendingIntent = false;
+ }
mWakeLock.release();
}
};
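
HeadsetObserver now tracks headset state as a bitfield (headset with or without mic, TTY, FM), broadcasts ACTION_AUDIO_BECOMING_NOISY immediately on unplug, and defers the sticky ACTION_HEADSET_PLUG intent by 1000 ms through the handler instead of switching audio routes itself. A standalone sketch of the unplug test the new update() performs (helper name is illustrative; bit constants as defined above):

    // True when a wired headset (with or without mic) was present and is now gone.
    static boolean isHeadsetUnplug(int oldState, int newState) {
        boolean wasPlugged   = (oldState & (BIT_HEADSET | BIT_HEADSET_NO_MIC)) != 0;
        boolean nowUnplugged = (newState & (BIT_HEADSET | BIT_HEADSET_NO_MIC)) == 0;
        return wasPlugged && nowUnplugged;
    }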