summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
Diffstat (limited to 'media')
-rw-r--r--media/java/android/media/AudioFormat.java55
-rw-r--r--media/java/android/media/AudioManager.java203
-rw-r--r--media/java/android/media/AudioRecord.java36
-rw-r--r--media/java/android/media/AudioService.java701
-rw-r--r--media/java/android/media/AudioSystem.java157
-rw-r--r--media/java/android/media/AudioTrack.java35
-rw-r--r--media/java/android/media/IAudioService.aidl16
-rw-r--r--media/java/android/media/JetPlayer.java2
-rw-r--r--media/java/android/media/ToneGenerator.java4
-rw-r--r--media/jni/soundpool/SoundPool.cpp5
-rw-r--r--media/libmedia/Android.mk3
-rw-r--r--media/libmedia/AudioRecord.cpp90
-rw-r--r--media/libmedia/AudioSystem.cpp742
-rw-r--r--media/libmedia/AudioTrack.cpp189
-rw-r--r--media/libmedia/IAudioFlinger.cpp382
-rw-r--r--media/libmedia/IAudioFlingerClient.cpp45
-rw-r--r--media/libmedia/IAudioPolicyService.cpp423
-rw-r--r--media/libmedia/JetPlayer.cpp2
-rw-r--r--media/libmedia/ToneGenerator.cpp2
-rw-r--r--media/libmediaplayerservice/MediaPlayerService.cpp16
-rw-r--r--media/mediaserver/main_mediaserver.cpp2
-rw-r--r--media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java96
22 files changed, 2206 insertions, 1000 deletions
diff --git a/media/java/android/media/AudioFormat.java b/media/java/android/media/AudioFormat.java
index 0732b61..500f6a4 100644
--- a/media/java/android/media/AudioFormat.java
+++ b/media/java/android/media/AudioFormat.java
@@ -37,15 +37,58 @@ public class AudioFormat {
public static final int ENCODING_PCM_8BIT = 3; // accessed by native code
/** Invalid audio channel configuration */
- public static final int CHANNEL_CONFIGURATION_INVALID = 0;
+ /** @deprecated use CHANNEL_INVALID instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_INVALID = 0;
/** Default audio channel configuration */
- public static final int CHANNEL_CONFIGURATION_DEFAULT = 1;
+ /** @deprecated use CHANNEL_OUT_DEFAULT or CHANNEL_IN_DEFAULT instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_DEFAULT = 1;
/** Mono audio configuration */
- public static final int CHANNEL_CONFIGURATION_MONO = 2;
+ /** @deprecated use CHANNEL_OUT_MONO or CHANNEL_IN_MONO instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_MONO = 2;
/** Stereo (2 channel) audio configuration */
- public static final int CHANNEL_CONFIGURATION_STEREO = 3;
-
-}
+ /** @deprecated use CHANNEL_OUT_STEREO or CHANNEL_IN_STEREO instead */
+ @Deprecated public static final int CHANNEL_CONFIGURATION_STEREO = 3;
+ /** Invalid audio channel mask */
+ public static final int CHANNEL_INVALID = -1;
+ /** Default audio channel mask */
+ // Channel mask definitions must be kept in sync with native values in include/media/AudioSystem.h
+ public static final int CHANNEL_OUT_DEFAULT = 0;
+ public static final int CHANNEL_OUT_FRONT_LEFT = 0x1;
+ public static final int CHANNEL_OUT_FRONT_RIGHT = 0x2;
+ public static final int CHANNEL_OUT_FRONT_CENTER = 0x4;
+ public static final int CHANNEL_OUT_LOW_FREQUENCY = 0x8;
+ public static final int CHANNEL_OUT_BACK_LEFT = 0x10;
+ public static final int CHANNEL_OUT_BACK_RIGHT = 0x20;
+ public static final int CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40;
+ public static final int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80;
+ public static final int CHANNEL_OUT_BACK_CENTER = 0x100;
+ public static final int CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT;
+ public static final int CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT);
+ public static final int CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
+ public static final int CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER);
+ public static final int CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
+ public static final int CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
+ CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
+ CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
+ public static final int CHANNEL_IN_DEFAULT = 0;
+ public static final int CHANNEL_IN_LEFT = 0x10000;
+ public static final int CHANNEL_IN_RIGHT = 0x20000;
+ public static final int CHANNEL_IN_FRONT = 0x40000;
+ public static final int CHANNEL_IN_BACK = 0x80000;
+ public static final int CHANNEL_IN_LEFT_PROCESSED = 0x100000;
+ public static final int CHANNEL_IN_RIGHT_PROCESSED = 0x200000;
+ public static final int CHANNEL_IN_FRONT_PROCESSED = 0x400000;
+ public static final int CHANNEL_IN_BACK_PROCESSED = 0x800000;
+ public static final int CHANNEL_IN_PRESSURE = 0x1000000;
+ public static final int CHANNEL_IN_X_AXIS = 0x2000000;
+ public static final int CHANNEL_IN_Y_AXIS = 0x4000000;
+ public static final int CHANNEL_IN_Z_AXIS = 0x8000000;
+ public static final int CHANNEL_IN_MONO = CHANNEL_IN_FRONT;
+ public static final int CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT);
+}
diff --git a/media/java/android/media/AudioManager.java b/media/java/android/media/AudioManager.java
index a65a417..040d4bc 100644
--- a/media/java/android/media/AudioManager.java
+++ b/media/java/android/media/AudioManager.java
@@ -140,11 +140,17 @@ public class AudioManager {
public static final int STREAM_NOTIFICATION = AudioSystem.STREAM_NOTIFICATION;
/** @hide The audio stream for phone calls when connected to bluetooth */
public static final int STREAM_BLUETOOTH_SCO = AudioSystem.STREAM_BLUETOOTH_SCO;
+ /** @hide The audio stream for enforced system sounds in certain countries (e.g. camera shutter in Japan) */
+ public static final int STREAM_SYSTEM_ENFORCED = AudioSystem.STREAM_SYSTEM_ENFORCED;
+ /** The audio stream for DTMF Tones */
+ public static final int STREAM_DTMF = AudioSystem.STREAM_DTMF;
+ /** @hide The audio stream for text to speech (TTS) */
+ public static final int STREAM_TTS = AudioSystem.STREAM_TTS;
/** Number of audio streams */
/**
* @deprecated Use AudioSystem.getNumStreamTypes() instead
*/
- public static final int NUM_STREAMS = AudioSystem.NUM_STREAMS;
+ @Deprecated public static final int NUM_STREAMS = AudioSystem.NUM_STREAMS;
/** @hide Maximum volume index values for audio streams */
@@ -156,6 +162,9 @@ public class AudioManager {
8, // STREAM_ALARM
8, // STREAM_NOTIFICATION
16, // STREAM_BLUETOOTH_SCO
+ 8, // STREAM_SYSTEM_ENFORCED
+ 16, // STREAM_DTMF
+ 16 // STREAM_TTS
};
/** @hide Default volume index values for audio streams */
@@ -166,7 +175,10 @@ public class AudioManager {
11, // STREAM_MUSIC
6, // STREAM_ALARM
5, // STREAM_NOTIFICATION
- 7 // STREAM_BLUETOOTH_SCO
+ 7, // STREAM_BLUETOOTH_SCO
+ 5, // STREAM_SYSTEM_ENFORCED
+ 11, // STREAM_DTMF
+ 11 // STREAM_TTS
};
/**
@@ -637,9 +649,11 @@ public class AudioManager {
* <var>false</var> to turn it off
*/
public void setSpeakerphoneOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_SPEAKER: 0, ROUTE_SPEAKER);
+ if (on) {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_SPEAKER);
+ } else {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_NONE);
+ }
}
/**
@@ -648,41 +662,47 @@ public class AudioManager {
* @return true if speakerphone is on, false if it's off
*/
public boolean isSpeakerphoneOn() {
- return (getRoutingP(MODE_IN_CALL) & ROUTE_SPEAKER) == 0 ? false : true;
+ if (AudioSystem.getForceUse(AudioSystem.FOR_COMMUNICATION) == AudioSystem.FORCE_SPEAKER) {
+ return true;
+ } else {
+ return false;
+ }
}
/**
- * Sets audio routing to the Bluetooth headset on or off.
+ * Request use of Bluetooth SCO headset for communications.
*
- * @param on set <var>true</var> to route SCO (voice) audio to/from Bluetooth
- * headset; <var>false</var> to route audio to/from phone earpiece
+ * @param on set <var>true</var> to use bluetooth SCO for communications;
+ * <var>false</var> to not use bluetooth SCO for communications
*/
public void setBluetoothScoOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_BLUETOOTH_SCO: 0, ROUTE_BLUETOOTH_SCO);
+ if (on) {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_BT_SCO);
+ } else {
+ AudioSystem.setForceUse(AudioSystem.FOR_COMMUNICATION, AudioSystem.FORCE_NONE);
+ }
}
/**
- * Checks whether audio routing to the Bluetooth headset is on or off.
+ * Checks whether communications use Bluetooth SCO.
*
- * @return true if SCO audio is being routed to/from Bluetooth headset;
+ * @return true if SCO is used for communications;
* false if otherwise
*/
public boolean isBluetoothScoOn() {
- return (getRoutingP(MODE_IN_CALL) & ROUTE_BLUETOOTH_SCO) == 0 ? false : true;
+ if (AudioSystem.getForceUse(AudioSystem.FOR_COMMUNICATION) == AudioSystem.FORCE_BT_SCO) {
+ return true;
+ } else {
+ return false;
+ }
}
/**
- * Sets A2DP audio routing to the Bluetooth headset on or off.
- *
* @param on set <var>true</var> to route A2DP audio to/from Bluetooth
* headset; <var>false</var> disable A2DP audio
+ * @deprecated Do not use.
*/
- public void setBluetoothA2dpOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_BLUETOOTH_A2DP: 0, ROUTE_BLUETOOTH_A2DP);
+ @Deprecated public void setBluetoothA2dpOn(boolean on){
}
/**
@@ -692,7 +712,12 @@ public class AudioManager {
* false if otherwise
*/
public boolean isBluetoothA2dpOn() {
- return (getRoutingP(MODE_NORMAL) & ROUTE_BLUETOOTH_A2DP) == 0 ? false : true;
+ if (AudioSystem.getDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP,"")
+ == AudioSystem.DEVICE_STATE_UNAVAILABLE) {
+ return false;
+ } else {
+ return true;
+ }
}
/**
@@ -700,12 +725,9 @@ public class AudioManager {
*
* @param on set <var>true</var> to route audio to/from wired
* headset; <var>false</var> disable wired headset audio
- * @hide
+ * @deprecated Do not use.
*/
- public void setWiredHeadsetOn(boolean on){
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // MODE_INVALID indicates to AudioService that setRouting() was initiated by AudioManager
- setRoutingP(MODE_INVALID, on ? ROUTE_HEADSET: 0, ROUTE_HEADSET);
+ @Deprecated public void setWiredHeadsetOn(boolean on){
}
/**
@@ -713,10 +735,14 @@ public class AudioManager {
*
* @return true if audio is being routed to/from wired headset;
* false if otherwise
- * @hide
*/
public boolean isWiredHeadsetOn() {
- return (getRoutingP(MODE_NORMAL) & ROUTE_HEADSET) == 0 ? false : true;
+ if (AudioSystem.getDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADSET,"")
+ == AudioSystem.DEVICE_STATE_UNAVAILABLE) {
+ return false;
+ } else {
+ return true;
+ }
}
/**
@@ -726,12 +752,7 @@ public class AudioManager {
* <var>false</var> to turn mute off
*/
public void setMicrophoneMute(boolean on){
- IAudioService service = getService();
- try {
- service.setMicrophoneMute(on);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setMicrophoneMute", e);
- }
+ AudioSystem.muteMicrophone(on);
}
/**
@@ -740,13 +761,7 @@ public class AudioManager {
* @return true if microphone is muted, false if it's not
*/
public boolean isMicrophoneMute() {
- IAudioService service = getService();
- try {
- return service.isMicrophoneMute();
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in isMicrophoneMute", e);
- return false;
- }
+ return AudioSystem.isMicrophoneMuted();
}
/**
@@ -809,32 +824,39 @@ public class AudioManager {
/* Routing bits for setRouting/getRouting API */
/**
* Routing audio output to earpiece
+ * @deprecated
*/
- public static final int ROUTE_EARPIECE = AudioSystem.ROUTE_EARPIECE;
+ @Deprecated public static final int ROUTE_EARPIECE = AudioSystem.ROUTE_EARPIECE;
/**
* Routing audio output to speaker
+ * @deprecated
*/
- public static final int ROUTE_SPEAKER = AudioSystem.ROUTE_SPEAKER;
+ @Deprecated public static final int ROUTE_SPEAKER = AudioSystem.ROUTE_SPEAKER;
/**
* @deprecated use {@link #ROUTE_BLUETOOTH_SCO}
+ * @deprecated
*/
@Deprecated public static final int ROUTE_BLUETOOTH = AudioSystem.ROUTE_BLUETOOTH_SCO;
/**
* Routing audio output to bluetooth SCO
+ * @deprecated
*/
- public static final int ROUTE_BLUETOOTH_SCO = AudioSystem.ROUTE_BLUETOOTH_SCO;
+ @Deprecated public static final int ROUTE_BLUETOOTH_SCO = AudioSystem.ROUTE_BLUETOOTH_SCO;
/**
* Routing audio output to headset
+ * @deprecated
*/
- public static final int ROUTE_HEADSET = AudioSystem.ROUTE_HEADSET;
+ @Deprecated public static final int ROUTE_HEADSET = AudioSystem.ROUTE_HEADSET;
/**
* Routing audio output to bluetooth A2DP
+ * @deprecated
*/
- public static final int ROUTE_BLUETOOTH_A2DP = AudioSystem.ROUTE_BLUETOOTH_A2DP;
+ @Deprecated public static final int ROUTE_BLUETOOTH_A2DP = AudioSystem.ROUTE_BLUETOOTH_A2DP;
/**
* Used for mask parameter of {@link #setRouting(int,int,int)}.
+ * @deprecated
*/
- public static final int ROUTE_ALL = AudioSystem.ROUTE_ALL;
+ @Deprecated public static final int ROUTE_ALL = AudioSystem.ROUTE_ALL;
/**
* Sets the audio routing for a specified mode
@@ -846,16 +868,10 @@ public class AudioManager {
* ROUTE_xxx types. Unset bits indicate the route should be left unchanged
*
* @deprecated Do not set audio routing directly, use setSpeakerphoneOn(),
- * setBluetoothScoOn(), setBluetoothA2dpOn() and setWiredHeadsetOn() methods instead.
+ * setBluetoothScoOn() methods instead.
*/
-
+ @Deprecated
public void setRouting(int mode, int routes, int mask) {
- IAudioService service = getService();
- try {
- service.setRouting(mode, routes, mask);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setRouting", e);
- }
}
/**
@@ -869,13 +885,7 @@ public class AudioManager {
*/
@Deprecated
public int getRouting(int mode) {
- IAudioService service = getService();
- try {
- return service.getRouting(mode);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in getRouting", e);
- return -1;
- }
+ return -1;
}
/**
@@ -884,13 +894,7 @@ public class AudioManager {
* @return true if any music tracks are active.
*/
public boolean isMusicActive() {
- IAudioService service = getService();
- try {
- return service.isMusicActive();
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in isMusicActive", e);
- return false;
- }
+ return AudioSystem.isMusicActive();
}
/*
@@ -906,14 +910,32 @@ public class AudioManager {
*/
/**
* @hide
+ * @deprecated Use {@link #setParameters(String)} instead
*/
- public void setParameter(String key, String value) {
- IAudioService service = getService();
- try {
- service.setParameter(key, value);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setParameter", e);
- }
+ @Deprecated public void setParameter(String key, String value) {
+ setParameters(key+"="+value);
+ }
+
+ /**
+ * Sets a variable number of parameter values to audio hardware.
+ *
+ * @param keyValuePairs list of parameters key value pairs in the form:
+ * key1=value1;key2=value2;...
+ *
+ */
+ public void setParameters(String keyValuePairs) {
+ AudioSystem.setParameters(keyValuePairs);
+ }
+
+ /**
+ * Gets a variable number of parameter values from audio hardware.
+ *
+ * @param keys list of parameters
+ * @return list of parameters key value pairs in the form:
+ * key1=value1;key2=value2;...
+ */
+ public String getParameters(String keys) {
+ return AudioSystem.getParameters(keys);
}
/* Sound effect identifiers */
@@ -1082,31 +1104,4 @@ public class AudioManager {
* {@hide}
*/
private IBinder mICallBack = new Binder();
-
- /**
- * {@hide}
- */
- private void setRoutingP(int mode, int routes, int mask) {
- IAudioService service = getService();
- try {
- service.setRouting(mode, routes, mask);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in setRouting", e);
- }
- }
-
-
- /**
- * {@hide}
- */
- private int getRoutingP(int mode) {
- IAudioService service = getService();
- try {
- return service.getRouting(mode);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in getRouting", e);
- return -1;
- }
- }
-
}
diff --git a/media/java/android/media/AudioRecord.java b/media/java/android/media/AudioRecord.java
index 4d1535f..d96331f 100644
--- a/media/java/android/media/AudioRecord.java
+++ b/media/java/android/media/AudioRecord.java
@@ -86,7 +86,7 @@ public class AudioRecord
public static final int ERROR_INVALID_OPERATION = -3;
private static final int AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT = -16;
- private static final int AUDIORECORD_ERROR_SETUP_INVALIDCHANNELCOUNT = -17;
+ private static final int AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK = -17;
private static final int AUDIORECORD_ERROR_SETUP_INVALIDFORMAT = -18;
private static final int AUDIORECORD_ERROR_SETUP_INVALIDSOURCE = -19;
private static final int AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED = -20;
@@ -135,7 +135,7 @@ public class AudioRecord
/**
* The current audio channel configuration
*/
- private int mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ private int mChannelConfiguration = AudioFormat.CHANNEL_IN_MONO;
/**
* The encoding of the audio samples.
* @see AudioFormat#ENCODING_PCM_8BIT
@@ -193,8 +193,8 @@ public class AudioRecord
* @param sampleRateInHz the sample rate expressed in Hertz. Examples of rates are (but
* not limited to) 44100, 22050 and 11025.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_IN_MONO} and
+ * {@link AudioFormat#CHANNEL_IN_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
@@ -265,18 +265,18 @@ public class AudioRecord
//--------------
// channel config
switch (channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_DEFAULT:
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_IN_DEFAULT:
+ case AudioFormat.CHANNEL_IN_MONO:
mChannelCount = 1;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ mChannelConfiguration = AudioFormat.CHANNEL_IN_MONO;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_IN_STEREO:
mChannelCount = 2;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ mChannelConfiguration = AudioFormat.CHANNEL_IN_STEREO;
break;
default:
mChannelCount = 0;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_INVALID;
+ mChannelConfiguration = AudioFormat.CHANNEL_INVALID;
throw (new IllegalArgumentException("Unsupported channel configuration."));
}
@@ -368,8 +368,8 @@ public class AudioRecord
/**
* Returns the configured channel configuration.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO}
- * and {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}.
+ * See {@link AudioFormat#CHANNEL_IN_MONO}
+ * and {@link AudioFormat#CHANNEL_IN_STEREO}.
*/
public int getChannelConfiguration() {
return mChannelConfiguration;
@@ -425,8 +425,8 @@ public class AudioRecord
* will be polled for new data.
* @param sampleRateInHz the sample rate expressed in Hertz.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_IN_MONO} and
+ * {@link AudioFormat#CHANNEL_IN_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT}.
* @return {@link #ERROR_BAD_VALUE} if the recording parameters are not supported by the
@@ -438,14 +438,14 @@ public class AudioRecord
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch(channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_DEFAULT:
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_IN_DEFAULT:
+ case AudioFormat.CHANNEL_IN_MONO:
channelCount = 1;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_IN_STEREO:
channelCount = 2;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_INVALID:
+ case AudioFormat.CHANNEL_INVALID:
default:
loge("getMinBufferSize(): Invalid channel configuration.");
return AudioRecord.ERROR_BAD_VALUE;
diff --git a/media/java/android/media/AudioService.java b/media/java/android/media/AudioService.java
index 58c04f3..177a7cf 100644
--- a/media/java/android/media/AudioService.java
+++ b/media/java/android/media/AudioService.java
@@ -20,6 +20,12 @@ import android.app.ActivityManagerNative;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
+import android.content.IntentFilter;
+import android.bluetooth.BluetoothIntent;
+import android.content.BroadcastReceiver;
+import android.bluetooth.BluetoothHeadset;
+import android.bluetooth.BluetoothA2dp;
+
import android.content.pm.PackageManager;
import android.database.ContentObserver;
import android.media.MediaPlayer.OnCompletionListener;
@@ -94,16 +100,10 @@ public class AudioService extends IAudioService.Stub {
/** @see VolumeStreamState */
private VolumeStreamState[] mStreamStates;
private SettingsObserver mSettingsObserver;
-
- private boolean mMicMute;
+
private int mMode;
- private int[] mRoutes = new int[AudioSystem.NUM_MODES];
private Object mSettingsLock = new Object();
private boolean mMediaServerOk;
- private boolean mSpeakerIsOn;
- private boolean mBluetoothScoIsConnected;
- private boolean mHeadsetIsConnected;
- private boolean mBluetoothA2dpIsConnected;
private SoundPool mSoundPool;
private Object mSoundEffectsLock = new Object();
@@ -135,6 +135,23 @@ public class AudioService extends IAudioService.Stub {
{4, -1} // FX_FOCUS_RETURN
};
+ /* STREAM_VOLUME_ALIAS[] indicates for each stream if it uses the volume settings
+ * of another stream: This avoids multiplying the volume settings for hidden
+ * stream types that follow other stream behavior for volume settings
+ * NOTE: do not create loops in aliases! */
+ private int[] STREAM_VOLUME_ALIAS = new int[] {
+ AudioSystem.STREAM_VOICE_CALL, // STREAM_VOICE_CALL
+ AudioSystem.STREAM_SYSTEM, // STREAM_SYSTEM
+ AudioSystem.STREAM_RING, // STREAM_RING
+ AudioSystem.STREAM_MUSIC, // STREAM_MUSIC
+ AudioSystem.STREAM_ALARM, // STREAM_ALARM
+ AudioSystem.STREAM_NOTIFICATION, // STREAM_NOTIFICATION
+ AudioSystem.STREAM_VOICE_CALL, // STREAM_BLUETOOTH_SCO
+ AudioSystem.STREAM_SYSTEM, // STREAM_SYSTEM_ENFORCED
+ AudioSystem.STREAM_VOICE_CALL, // STREAM_DTMF
+ AudioSystem.STREAM_MUSIC // STREAM_TTS
+ };
+
private AudioSystem.ErrorCallback mAudioSystemCallback = new AudioSystem.ErrorCallback() {
public void onError(int error) {
switch (error) {
@@ -178,6 +195,21 @@ public class AudioService extends IAudioService.Stub {
*/
private int mVibrateSetting;
+ /** @see System#NOTIFICATIONS_USE_RING_VOLUME */
+ private int mNotificationsUseRingVolume;
+
+ // Broadcast receiver for device connections intent broadcasts
+ private final BroadcastReceiver mReceiver = new AudioServiceBroadcastReceiver();
+
+ //TODO: use common definitions with HeadsetObserver
+ private static final int BIT_HEADSET = (1 << 0);
+ private static final int BIT_HEADSET_NO_MIC = (1 << 1);
+ private static final int BIT_TTY = (1 << 2);
+ private static final int BIT_FM_HEADSET = (1 << 3);
+ private static final int BIT_FM_SPEAKER = (1 << 4);
+
+ private int mHeadsetState;
+
///////////////////////////////////////////////////////////////////////////
// Construction
///////////////////////////////////////////////////////////////////////////
@@ -188,18 +220,20 @@ public class AudioService extends IAudioService.Stub {
mContentResolver = context.getContentResolver();
mVolumePanel = new VolumePanel(context, this);
mSettingsObserver = new SettingsObserver();
-
+
createAudioSystemThread();
- createStreamStates();
readPersistedSettings();
- readAudioSettings();
+ createStreamStates();
mMediaServerOk = true;
AudioSystem.setErrorCallback(mAudioSystemCallback);
loadSoundEffects();
- mSpeakerIsOn = false;
- mBluetoothScoIsConnected = false;
- mHeadsetIsConnected = false;
- mBluetoothA2dpIsConnected = false;
+
+ // Register for device connection intent broadcasts.
+ IntentFilter intentFilter =
+ new IntentFilter(Intent.ACTION_HEADSET_PLUG);
+ intentFilter.addAction(BluetoothA2dp.SINK_STATE_CHANGED_ACTION);
+ intentFilter.addAction(BluetoothIntent.HEADSET_AUDIO_STATE_CHANGED_ACTION);
+ context.registerReceiver(mReceiver, intentFilter);
}
private void createAudioSystemThread() {
@@ -223,63 +257,23 @@ public class AudioService extends IAudioService.Stub {
}
private void createStreamStates() {
- final int[] volumeLevelsPhone =
- createVolumeLevels(0, AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_VOICE_CALL]);
- final int[] volumeLevelsCoarse =
- createVolumeLevels(0, AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_SYSTEM]);
- final int[] volumeLevelsFine =
- createVolumeLevels(0, AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_MUSIC]);
- final int[] volumeLevelsBtPhone =
- createVolumeLevels(0,
- AudioManager.MAX_STREAM_VOLUME[AudioManager.STREAM_BLUETOOTH_SCO]);
-
int numStreamTypes = AudioSystem.getNumStreamTypes();
VolumeStreamState[] streams = mStreamStates = new VolumeStreamState[numStreamTypes];
for (int i = 0; i < numStreamTypes; i++) {
- final int[] levels;
-
- switch (i) {
-
- case AudioSystem.STREAM_MUSIC:
- levels = volumeLevelsFine;
- break;
-
- case AudioSystem.STREAM_VOICE_CALL:
- levels = volumeLevelsPhone;
- break;
-
- case AudioSystem.STREAM_BLUETOOTH_SCO:
- levels = volumeLevelsBtPhone;
- break;
-
- default:
- levels = volumeLevelsCoarse;
- break;
- }
-
- if (i == AudioSystem.STREAM_BLUETOOTH_SCO) {
- streams[i] = new VolumeStreamState(AudioManager.DEFAULT_STREAM_VOLUME[i], i,levels);
- } else {
- streams[i] = new VolumeStreamState(System.VOLUME_SETTINGS[i], i, levels);
- }
+ streams[i] = new VolumeStreamState(System.VOLUME_SETTINGS[STREAM_VOLUME_ALIAS[i]], i);
}
- }
- private static int[] createVolumeLevels(int offset, int numlevels) {
- double curve = 1.0f; // 1.4f
- int [] volumes = new int[numlevels + offset];
- for (int i = 0; i < offset; i++) {
- volumes[i] = 0;
- }
-
- double val = 0;
- double max = Math.pow(numlevels - 1, curve);
- for (int i = 0; i < numlevels; i++) {
- val = Math.pow(i, curve) / max;
- volumes[offset + i] = (int) (val * 100.0f);
+ // Correct stream index values for streams with aliases
+ for (int i = 0; i < numStreamTypes; i++) {
+ if (STREAM_VOLUME_ALIAS[i] != i) {
+ int index = rescaleIndex(streams[i].mIndex, STREAM_VOLUME_ALIAS[i], i);
+ streams[i].mIndex = streams[i].getValidIndex(index);
+ setStreamVolumeIndex(i, index);
+ index = rescaleIndex(streams[i].mLastAudibleIndex, STREAM_VOLUME_ALIAS[i], i);
+ streams[i].mLastAudibleIndex = streams[i].getValidIndex(index);
+ }
}
- return volumes;
}
private void readPersistedSettings() {
@@ -291,12 +285,19 @@ public class AudioService extends IAudioService.Stub {
mRingerModeAffectedStreams = Settings.System.getInt(cr,
Settings.System.MODE_RINGER_STREAMS_AFFECTED,
- ((1 << AudioManager.STREAM_RING)|(1 << AudioManager.STREAM_NOTIFICATION)|(1 << AudioManager.STREAM_SYSTEM)));
+ ((1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_NOTIFICATION)|
+ (1 << AudioSystem.STREAM_SYSTEM)|(1 << AudioSystem.STREAM_SYSTEM_ENFORCED)));
mMuteAffectedStreams = System.getInt(cr,
System.MUTE_STREAMS_AFFECTED,
((1 << AudioSystem.STREAM_MUSIC)|(1 << AudioSystem.STREAM_RING)|(1 << AudioSystem.STREAM_SYSTEM)));
+ mNotificationsUseRingVolume = System.getInt(cr,
+ Settings.System.NOTIFICATIONS_USE_RING_VOLUME, 1);
+
+ if (mNotificationsUseRingVolume == 1) {
+ STREAM_VOLUME_ALIAS[AudioSystem.STREAM_NOTIFICATION] = AudioSystem.STREAM_RING;
+ }
// Each stream will read its own persisted settings
// Broadcast the sticky intent
@@ -307,25 +308,13 @@ public class AudioService extends IAudioService.Stub {
broadcastVibrateSetting(AudioManager.VIBRATE_TYPE_NOTIFICATION);
}
- private void readAudioSettings() {
- synchronized (mSettingsLock) {
- mMicMute = AudioSystem.isMicrophoneMuted();
- mMode = AudioSystem.getMode();
- for (int mode = 0; mode < AudioSystem.NUM_MODES; mode++) {
- mRoutes[mode] = AudioSystem.getRouting(mode);
- }
- }
+ private void setStreamVolumeIndex(int stream, int index) {
+ AudioSystem.setStreamVolumeIndex(stream, (index + 5)/10);
}
- private void applyAudioSettings() {
- synchronized (mSettingsLock) {
- AudioSystem.muteMicrophone(mMicMute);
- AudioSystem.setMode(mMode);
- for (int mode = 0; mode < AudioSystem.NUM_MODES; mode++) {
- AudioSystem.setRouting(mode, mRoutes[mode], AudioSystem.ROUTE_ALL);
- }
- }
- }
+ private int rescaleIndex(int index, int srcStream, int dstStream) {
+ return (index * mStreamStates[dstStream].getMaxIndex() + mStreamStates[srcStream].getMaxIndex() / 2) / mStreamStates[srcStream].getMaxIndex();
+ }
///////////////////////////////////////////////////////////////////////////
// IPC methods
@@ -354,44 +343,26 @@ public class AudioService extends IAudioService.Stub {
ensureValidDirection(direction);
ensureValidStreamType(streamType);
- boolean notificationsUseRingVolume = Settings.System.getInt(mContentResolver,
- Settings.System.NOTIFICATIONS_USE_RING_VOLUME, 1) == 1;
- if (notificationsUseRingVolume && streamType == AudioManager.STREAM_NOTIFICATION) {
- // Redirect the volume change to the ring stream
- streamType = AudioManager.STREAM_RING;
- }
- VolumeStreamState streamState = mStreamStates[streamType];
+ VolumeStreamState streamState = mStreamStates[STREAM_VOLUME_ALIAS[streamType]];
final int oldIndex = streamState.mIndex;
boolean adjustVolume = true;
// If either the client forces allowing ringer modes for this adjustment,
// or the stream type is one that is affected by ringer modes
if ((flags & AudioManager.FLAG_ALLOW_RINGER_MODES) != 0
- || streamType == AudioManager.STREAM_RING) {
+ || streamType == AudioSystem.STREAM_RING) {
// Check if the ringer mode changes with this volume adjustment. If
// it does, it will handle adjusting the volume, so we won't below
adjustVolume = checkForRingerModeChange(oldIndex, direction);
}
if (adjustVolume && streamState.adjustIndex(direction)) {
-
- boolean alsoUpdateNotificationVolume = notificationsUseRingVolume &&
- streamType == AudioManager.STREAM_RING;
- if (alsoUpdateNotificationVolume) {
- mStreamStates[AudioManager.STREAM_NOTIFICATION].adjustIndex(direction);
- }
-
// Post message to set system volume (it in turn will post a message
// to persist). Do not change volume if stream is muted.
if (streamState.muteCount() == 0) {
- sendMsg(mAudioHandler, MSG_SET_SYSTEM_VOLUME, streamType, SENDMSG_NOOP, 0, 0,
+ sendMsg(mAudioHandler, MSG_SET_SYSTEM_VOLUME, STREAM_VOLUME_ALIAS[streamType], SENDMSG_NOOP, 0, 0,
streamState, 0);
-
- if (alsoUpdateNotificationVolume) {
- sendMsg(mAudioHandler, MSG_SET_SYSTEM_VOLUME, AudioManager.STREAM_NOTIFICATION,
- SENDMSG_NOOP, 0, 0, mStreamStates[AudioManager.STREAM_NOTIFICATION], 0);
- }
}
}
@@ -404,9 +375,8 @@ public class AudioService extends IAudioService.Stub {
/** @see AudioManager#setStreamVolume(int, int, int) */
public void setStreamVolume(int streamType, int index, int flags) {
ensureValidStreamType(streamType);
- syncRingerAndNotificationStreamVolume(streamType, index, false);
-
- setStreamVolumeInt(streamType, index, false, true);
+ index = rescaleIndex(index * 10, streamType, STREAM_VOLUME_ALIAS[streamType]);
+ setStreamVolumeInt(STREAM_VOLUME_ALIAS[streamType], index, false, true);
// UI, etc.
mVolumePanel.postVolumeChanged(streamType, flags);
@@ -420,37 +390,12 @@ public class AudioService extends IAudioService.Stub {
intent.putExtra(AudioManager.EXTRA_VOLUME_STREAM_VALUE, getStreamVolume(streamType));
// Currently, sending the intent only when the stream is BLUETOOTH_SCO
- if (streamType == AudioManager.STREAM_BLUETOOTH_SCO) {
+ if (streamType == AudioSystem.STREAM_BLUETOOTH_SCO) {
mContext.sendBroadcast(intent);
}
}
/**
- * Sync the STREAM_RING and STREAM_NOTIFICATION volumes if mandated by the
- * value in Settings.
- *
- * @param streamType Type of the stream
- * @param index Volume index for the stream
- * @param force If true, set the volume even if the current and desired
- * volume as same
- */
- private void syncRingerAndNotificationStreamVolume(int streamType, int index, boolean force) {
- boolean notificationsUseRingVolume = Settings.System.getInt(mContentResolver,
- Settings.System.NOTIFICATIONS_USE_RING_VOLUME, 1) == 1;
- if (notificationsUseRingVolume) {
- if (streamType == AudioManager.STREAM_NOTIFICATION) {
- // Redirect the volume change to the ring stream
- streamType = AudioManager.STREAM_RING;
- }
- if (streamType == AudioManager.STREAM_RING) {
- // One-off to sync notification volume to ringer volume
- setStreamVolumeInt(AudioManager.STREAM_NOTIFICATION, index, force, true);
- }
- }
- }
-
-
- /**
* Sets the stream state's index, and posts a message to set system volume.
* This will not call out to the UI. Assumes a valid stream type.
*
@@ -491,13 +436,13 @@ public class AudioService extends IAudioService.Stub {
/** @see AudioManager#getStreamVolume(int) */
public int getStreamVolume(int streamType) {
ensureValidStreamType(streamType);
- return mStreamStates[streamType].mIndex;
+ return (mStreamStates[streamType].mIndex + 5) / 10;
}
/** @see AudioManager#getStreamMaxVolume(int) */
public int getStreamMaxVolume(int streamType) {
ensureValidStreamType(streamType);
- return mStreamStates[streamType].getMaxIndex();
+ return (mStreamStates[streamType].getMaxIndex() + 5) / 10;
}
/** @see AudioManager#getRingerMode() */
@@ -507,11 +452,12 @@ public class AudioService extends IAudioService.Stub {
/** @see AudioManager#setRingerMode(int) */
public void setRingerMode(int ringerMode) {
- if (ringerMode != mRingerMode) {
- setRingerModeInt(ringerMode, true);
-
- // Send sticky broadcast
- broadcastRingerMode();
+ synchronized (mSettingsLock) {
+ if (ringerMode != mRingerMode) {
+ setRingerModeInt(ringerMode, true);
+ // Send sticky broadcast
+ broadcastRingerMode();
+ }
}
}
@@ -541,7 +487,7 @@ public class AudioService extends IAudioService.Stub {
}
}
}
-
+
// Post a persist ringer mode msg
if (persist) {
sendMsg(mAudioHandler, MSG_PERSIST_RINGER_MODE, SHARED_MSG,
@@ -606,39 +552,28 @@ public class AudioService extends IAudioService.Stub {
return existingValue;
}
- /** @see AudioManager#setMicrophoneMute(boolean) */
- public void setMicrophoneMute(boolean on) {
- if (!checkAudioSettingsPermission("setMicrophoneMute()")) {
- return;
- }
- synchronized (mSettingsLock) {
- if (on != mMicMute) {
- AudioSystem.muteMicrophone(on);
- mMicMute = on;
- }
- }
- }
-
- /** @see AudioManager#isMicrophoneMute() */
- public boolean isMicrophoneMute() {
- return mMicMute;
- }
-
/** @see AudioManager#setMode(int) */
public void setMode(int mode) {
if (!checkAudioSettingsPermission("setMode()")) {
return;
}
+
+ if (mode < AudioSystem.MODE_CURRENT || mode > AudioSystem.MODE_IN_CALL) {
+ return;
+ }
+
synchronized (mSettingsLock) {
+ if (mode == AudioSystem.MODE_CURRENT) {
+ mode = mMode;
+ }
if (mode != mMode) {
- if (AudioSystem.setMode(mode) == AudioSystem.AUDIO_STATUS_OK) {
+ if (AudioSystem.setPhoneState(mode) == AudioSystem.AUDIO_STATUS_OK) {
mMode = mode;
}
}
int streamType = getActiveStreamType(AudioManager.USE_DEFAULT_STREAM_TYPE);
- int index = mStreamStates[streamType].mIndex;
- syncRingerAndNotificationStreamVolume(streamType, index, true);
- setStreamVolumeInt(streamType, index, true, true);
+ int index = mStreamStates[STREAM_VOLUME_ALIAS[streamType]].mIndex;
+ setStreamVolumeInt(STREAM_VOLUME_ALIAS[streamType], index, true, true);
}
}
@@ -647,187 +582,6 @@ public class AudioService extends IAudioService.Stub {
return mMode;
}
- /** @see AudioManager#setRouting(int, int, int) */
- public void setRouting(int mode, int routes, int mask) {
- int incallMask = 0;
- int ringtoneMask = 0;
- int normalMask = 0;
-
- if (!checkAudioSettingsPermission("setRouting()")) {
- return;
- }
- synchronized (mSettingsLock) {
- // Temporary fix for issue #1713090 until audio routing is refactored in eclair release.
- // mode AudioSystem.MODE_INVALID is used only by the following AudioManager methods:
- // setWiredHeadsetOn(), setBluetoothA2dpOn(), setBluetoothScoOn() and setSpeakerphoneOn().
- // If applications are using AudioManager.setRouting() that is now deprecated, the routing
- // command will be ignored.
- if (mode == AudioSystem.MODE_INVALID) {
- switch (mask) {
- case AudioSystem.ROUTE_SPEAKER:
- // handle setSpeakerphoneOn()
- if (routes != 0 && !mSpeakerIsOn) {
- mSpeakerIsOn = true;
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_SPEAKER;
- incallMask = AudioSystem.ROUTE_ALL;
- } else if (routes == 0 && mSpeakerIsOn) {
- mSpeakerIsOn = false;
- if (mBluetoothScoIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_BLUETOOTH_SCO;
- } else if (mHeadsetIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_HEADSET;
- } else {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_EARPIECE;
- }
- incallMask = AudioSystem.ROUTE_ALL;
- }
- break;
-
- case AudioSystem.ROUTE_BLUETOOTH_SCO:
- // handle setBluetoothScoOn()
- if (routes != 0 && !mBluetoothScoIsConnected) {
- mBluetoothScoIsConnected = true;
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_BLUETOOTH_SCO;
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_BLUETOOTH_SCO;
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_BLUETOOTH_SCO;
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than SCO headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- } else if (routes == 0 && mBluetoothScoIsConnected) {
- mBluetoothScoIsConnected = false;
- if (mHeadsetIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_HEADSET;
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- (AudioSystem.ROUTE_HEADSET|AudioSystem.ROUTE_SPEAKER);
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_HEADSET;
- } else {
- if (mSpeakerIsOn) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_SPEAKER;
- } else {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_EARPIECE;
- }
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
- }
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than SCO headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- break;
-
- case AudioSystem.ROUTE_HEADSET:
- // handle setWiredHeadsetOn()
- if (routes != 0 && !mHeadsetIsConnected) {
- mHeadsetIsConnected = true;
- // do not act upon headset connection if bluetooth SCO is connected to match phone app behavior
- if (!mBluetoothScoIsConnected) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_HEADSET;
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- (AudioSystem.ROUTE_HEADSET|AudioSystem.ROUTE_SPEAKER);
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_HEADSET;
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than wired headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- } else if (routes == 0 && mHeadsetIsConnected) {
- mHeadsetIsConnected = false;
- // do not act upon headset disconnection if bluetooth SCO is connected to match phone app behavior
- if (!mBluetoothScoIsConnected) {
- if (mSpeakerIsOn) {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_SPEAKER;
- } else {
- mRoutes[AudioSystem.MODE_IN_CALL] = AudioSystem.ROUTE_EARPIECE;
- }
- mRoutes[AudioSystem.MODE_RINGTONE] = (mRoutes[AudioSystem.MODE_RINGTONE] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
- mRoutes[AudioSystem.MODE_NORMAL] = (mRoutes[AudioSystem.MODE_NORMAL] & AudioSystem.ROUTE_BLUETOOTH_A2DP) |
- AudioSystem.ROUTE_SPEAKER;
-
- incallMask = AudioSystem.ROUTE_ALL;
- // A2DP has higher priority than wired headset, so headset connect/disconnect events
- // should not affect A2DP routing
- ringtoneMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_ALL & ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- }
- break;
-
- case AudioSystem.ROUTE_BLUETOOTH_A2DP:
- // handle setBluetoothA2dpOn()
- if (routes != 0 && !mBluetoothA2dpIsConnected) {
- mBluetoothA2dpIsConnected = true;
- mRoutes[AudioSystem.MODE_RINGTONE] |= AudioSystem.ROUTE_BLUETOOTH_A2DP;
- mRoutes[AudioSystem.MODE_NORMAL] |= AudioSystem.ROUTE_BLUETOOTH_A2DP;
- // the audio flinger chooses A2DP as a higher priority,
- // so there is no need to disable other routes.
- ringtoneMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- } else if (routes == 0 && mBluetoothA2dpIsConnected) {
- mBluetoothA2dpIsConnected = false;
- mRoutes[AudioSystem.MODE_RINGTONE] &= ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- mRoutes[AudioSystem.MODE_NORMAL] &= ~AudioSystem.ROUTE_BLUETOOTH_A2DP;
- // the audio flinger chooses A2DP as a higher priority,
- // so there is no need to disable other routes.
- ringtoneMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- normalMask = AudioSystem.ROUTE_BLUETOOTH_A2DP;
- }
- break;
- }
-
- // incallMask is != 0 means we must apply ne routing to MODE_IN_CALL mode
- if (incallMask != 0) {
- AudioSystem.setRouting(AudioSystem.MODE_IN_CALL,
- mRoutes[AudioSystem.MODE_IN_CALL],
- incallMask);
- }
- // ringtoneMask is != 0 means we must apply ne routing to MODE_RINGTONE mode
- if (ringtoneMask != 0) {
- AudioSystem.setRouting(AudioSystem.MODE_RINGTONE,
- mRoutes[AudioSystem.MODE_RINGTONE],
- ringtoneMask);
- }
- // normalMask is != 0 means we must apply ne routing to MODE_NORMAL mode
- if (normalMask != 0) {
- AudioSystem.setRouting(AudioSystem.MODE_NORMAL,
- mRoutes[AudioSystem.MODE_NORMAL],
- normalMask);
- }
-
- int streamType = getActiveStreamType(AudioManager.USE_DEFAULT_STREAM_TYPE);
- int index = mStreamStates[streamType].mIndex;
- syncRingerAndNotificationStreamVolume(streamType, index, true);
- setStreamVolumeInt(streamType, index, true, true);
- }
- }
- }
-
- /** @see AudioManager#getRouting(int) */
- public int getRouting(int mode) {
- return mRoutes[mode];
- }
-
- /** @see AudioManager#isMusicActive() */
- public boolean isMusicActive() {
- return AudioSystem.isMusicActive();
- }
-
- /** @see AudioManager#setParameter(String, String) */
- public void setParameter(String key, String value) {
- AudioSystem.setParameter(key, value);
- }
-
/** @see AudioManager#playSoundEffect(int) */
public void playSoundEffect(int effectType) {
sendMsg(mAudioHandler, MSG_PLAY_SOUND_EFFECT, SHARED_MSG, SENDMSG_NOOP,
@@ -930,14 +684,28 @@ public class AudioService extends IAudioService.Stub {
if (streamType != AudioSystem.STREAM_BLUETOOTH_SCO) {
String settingName = System.VOLUME_SETTINGS[streamType];
String lastAudibleSettingName = settingName + System.APPEND_FOR_LAST_AUDIBLE;
-
- streamState.mIndex = streamState.getValidIndex(Settings.System.getInt(mContentResolver,
- settingName,
- AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
- streamState.mLastAudibleIndex = streamState.getValidIndex(Settings.System.getInt(mContentResolver,
- lastAudibleSettingName,
- streamState.mIndex > 0 ? streamState.mIndex : AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
+ int index = Settings.System.getInt(mContentResolver,
+ settingName,
+ AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ if (STREAM_VOLUME_ALIAS[streamType] != streamType) {
+ index = rescaleIndex(index * 10, STREAM_VOLUME_ALIAS[streamType], streamType);
+ } else {
+ index *= 10;
+ }
+ streamState.mIndex = streamState.getValidIndex(index);
+
+ index = (index + 5) / 10;
+ index = Settings.System.getInt(mContentResolver,
+ lastAudibleSettingName,
+ (index > 0) ? index : AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ if (STREAM_VOLUME_ALIAS[streamType] != streamType) {
+ index = rescaleIndex(index * 10, STREAM_VOLUME_ALIAS[streamType], streamType);
+ } else {
+ index *= 10;
+ }
+ streamState.mLastAudibleIndex = streamState.getValidIndex(index);
}
+
// unmute stream that was muted but is not affected by mute anymore
if (streamState.muteCount() != 0 && !isStreamAffectedByMute(streamType)) {
int size = streamState.mDeathHandlers.size();
@@ -948,7 +716,7 @@ public class AudioService extends IAudioService.Stub {
}
// apply stream volume
if (streamState.muteCount() == 0) {
- AudioSystem.setVolume(streamType, streamState.mVolumes[streamState.mIndex]);
+ setStreamVolumeIndex(streamType, streamState.mIndex);
}
}
@@ -969,7 +737,7 @@ public class AudioService extends IAudioService.Stub {
boolean adjustVolumeIndex = true;
int newRingerMode = mRingerMode;
- if (mRingerMode == AudioManager.RINGER_MODE_NORMAL && oldIndex == 1
+ if (mRingerMode == AudioManager.RINGER_MODE_NORMAL && (oldIndex + 5) / 10 == 1
&& direction == AudioManager.ADJUST_LOWER) {
newRingerMode = AudioManager.RINGER_MODE_VIBRATE;
} else if (mRingerMode == AudioManager.RINGER_MODE_VIBRATE) {
@@ -1026,7 +794,7 @@ public class AudioService extends IAudioService.Stub {
Log.w(TAG, "Couldn't connect to phone service", e);
}
- if ((getRouting(AudioSystem.MODE_IN_CALL) & AudioSystem.ROUTE_BLUETOOTH_SCO) != 0) {
+ if (AudioSystem.getForceUse(AudioSystem.FOR_COMMUNICATION) == AudioSystem.FORCE_BT_SCO) {
// Log.v(TAG, "getActiveStreamType: Forcing STREAM_BLUETOOTH_SCO...");
return AudioSystem.STREAM_BLUETOOTH_SCO;
} else if (isOffhook) {
@@ -1110,47 +878,36 @@ public class AudioService extends IAudioService.Stub {
private final String mLastAudibleVolumeIndexSettingName;
private final int mStreamType;
- private final int[] mVolumes;
+ private int mIndexMax;
private int mIndex;
private int mLastAudibleIndex;
private ArrayList<VolumeDeathHandler> mDeathHandlers; //handles mute/solo requests client death
- private VolumeStreamState(String settingName, int streamType, int[] volumes) {
+ private VolumeStreamState(String settingName, int streamType) {
mVolumeIndexSettingName = settingName;
mLastAudibleVolumeIndexSettingName = settingName + System.APPEND_FOR_LAST_AUDIBLE;
mStreamType = streamType;
- mVolumes = volumes;
final ContentResolver cr = mContentResolver;
- mIndex = getValidIndex(Settings.System.getInt(cr, mVolumeIndexSettingName, AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
- mLastAudibleIndex = getValidIndex(Settings.System.getInt(cr,
- mLastAudibleVolumeIndexSettingName, mIndex > 0 ? mIndex : AudioManager.DEFAULT_STREAM_VOLUME[streamType]));
-
- AudioSystem.setVolume(streamType, volumes[mIndex]);
- mDeathHandlers = new ArrayList<VolumeDeathHandler>();
- }
-
- /**
- * Constructor to be used when there is no setting associated with the VolumeStreamState.
- *
- * @param defaultVolume Default volume of the stream to use.
- * @param streamType Type of the stream.
- * @param volumes Volumes levels associated with this stream.
- */
- private VolumeStreamState(int defaultVolume, int streamType, int[] volumes) {
- mVolumeIndexSettingName = null;
- mLastAudibleVolumeIndexSettingName = null;
- mIndex = mLastAudibleIndex = defaultVolume;
- mStreamType = streamType;
- mVolumes = volumes;
- AudioSystem.setVolume(mStreamType, defaultVolume);
+ mIndexMax = AudioManager.MAX_STREAM_VOLUME[streamType];
+ mIndex = Settings.System.getInt(cr,
+ mVolumeIndexSettingName,
+ AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ mLastAudibleIndex = Settings.System.getInt(cr,
+ mLastAudibleVolumeIndexSettingName,
+ (mIndex > 0) ? mIndex : AudioManager.DEFAULT_STREAM_VOLUME[streamType]);
+ AudioSystem.initStreamVolume(streamType, 0, mIndexMax);
+ mIndexMax *= 10;
+ mIndex = getValidIndex(10 * mIndex);
+ mLastAudibleIndex = getValidIndex(10 * mLastAudibleIndex);
+ setStreamVolumeIndex(streamType, mIndex);
mDeathHandlers = new ArrayList<VolumeDeathHandler>();
}
public boolean adjustIndex(int deltaIndex) {
- return setIndex(mIndex + deltaIndex, true);
+ return setIndex(mIndex + deltaIndex * 10, true);
}
public boolean setIndex(int index, boolean lastAudible) {
@@ -1161,6 +918,13 @@ public class AudioService extends IAudioService.Stub {
if (lastAudible) {
mLastAudibleIndex = mIndex;
}
+ // Apply change to all streams using this one as alias
+ int numStreamTypes = AudioSystem.getNumStreamTypes();
+ for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
+ if (streamType != mStreamType && STREAM_VOLUME_ALIAS[streamType] == mStreamType) {
+ mStreamStates[streamType].setIndex(rescaleIndex(mIndex, mStreamType, streamType), lastAudible);
+ }
+ }
return true;
} else {
return false;
@@ -1168,7 +932,7 @@ public class AudioService extends IAudioService.Stub {
}
public int getMaxIndex() {
- return mVolumes.length - 1;
+ return mIndexMax;
}
public void mute(IBinder cb, boolean state) {
@@ -1183,8 +947,8 @@ public class AudioService extends IAudioService.Stub {
private int getValidIndex(int index) {
if (index < 0) {
return 0;
- } else if (index >= mVolumes.length) {
- return mVolumes.length - 1;
+ } else if (index > mIndexMax) {
+ return mIndexMax;
}
return index;
@@ -1318,8 +1082,16 @@ public class AudioService extends IAudioService.Stub {
private void setSystemVolume(VolumeStreamState streamState) {
// Adjust volume
- AudioSystem
- .setVolume(streamState.mStreamType, streamState.mVolumes[streamState.mIndex]);
+ setStreamVolumeIndex(streamState.mStreamType, streamState.mIndex);
+
+ // Apply change to all streams using this one as alias
+ int numStreamTypes = AudioSystem.getNumStreamTypes();
+ for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
+ if (streamType != streamState.mStreamType &&
+ STREAM_VOLUME_ALIAS[streamType] == streamState.mStreamType) {
+ setStreamVolumeIndex(streamType, mStreamStates[streamType].mIndex);
+ }
+ }
// Post a persist volume msg
sendMsg(mAudioHandler, MSG_PERSIST_VOLUME, streamState.mStreamType,
@@ -1327,12 +1099,10 @@ public class AudioService extends IAudioService.Stub {
}
private void persistVolume(VolumeStreamState streamState) {
- if (streamState.mStreamType != AudioManager.STREAM_BLUETOOTH_SCO) {
- System.putInt(mContentResolver, streamState.mVolumeIndexSettingName,
- streamState.mIndex);
- System.putInt(mContentResolver, streamState.mLastAudibleVolumeIndexSettingName,
- streamState.mLastAudibleIndex);
- }
+ System.putInt(mContentResolver, streamState.mVolumeIndexSettingName,
+ (streamState.mIndex + 5)/ 10);
+ System.putInt(mContentResolver, streamState.mLastAudibleVolumeIndexSettingName,
+ (streamState.mLastAudibleIndex + 5) / 10);
}
private void persistRingerMode() {
@@ -1426,18 +1196,17 @@ public class AudioService extends IAudioService.Stub {
case MSG_MEDIA_SERVER_STARTED:
Log.e(TAG, "Media server started.");
- // Restore audio routing and stream volumes
- applyAudioSettings();
+ // Restore stream volumes
int numStreamTypes = AudioSystem.getNumStreamTypes();
for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
- int volume;
+ int index;
VolumeStreamState streamState = mStreamStates[streamType];
if (streamState.muteCount() == 0) {
- volume = streamState.mVolumes[streamState.mIndex];
+ index = streamState.mIndex;
} else {
- volume = streamState.mVolumes[0];
+ index = 0;
}
- AudioSystem.setVolume(streamType, volume);
+ setStreamVolumeIndex(streamType, index);
}
setRingerMode(mRingerMode);
mMediaServerOk = true;
@@ -1451,28 +1220,144 @@ public class AudioService extends IAudioService.Stub {
}
private class SettingsObserver extends ContentObserver {
-
+
SettingsObserver() {
super(new Handler());
mContentResolver.registerContentObserver(Settings.System.getUriFor(
Settings.System.MODE_RINGER_STREAMS_AFFECTED), false, this);
+ mContentResolver.registerContentObserver(Settings.System.getUriFor(
+ Settings.System.NOTIFICATIONS_USE_RING_VOLUME), false, this);
}
@Override
public void onChange(boolean selfChange) {
super.onChange(selfChange);
-
- mRingerModeAffectedStreams = Settings.System.getInt(mContentResolver,
- Settings.System.MODE_RINGER_STREAMS_AFFECTED,
- 0);
+ synchronized (mSettingsLock) {
+ int ringerModeAffectedStreams = Settings.System.getInt(mContentResolver,
+ Settings.System.MODE_RINGER_STREAMS_AFFECTED,
+ 0);
+ if (ringerModeAffectedStreams != mRingerModeAffectedStreams) {
+ /*
+ * Ensure all stream types that should be affected by ringer mode
+ * are in the proper state.
+ */
+ mRingerModeAffectedStreams = ringerModeAffectedStreams;
+ setRingerModeInt(getRingerMode(), false);
+ }
- /*
- * Ensure all stream types that should be affected by ringer mode
- * are in the proper state.
- */
- setRingerModeInt(getRingerMode(), false);
+ int notificationsUseRingVolume = Settings.System.getInt(mContentResolver,
+ Settings.System.NOTIFICATIONS_USE_RING_VOLUME,
+ 1);
+ if (notificationsUseRingVolume != mNotificationsUseRingVolume) {
+ mNotificationsUseRingVolume = notificationsUseRingVolume;
+ if (mNotificationsUseRingVolume == 1) {
+ STREAM_VOLUME_ALIAS[AudioSystem.STREAM_NOTIFICATION] = AudioSystem.STREAM_RING;
+ } else {
+ STREAM_VOLUME_ALIAS[AudioSystem.STREAM_NOTIFICATION] = AudioSystem.STREAM_NOTIFICATION;
+ // Persist notification volume as it was not persisted while aliased to ring volume
+ sendMsg(mAudioHandler, MSG_PERSIST_VOLUME, AudioSystem.STREAM_NOTIFICATION,
+ SENDMSG_REPLACE, 0, 0, mStreamStates[AudioSystem.STREAM_NOTIFICATION], PERSIST_DELAY);
+ }
+ }
+ }
}
-
}
-
+
+ /**
+ * Receiver for misc intent broadcasts the audio service cares about.
+ */
+ private class AudioServiceBroadcastReceiver extends BroadcastReceiver {
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ String action = intent.getAction();
+
+ if (action.equals(BluetoothA2dp.SINK_STATE_CHANGED_ACTION)) {
+ int state = intent.getIntExtra(BluetoothA2dp.SINK_STATE,
+ BluetoothA2dp.STATE_DISCONNECTED);
+ String address = intent.getStringExtra(BluetoothIntent.ADDRESS);
+ if (state == BluetoothA2dp.STATE_DISCONNECTED) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ address);
+ } else if (state == BluetoothA2dp.STATE_CONNECTED){
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ address);
+ }
+ } else if (action.equals(BluetoothIntent.HEADSET_AUDIO_STATE_CHANGED_ACTION)) {
+ int state = intent.getIntExtra(BluetoothIntent.HEADSET_AUDIO_STATE,
+ BluetoothHeadset.STATE_ERROR);
+ String address = intent.getStringExtra(BluetoothIntent.ADDRESS);
+ if (state == BluetoothHeadset.AUDIO_STATE_DISCONNECTED) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_SCO,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ address);
+ } else if (state == BluetoothHeadset.AUDIO_STATE_CONNECTED) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_BLUETOOTH_SCO,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ address);
+ }
+ } else if (action.equals(Intent.ACTION_HEADSET_PLUG)) {
+ int state = intent.getIntExtra("state", 0);
+ if ((state & BIT_HEADSET) == 0 &&
+ (mHeadsetState & BIT_HEADSET) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADSET,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_HEADSET) != 0 &&
+ (mHeadsetState & BIT_HEADSET) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADSET,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_HEADSET_NO_MIC) == 0 &&
+ (mHeadsetState & BIT_HEADSET_NO_MIC) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADPHONE,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_HEADSET_NO_MIC) != 0 &&
+ (mHeadsetState & BIT_HEADSET_NO_MIC) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_WIRED_HEADPHONE,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_TTY) == 0 &&
+ (mHeadsetState & BIT_TTY) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_TTY,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_TTY) != 0 &&
+ (mHeadsetState & BIT_TTY) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_TTY,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_FM_HEADSET) == 0 &&
+ (mHeadsetState & BIT_FM_HEADSET) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_HEADPHONE,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_FM_HEADSET) != 0 &&
+ (mHeadsetState & BIT_FM_HEADSET) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_HEADPHONE,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ if ((state & BIT_FM_SPEAKER) == 0 &&
+ (mHeadsetState & BIT_FM_SPEAKER) != 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_SPEAKER,
+ AudioSystem.DEVICE_STATE_UNAVAILABLE,
+ "");
+ } else if ((state & BIT_FM_SPEAKER) != 0 &&
+ (mHeadsetState & BIT_FM_SPEAKER) == 0) {
+ AudioSystem.setDeviceConnectionState(AudioSystem.DEVICE_OUT_FM_SPEAKER,
+ AudioSystem.DEVICE_STATE_AVAILABLE,
+ "");
+ }
+ mHeadsetState = state;
+ }
+ }
+ }
+
+
}
diff --git a/media/java/android/media/AudioSystem.java b/media/java/android/media/AudioSystem.java
index 5917ab9..d587f65 100644
--- a/media/java/android/media/AudioSystem.java
+++ b/media/java/android/media/AudioSystem.java
@@ -45,38 +45,21 @@ public class AudioSystem
public static final int STREAM_NOTIFICATION = 5;
/* @hide The audio stream for phone calls when connected on bluetooth */
public static final int STREAM_BLUETOOTH_SCO = 6;
+ /* @hide The audio stream for enforced system sounds in certain countries (e.g. camera in Japan)
+ public static final int STREAM_SYSTEM_ENFORCED = 7;
+ /* @hide The audio stream for DTMF tones */
+ public static final int STREAM_DTMF = 8;
+ /* @hide The audio stream for text to speech (TTS) */
+ public static final int STREAM_TTS = 9;
/**
* @deprecated Use {@link #numStreamTypes() instead}
*/
public static final int NUM_STREAMS = 5;
// Expose only the getter method publicly so we can change it in the future
- private static final int NUM_STREAM_TYPES = 7;
+ private static final int NUM_STREAM_TYPES = 10;
public static final int getNumStreamTypes() { return NUM_STREAM_TYPES; }
- /* max and min volume levels */
- /* Maximum volume setting, for use with setVolume(int,int) */
- public static final int MAX_VOLUME = 100;
- /* Minimum volume setting, for use with setVolume(int,int) */
- public static final int MIN_VOLUME = 0;
-
- /*
- * Sets the volume of a specified audio stream.
- *
- * param type the stream type to set the volume of (e.g. STREAM_MUSIC)
- * param volume the volume level to set (0-100)
- * return command completion status see AUDIO_STATUS_OK, see AUDIO_STATUS_ERROR
- */
- public static native int setVolume(int type, int volume);
-
- /*
- * Returns the volume of a specified audio stream.
- *
- * param type the stream type to get the volume of (e.g. STREAM_MUSIC)
- * return the current volume (0-100)
- */
- public static native int getVolume(int type);
-
/*
* Sets the microphone mute on or off.
*
@@ -101,17 +84,22 @@ public class AudioSystem
* it can route the audio appropriately.
* return command completion status see AUDIO_STATUS_OK, see AUDIO_STATUS_ERROR
*/
- public static native int setMode(int mode);
-
+ /** @deprecated use {@link #setPhoneState(int)} */
+ public static int setMode(int mode) {
+ return AUDIO_STATUS_ERROR;
+ }
/*
* Returns the current audio mode.
*
* return the current audio mode (NORMAL, RINGTONE, or IN_CALL).
* Returns the current audio state from the HAL.
*/
- public static native int getMode();
+ /** @deprecated */
+ public static int getMode() {
+ return MODE_INVALID;
+ }
- /* modes for setMode/getMode/setRoute/getRoute */
+ /* modes for setPhoneState */
public static final int MODE_INVALID = -2;
public static final int MODE_CURRENT = -1;
public static final int MODE_NORMAL = 0;
@@ -121,15 +109,20 @@ public class AudioSystem
/* Routing bits for setRouting/getRouting API */
- public static final int ROUTE_EARPIECE = (1 << 0);
- public static final int ROUTE_SPEAKER = (1 << 1);
-
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_EARPIECE = (1 << 0);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_SPEAKER = (1 << 1);
/** @deprecated use {@link #ROUTE_BLUETOOTH_SCO} */
@Deprecated public static final int ROUTE_BLUETOOTH = (1 << 2);
- public static final int ROUTE_BLUETOOTH_SCO = (1 << 2);
- public static final int ROUTE_HEADSET = (1 << 3);
- public static final int ROUTE_BLUETOOTH_A2DP = (1 << 4);
- public static final int ROUTE_ALL = 0xFFFFFFFF;
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_BLUETOOTH_SCO = (1 << 2);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_HEADSET = (1 << 3);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_BLUETOOTH_A2DP = (1 << 4);
+ /** @deprecated */
+ @Deprecated public static final int ROUTE_ALL = 0xFFFFFFFF;
/*
* Sets the audio routing for a specified mode
@@ -141,7 +134,10 @@ public class AudioSystem
* ROUTE_xxx types. Unset bits indicate the route should be left unchanged
* return command completion status see AUDIO_STATUS_OK, see AUDIO_STATUS_ERROR
*/
- public static native int setRouting(int mode, int routes, int mask);
+ /** @deprecated use {@link #setDeviceConnectionState(int,int,String)} */
+ public static int setRouting(int mode, int routes, int mask) {
+ return AUDIO_STATUS_ERROR;
+ }
/*
* Returns the current audio routing bit vector for a specified mode.
@@ -150,7 +146,10 @@ public class AudioSystem
* return an audio route bit vector that can be compared with ROUTE_xxx
* bits
*/
- public static native int getRouting(int mode);
+ /** @deprecated use {@link #getDeviceConnectionState(int,String)} */
+ public static int getRouting(int mode) {
+ return 0;
+ }
/*
* Checks whether any music is active.
@@ -160,17 +159,23 @@ public class AudioSystem
public static native boolean isMusicActive();
/*
- * Sets a generic audio configuration parameter. The use of these parameters
+ * Sets a group of generic audio configuration parameters. The use of these parameters
* are platform dependant, see libaudio
*
- * ** Temporary interface - DO NOT USE
- *
- * TODO: Replace with a more generic key:value get/set mechanism
+ * param keyValuePairs list of parameter key-value pairs in the form:
+ * key1=value1;key2=value2;...
+ */
+ public static native int setParameters(String keyValuePairs);
+
+ /*
+ * Gets a group of generic audio configuration parameters. The use of these parameters
+ * is platform dependent, see libaudio
*
- * param key name of parameter to set. Must not be null.
- * param value value of parameter. Must not be null.
+ * param keys list of parameters
+ * return value: list of parameter key-value pairs in the form:
+ * key1=value1;key2=value2;...
*/
- public static native void setParameter(String key, String value);
+ public static native String getParameters(String keys);
/*
private final static String TAG = "audio";
@@ -220,4 +225,68 @@ public class AudioSystem
mErrorCallback.onError(error);
}
}
+
+ /*
+ * AudioPolicyService methods
+ */
+
+ // output devices
+ public static final int DEVICE_OUT_EARPIECE = 0x1;
+ public static final int DEVICE_OUT_SPEAKER = 0x2;
+ public static final int DEVICE_OUT_WIRED_HEADSET = 0x4;
+ public static final int DEVICE_OUT_WIRED_HEADPHONE = 0x8;
+ public static final int DEVICE_OUT_BLUETOOTH_SCO = 0x10;
+ public static final int DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20;
+ public static final int DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40;
+ public static final int DEVICE_OUT_BLUETOOTH_A2DP = 0x80;
+ public static final int DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100;
+ public static final int DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200;
+ public static final int DEVICE_OUT_AUX_DIGITAL = 0x400;
+ public static final int DEVICE_OUT_FM_HEADPHONE = 0x800;
+ public static final int DEVICE_OUT_FM_SPEAKER = 0x1000;
+ public static final int DEVICE_OUT_TTY = 0x2000;
+ public static final int DEVICE_OUT_DEFAULT = 0x8000;
+ // input devices
+ public static final int DEVICE_IN_COMMUNICATION = 0x10000;
+ public static final int DEVICE_IN_AMBIENT = 0x20000;
+ public static final int DEVICE_IN_BUILTIN_MIC1 = 0x40000;
+ public static final int DEVICE_IN_BUILTIN_MIC2 = 0x80000;
+ public static final int DEVICE_IN_MIC_ARRAY = 0x100000;
+ public static final int DEVICE_IN_BLUETOOTH_SCO_HEADSET = 0x200000;
+ public static final int DEVICE_IN_WIRED_HEADSET = 0x400000;
+ public static final int DEVICE_IN_AUX_DIGITAL = 0x800000;
+ public static final int DEVICE_IN_DEFAULT = 0x80000000;
+
+ // device states
+ public static final int DEVICE_STATE_UNAVAILABLE = 0;
+ public static final int DEVICE_STATE_AVAILABLE = 1;
+
+ // phone state
+ public static final int PHONE_STATE_OFFCALL = 0;
+ public static final int PHONE_STATE_RINGING = 1;
+ public static final int PHONE_STATE_INCALL = 2;
+
+ // config for setForceUse
+ public static final int FORCE_NONE = 0;
+ public static final int FORCE_SPEAKER = 1;
+ public static final int FORCE_HEADPHONES = 2;
+ public static final int FORCE_BT_SCO = 3;
+ public static final int FORCE_BT_A2DP = 4;
+ public static final int FORCE_WIRED_ACCESSORY = 5;
+ public static final int FORCE_DEFAULT = FORCE_NONE;
+
+ // usage for serForceUse
+ public static final int FOR_COMMUNICATION = 0;
+ public static final int FOR_MEDIA = 1;
+ public static final int FOR_RECORD = 2;
+
+ public static native int setDeviceConnectionState(int device, int state, String device_address);
+ public static native int getDeviceConnectionState(int device, String device_address);
+ public static native int setPhoneState(int state);
+ public static native int setRingerMode(int mode, int mask);
+ public static native int setForceUse(int usage, int config);
+ public static native int getForceUse(int usage);
+ public static native int initStreamVolume(int stream, int indexMin, int indexMax);
+ public static native int setStreamVolumeIndex(int stream, int index);
+ public static native int getStreamVolumeIndex(int stream);
}
diff --git a/media/java/android/media/AudioTrack.java b/media/java/android/media/AudioTrack.java
index 5f1be9d..7fbe965 100644
--- a/media/java/android/media/AudioTrack.java
+++ b/media/java/android/media/AudioTrack.java
@@ -120,7 +120,7 @@ public class AudioTrack
public static final int ERROR_INVALID_OPERATION = -3;
private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16;
- private static final int ERROR_NATIVESETUP_INVALIDCHANNELCOUNT = -17;
+ private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17;
private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18;
private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19;
private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20;
@@ -198,7 +198,7 @@ public class AudioTrack
/**
* The current audio channel configuration.
*/
- private int mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
/**
* The encoding of the audio samples.
* @see AudioFormat#ENCODING_PCM_8BIT
@@ -235,8 +235,8 @@ public class AudioTrack
* @param sampleRateInHz the sample rate expressed in Hertz. Examples of rates are (but
* not limited to) 44100, 22050 and 11025.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_OUT_MONO} and
+ * {@link AudioFormat#CHANNEL_OUT_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
@@ -298,7 +298,8 @@ public class AudioTrack
&& (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
&& (streamType != AudioManager.STREAM_VOICE_CALL)
&& (streamType != AudioManager.STREAM_NOTIFICATION)
- && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)) {
+ && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
+ && (streamType != AudioManager.STREAM_DTMF)) {
throw (new IllegalArgumentException("Invalid stream type."));
} else {
mStreamType = streamType;
@@ -316,18 +317,18 @@ public class AudioTrack
//--------------
// channel config
switch (channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_DEFAULT:
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_OUT_DEFAULT:
+ case AudioFormat.CHANNEL_OUT_MONO:
mChannelCount = 1;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_OUT_STEREO:
mChannelCount = 2;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ mChannelConfiguration = AudioFormat.CHANNEL_OUT_STEREO;
break;
default:
mChannelCount = 0;
- mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_INVALID;
+ mChannelConfiguration = AudioFormat.CHANNEL_INVALID;
throw(new IllegalArgumentException("Unsupported channel configuration."));
}
@@ -452,8 +453,8 @@ public class AudioTrack
/**
* Returns the configured channel configuration.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO}
- * and {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}.
+ * See {@link AudioFormat#CHANNEL_OUT_MONO}
+ * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
*/
public int getChannelConfiguration() {
return mChannelConfiguration;
@@ -531,8 +532,8 @@ public class AudioTrack
* the expected frequency at which the buffer will be refilled with additional data to play.
* @param sampleRateInHz the sample rate expressed in Hertz.
* @param channelConfig describes the configuration of the audio channels.
- * See {@link AudioFormat#CHANNEL_CONFIGURATION_MONO} and
- * {@link AudioFormat#CHANNEL_CONFIGURATION_STEREO}
+ * See {@link AudioFormat#CHANNEL_OUT_MONO} and
+ * {@link AudioFormat#CHANNEL_OUT_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
@@ -544,10 +545,10 @@ public class AudioTrack
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch(channelConfig) {
- case AudioFormat.CHANNEL_CONFIGURATION_MONO:
+ case AudioFormat.CHANNEL_OUT_MONO:
channelCount = 1;
break;
- case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
+ case AudioFormat.CHANNEL_OUT_STEREO:
channelCount = 2;
break;
default:
diff --git a/media/java/android/media/IAudioService.aidl b/media/java/android/media/IAudioService.aidl
index 9a8264f..bb4252b 100644
--- a/media/java/android/media/IAudioService.aidl
+++ b/media/java/android/media/IAudioService.aidl
@@ -29,9 +29,9 @@ interface IAudioService {
void setStreamVolume(int streamType, int index, int flags);
- void setStreamSolo(int streamType, boolean state, IBinder cb);
+ void setStreamSolo(int streamType, boolean state, IBinder cb);
- void setStreamMute(int streamType, boolean state, IBinder cb);
+ void setStreamMute(int streamType, boolean state, IBinder cb);
int getStreamVolume(int streamType);
@@ -46,23 +46,11 @@ interface IAudioService {
int getVibrateSetting(int vibrateType);
boolean shouldVibrate(int vibrateType);
-
- void setMicrophoneMute(boolean on);
-
- boolean isMicrophoneMute();
void setMode(int mode);
int getMode();
- void setRouting(int mode, int routes, int mask);
-
- int getRouting(int mode);
-
- boolean isMusicActive();
-
- void setParameter(String key, String value);
-
oneway void playSoundEffect(int effectType);
oneway void playSoundEffectVolume(int effectType, float volume);
diff --git a/media/java/android/media/JetPlayer.java b/media/java/android/media/JetPlayer.java
index 4fb0ead..2263605 100644
--- a/media/java/android/media/JetPlayer.java
+++ b/media/java/android/media/JetPlayer.java
@@ -89,7 +89,7 @@ public class JetPlayer
// Jet rendering audio parameters
private static final int JET_OUTPUT_RATE = 22050; // _SAMPLE_RATE_22050 in Android.mk
private static final int JET_OUTPUT_CHANNEL_CONFIG =
- AudioFormat.CHANNEL_CONFIGURATION_STEREO; // NUM_OUTPUT_CHANNELS=2 in Android.mk
+ AudioFormat.CHANNEL_OUT_STEREO; // NUM_OUTPUT_CHANNELS=2 in Android.mk
//--------------------------------------------
diff --git a/media/java/android/media/ToneGenerator.java b/media/java/android/media/ToneGenerator.java
index e5ee9a3..c60a1ac 100644
--- a/media/java/android/media/ToneGenerator.java
+++ b/media/java/android/media/ToneGenerator.java
@@ -724,9 +724,9 @@ public class ToneGenerator
public static final int TONE_CDMA_SIGNAL_OFF = 98;
/** Maximum volume, for use with {@link #ToneGenerator(int,int)} */
- public static final int MAX_VOLUME = AudioSystem.MAX_VOLUME;
+ public static final int MAX_VOLUME = 100;
/** Minimum volume setting, for use with {@link #ToneGenerator(int,int)} */
- public static final int MIN_VOLUME = AudioSystem.MIN_VOLUME;
+ public static final int MIN_VOLUME = 0;
/**
diff --git a/media/jni/soundpool/SoundPool.cpp b/media/jni/soundpool/SoundPool.cpp
index 1fdecdd..0d07abe 100644
--- a/media/jni/soundpool/SoundPool.cpp
+++ b/media/jni/soundpool/SoundPool.cpp
@@ -527,13 +527,14 @@ void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftV
// wrong audio audio buffer size (mAudioBufferSize)
unsigned long toggle = mToggle ^ 1;
void *userData = (void *)((unsigned long)this | toggle);
+ uint32_t channels = (numChannels == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO;
#ifdef USE_SHARED_MEM_BUFFER
newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
- numChannels, sample->getIMemory(), 0, callback, userData);
+ channels, sample->getIMemory(), 0, callback, userData);
#else
newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
- numChannels, frameCount, 0, callback, userData, bufferFrames);
+ channels, frameCount, 0, callback, userData, bufferFrames);
#endif
if (newTrack->initCheck() != NO_ERROR) {
LOGE("Error creating AudioTrack");
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 07c81f7..9d442c3 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -20,7 +20,8 @@ LOCAL_SRC_FILES:= \
mediametadataretriever.cpp \
ToneGenerator.cpp \
JetPlayer.cpp \
- IOMX.cpp
+ IOMX.cpp \
+ IAudioPolicyService.cpp
LOCAL_SHARED_LIBRARIES := \
libui libcutils libutils libbinder libsonivox
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 0a6f4f7..5e35564 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -28,6 +28,7 @@
#include <media/AudioSystem.h>
#include <media/AudioRecord.h>
+#include <media/mediarecorder.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
@@ -45,7 +46,7 @@ namespace android {
// ---------------------------------------------------------------------------
AudioRecord::AudioRecord()
- : mStatus(NO_INIT)
+ : mStatus(NO_INIT), mInput(0)
{
}
@@ -53,15 +54,15 @@ AudioRecord::AudioRecord(
int inputSource,
uint32_t sampleRate,
int format,
- int channelCount,
+ uint32_t channels,
int frameCount,
uint32_t flags,
callback_t cbf,
void* user,
int notificationFrames)
- : mStatus(NO_INIT)
+ : mStatus(NO_INIT), mInput(0)
{
- mStatus = set(inputSource, sampleRate, format, channelCount,
+ mStatus = set(inputSource, sampleRate, format, channels,
frameCount, flags, cbf, user, notificationFrames);
}
@@ -78,6 +79,7 @@ AudioRecord::~AudioRecord()
}
mAudioRecord.clear();
IPCThreadState::self()->flushCommands();
+ AudioSystem::releaseInput(mInput);
}
}
@@ -85,7 +87,7 @@ status_t AudioRecord::set(
int inputSource,
uint32_t sampleRate,
int format,
- int channelCount,
+ uint32_t channels,
int frameCount,
uint32_t flags,
callback_t cbf,
@@ -94,7 +96,7 @@ status_t AudioRecord::set(
bool threadCanCallJava)
{
- LOGV("set(): sampleRate %d, channelCount %d, frameCount %d",sampleRate, channelCount, frameCount);
+ LOGV("set(): sampleRate %d, channels %d, frameCount %d",sampleRate, channels, frameCount);
if (mAudioRecord != 0) {
return INVALID_OPERATION;
}
@@ -104,8 +106,8 @@ status_t AudioRecord::set(
return NO_INIT;
}
- if (inputSource == DEFAULT_INPUT) {
- inputSource = MIC_INPUT;
+ if (inputSource == AUDIO_SOURCE_DEFAULT) {
+ inputSource = AUDIO_SOURCE_MIC;
}
if (sampleRate == 0) {
@@ -115,15 +117,21 @@ status_t AudioRecord::set(
if (format == 0) {
format = AudioSystem::PCM_16_BIT;
}
- if (channelCount == 0) {
- channelCount = 1;
+ // validate parameters
+ if (!AudioSystem::isValidFormat(format)) {
+ LOGE("Invalid format");
+ return BAD_VALUE;
}
- // validate parameters
- if (format != AudioSystem::PCM_16_BIT) {
+ if (!AudioSystem::isInputChannel(channels)) {
return BAD_VALUE;
}
- if (channelCount != 1 && channelCount != 2) {
+ int channelCount = AudioSystem::popCount(channels);
+
+ mInput = AudioSystem::getInput(inputSource,
+ sampleRate, format, channels, (AudioSystem::audio_in_acoustics)flags);
+ if (mInput == 0) {
+ LOGE("Could not get audio output for stream type %d", inputSource);
return BAD_VALUE;
}
@@ -132,14 +140,22 @@ status_t AudioRecord::set(
if (AudioSystem::getInputBufferSize(sampleRate, format, channelCount, &inputBuffSizeInBytes)
!= NO_ERROR) {
LOGE("AudioSystem could not query the input buffer size.");
- return NO_INIT;
+ return NO_INIT;
}
+
if (inputBuffSizeInBytes == 0) {
LOGE("Recording parameters are not supported: sampleRate %d, channelCount %d, format %d",
sampleRate, channelCount, format);
return BAD_VALUE;
}
+
int frameSizeInBytes = channelCount * (format == AudioSystem::PCM_16_BIT ? 2 : 1);
+ if (AudioSystem::isLinearPCM(format)) {
+ frameSizeInBytes = channelCount * (format == AudioSystem::PCM_16_BIT ? sizeof(int16_t) : sizeof(int8_t));
+ } else {
+ frameSizeInBytes = sizeof(int8_t);
+ }
+
// We use 2* size of input buffer for ping pong use of record buffer.
int minFrameCount = 2 * inputBuffSizeInBytes / frameSizeInBytes;
@@ -157,11 +173,11 @@ status_t AudioRecord::set(
// open record channel
status_t status;
- sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), inputSource,
+ sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), mInput,
sampleRate, format,
channelCount,
frameCount,
- ((uint16_t)flags) << 16,
+ ((uint16_t)flags) << 16,
&status);
if (record == 0) {
LOGE("AudioFlinger could not create record track, status: %d", status);
@@ -188,7 +204,7 @@ status_t AudioRecord::set(
mFormat = format;
// Update buffer size in case it has been limited by AudioFlinger during track creation
mFrameCount = mCblk->frameCount;
- mChannelCount = channelCount;
+ mChannelCount = (uint8_t)channelCount;
mActive = 0;
mCbf = cbf;
mNotificationFrames = notificationFrames;
@@ -234,7 +250,11 @@ uint32_t AudioRecord::frameCount() const
int AudioRecord::frameSize() const
{
- return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ if (AudioSystem::isLinearPCM(mFormat)) {
+ return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ } else {
+ return sizeof(uint8_t);
+ }
}
int AudioRecord::inputSource() const
@@ -262,15 +282,18 @@ status_t AudioRecord::start()
}
if (android_atomic_or(1, &mActive) == 0) {
- mNewPosition = mCblk->user + mUpdatePeriod;
- mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
- mCblk->waitTimeMs = 0;
- if (t != 0) {
- t->run("ClientRecordThread", THREAD_PRIORITY_AUDIO_CLIENT);
- } else {
- setpriority(PRIO_PROCESS, 0, THREAD_PRIORITY_AUDIO_CLIENT);
+ ret = AudioSystem::startInput(mInput);
+ if (ret == NO_ERROR) {
+ mNewPosition = mCblk->user + mUpdatePeriod;
+ mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
+ mCblk->waitTimeMs = 0;
+ if (t != 0) {
+ t->run("ClientRecordThread", THREAD_PRIORITY_AUDIO_CLIENT);
+ } else {
+ setpriority(PRIO_PROCESS, 0, THREAD_PRIORITY_AUDIO_CLIENT);
+ }
+ ret = mAudioRecord->start();
}
- ret = mAudioRecord->start();
}
if (t != 0) {
@@ -301,6 +324,7 @@ status_t AudioRecord::stop()
} else {
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
}
+ AudioSystem::stopInput(mInput);
}
if (t != 0) {
@@ -421,7 +445,7 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
"this shouldn't happen (user=%08x, server=%08x)", cblk->user, cblk->server);
cblk->waitTimeMs = 0;
-
+
if (framesReq > framesReady) {
framesReq = framesReady;
}
@@ -437,7 +461,7 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
audioBuffer->channelCount= mChannelCount;
audioBuffer->format = mFormat;
audioBuffer->frameCount = framesReq;
- audioBuffer->size = framesReq*mChannelCount*sizeof(int16_t);
+ audioBuffer->size = framesReq*cblk->frameSize;
audioBuffer->raw = (int8_t*)cblk->buffer(u);
active = mActive;
return active ? status_t(NO_ERROR) : status_t(STOPPED);
@@ -468,7 +492,7 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize)
do {
- audioBuffer.frameCount = userSize/mChannelCount/sizeof(int16_t);
+ audioBuffer.frameCount = userSize/frameSize();
// Calling obtainBuffer() with a negative wait count causes
// an (almost) infinite wait time.
@@ -519,8 +543,8 @@ bool AudioRecord::processAudioBuffer(const sp<ClientRecordThread>& thread)
do {
audioBuffer.frameCount = frames;
- // Calling obtainBuffer() with a wait count of 1
- // limits wait time to WAIT_PERIOD_MS. This prevents from being
+ // Calling obtainBuffer() with a wait count of 1
+ // limits wait time to WAIT_PERIOD_MS. This prevents from being
// stuck here not being able to handle timed events (position, markers).
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
@@ -548,14 +572,14 @@ bool AudioRecord::processAudioBuffer(const sp<ClientRecordThread>& thread)
if (readSize > reqSize) readSize = reqSize;
audioBuffer.size = readSize;
- audioBuffer.frameCount = readSize/mChannelCount/sizeof(int16_t);
+ audioBuffer.frameCount = readSize/frameSize();
frames -= audioBuffer.frameCount;
releaseBuffer(&audioBuffer);
} while (frames);
-
+
// Manage overrun callback
if (mActive && (mCblk->framesAvailable_l() == 0)) {
LOGV("Overrun user: %x, server: %x, flowControlFlag %d", mCblk->user, mCblk->server, mCblk->flowControlFlag);
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 86d0542..1fc1024 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -20,8 +20,18 @@
#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <media/AudioSystem.h>
+#include <media/IAudioPolicyService.h>
#include <math.h>
+// ----------------------------------------------------------------------------
+// the sim build doesn't have gettid
+
+#ifndef HAVE_GETTID
+# define gettid getpid
+#endif
+
+// ----------------------------------------------------------------------------
+
namespace android {
// client singleton for AudioFlinger binder interface
@@ -30,10 +40,9 @@ sp<IAudioFlinger> AudioSystem::gAudioFlinger;
sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
// Cached values
-int AudioSystem::gOutSamplingRate[NUM_AUDIO_OUTPUT_TYPES];
-int AudioSystem::gOutFrameCount[NUM_AUDIO_OUTPUT_TYPES];
-uint32_t AudioSystem::gOutLatency[NUM_AUDIO_OUTPUT_TYPES];
-bool AudioSystem::gA2dpEnabled;
+DefaultKeyedVector<int, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
+DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
+
// Cached values for recording queries
uint32_t AudioSystem::gPrevInSamplingRate = 16000;
int AudioSystem::gPrevInFormat = AudioSystem::PCM_16_BIT;
@@ -65,42 +74,10 @@ const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
binder->linkToDeath(gAudioFlingerClient);
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
gAudioFlinger->registerClient(gAudioFlingerClient);
- // Cache frequently accessed parameters
- for (int output = 0; output < NUM_AUDIO_OUTPUT_TYPES; output++) {
- gOutFrameCount[output] = (int)gAudioFlinger->frameCount(output);
- gOutSamplingRate[output] = (int)gAudioFlinger->sampleRate(output);
- gOutLatency[output] = gAudioFlinger->latency(output);
- }
- gA2dpEnabled = gAudioFlinger->isA2dpEnabled();
}
LOGE_IF(gAudioFlinger==0, "no AudioFlinger!?");
- return gAudioFlinger;
-}
-// routing helper functions
-status_t AudioSystem::speakerphone(bool state) {
- uint32_t routes = state ? ROUTE_SPEAKER : ROUTE_EARPIECE;
- return setRouting(MODE_IN_CALL, routes, ROUTE_ALL);
-}
-
-status_t AudioSystem::isSpeakerphoneOn(bool* state) {
- uint32_t routes = 0;
- status_t s = getRouting(MODE_IN_CALL, &routes);
- *state = !!(routes & ROUTE_SPEAKER);
- return s;
-}
-
-status_t AudioSystem::bluetoothSco(bool state) {
- uint32_t mask = ROUTE_BLUETOOTH_SCO;
- uint32_t routes = state ? mask : ROUTE_EARPIECE;
- return setRouting(MODE_IN_CALL, routes, ROUTE_ALL);
-}
-
-status_t AudioSystem::isBluetoothScoOn(bool* state) {
- uint32_t routes = 0;
- status_t s = getRouting(MODE_IN_CALL, &routes);
- *state = !!(routes & ROUTE_BLUETOOTH_SCO);
- return s;
+ return gAudioFlinger;
}
status_t AudioSystem::muteMicrophone(bool state) {
@@ -148,12 +125,12 @@ status_t AudioSystem::getMasterMute(bool* mute)
return NO_ERROR;
}
-status_t AudioSystem::setStreamVolume(int stream, float value)
+status_t AudioSystem::setStreamVolume(int stream, float value, void *output)
{
if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- af->setStreamVolume(stream, value);
+ af->setStreamVolume(stream, value, output);
return NO_ERROR;
}
@@ -166,12 +143,12 @@ status_t AudioSystem::setStreamMute(int stream, bool mute)
return NO_ERROR;
}
-status_t AudioSystem::getStreamVolume(int stream, float* volume)
+status_t AudioSystem::getStreamVolume(int stream, float* volume, void *output)
{
if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- *volume = af->streamVolume(stream);
+ *volume = af->streamVolume(stream, output);
return NO_ERROR;
}
@@ -192,43 +169,28 @@ status_t AudioSystem::setMode(int mode)
return af->setMode(mode);
}
-status_t AudioSystem::getMode(int* mode)
-{
+
+status_t AudioSystem::isMusicActive(bool* state) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- *mode = af->getMode();
+ *state = af->isMusicActive();
return NO_ERROR;
}
-status_t AudioSystem::setRouting(int mode, uint32_t routes, uint32_t mask)
-{
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
- return af->setRouting(mode, routes, mask);
-}
-status_t AudioSystem::getRouting(int mode, uint32_t* routes)
-{
+status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- uint32_t r = af->getRouting(mode);
- *routes = r;
- return NO_ERROR;
+ return af->setParameters(ioHandle, keyValuePairs);
}
-status_t AudioSystem::isMusicActive(bool* state) {
+String8 AudioSystem::getParameters(audio_io_handle_t ioHandle, const String8& keys) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
- *state = af->isMusicActive();
- return NO_ERROR;
-}
+ String8 result = String8("");
+ if (af == 0) return result;
-// Temporary interface, do not use
-// TODO: Replace with a more generic key:value get/set mechanism
-status_t AudioSystem::setParameter(const char* key, const char* value) {
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
- return af->setParameter(key, value);
+ result = af->getParameters(ioHandle, keys);
+ return result;
}
// convert volume steps to natural log scale
@@ -257,55 +219,108 @@ int AudioSystem::logToLinear(float volume)
status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType)
{
- int output = getOutput(streamType);
-
- if (output == NUM_AUDIO_OUTPUT_TYPES) return PERMISSION_DENIED;
+ OutputDescriptor *outputDesc;
+ audio_io_handle_t output;
+
+ if (streamType == DEFAULT) {
+ streamType = MUSIC;
+ }
+
+ output = getOutput((stream_type)streamType);
+ if (output == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ gLock.lock();
+ outputDesc = AudioSystem::gOutputs.valueFor(output);
+ if (outputDesc == 0) {
+ LOGV("getOutputSamplingRate() no output descriptor for output %p in gOutputs", output);
+ gLock.unlock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *samplingRate = af->sampleRate(output);
+ } else {
+ LOGV("getOutputSamplingRate() reading from output desc");
+ *samplingRate = outputDesc->samplingRate;
+ gLock.unlock();
+ }
+
+ LOGV("getOutputSamplingRate() streamType %d, output %p, sampling rate %d", streamType, output, *samplingRate);
- // gOutSamplingRate[] is updated by getOutput() which calls get_audio_flinger()
- LOGV("getOutputSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, gOutSamplingRate[output]);
-
- *samplingRate = gOutSamplingRate[output];
-
return NO_ERROR;
}
status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType)
{
- int output = getOutput(streamType);
+ OutputDescriptor *outputDesc;
+ audio_io_handle_t output;
+
+ if (streamType == DEFAULT) {
+ streamType = MUSIC;
+ }
- if (output == NUM_AUDIO_OUTPUT_TYPES) return PERMISSION_DENIED;
+ output = getOutput((stream_type)streamType);
+ if (output == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ gLock.lock();
+ outputDesc = AudioSystem::gOutputs.valueFor(output);
+ if (outputDesc == 0) {
+ gLock.unlock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *frameCount = af->frameCount(output);
+ } else {
+ *frameCount = outputDesc->frameCount;
+ gLock.unlock();
+ }
- // gOutFrameCount[] is updated by getOutput() which calls get_audio_flinger()
- LOGV("getOutputFrameCount() streamType %d, output %d, frame count %d", streamType, output, gOutFrameCount[output]);
+ LOGV("getOutputFrameCount() streamType %d, output %p, frameCount %d", streamType, output, *frameCount);
- *frameCount = gOutFrameCount[output];
-
return NO_ERROR;
}
status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType)
{
- int output = getOutput(streamType);
+ OutputDescriptor *outputDesc;
+ audio_io_handle_t output;
+
+ if (streamType == DEFAULT) {
+ streamType = MUSIC;
+ }
- if (output == NUM_AUDIO_OUTPUT_TYPES) return PERMISSION_DENIED;
+ output = getOutput((stream_type)streamType);
+ if (output == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ gLock.lock();
+ outputDesc = AudioSystem::gOutputs.valueFor(output);
+ if (outputDesc == 0) {
+ gLock.unlock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *latency = af->latency(output);
+ } else {
+ *latency = outputDesc->latency;
+ gLock.unlock();
+ }
- // gOutLatency[] is updated by getOutput() which calls get_audio_flinger()
- LOGV("getOutputLatency() streamType %d, output %d, latency %d", streamType, output, gOutLatency[output]);
+ LOGV("getOutputLatency() streamType %d, output %p, latency %d", streamType, output, *latency);
- *latency = gOutLatency[output];
-
return NO_ERROR;
}
-status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
+status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
size_t* buffSize)
{
// Do we have a stale gInBufferSize or are we requesting the input buffer size for new values
- if ((gInBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
+ if ((gInBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
|| (channelCount != gPrevInChannelCount)) {
// save the request params
gPrevInSamplingRate = sampleRate;
- gPrevInFormat = format;
+ gPrevInFormat = format;
gPrevInChannelCount = channelCount;
gInBuffSize = 0;
@@ -314,24 +329,18 @@ status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int ch
return PERMISSION_DENIED;
}
gInBuffSize = af->getInputBufferSize(sampleRate, format, channelCount);
- }
+ }
*buffSize = gInBuffSize;
-
+
return NO_ERROR;
}
// ---------------------------------------------------------------------------
-void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
+void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
Mutex::Autolock _l(AudioSystem::gLock);
- AudioSystem::gAudioFlinger.clear();
- for (int output = 0; output < NUM_AUDIO_OUTPUT_TYPES; output++) {
- gOutFrameCount[output] = 0;
- gOutSamplingRate[output] = 0;
- gOutLatency[output] = 0;
- }
- AudioSystem::gInBuffSize = 0;
+ AudioSystem::gAudioFlinger.clear();
if (gAudioErrorCallback) {
gAudioErrorCallback(DEAD_OBJECT);
@@ -339,33 +348,83 @@ void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) {
LOGW("AudioFlinger server died!");
}
-void AudioSystem::AudioFlingerClient::a2dpEnabledChanged(bool enabled) {
- gA2dpEnabled = enabled;
- LOGV("AudioFlinger A2DP enabled status changed! %d", enabled);
-}
+void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, void *param1, void *param2) {
+ LOGV("ioConfigChanged() event %d", event);
+ audio_io_handle_t ioHandle = (audio_io_handle_t)param1;
+ OutputDescriptor *desc;
+ uint32_t stream;
+
+ if (param1 == 0) return;
-void AudioSystem::setErrorCallback(audio_error_callback cb) {
Mutex::Autolock _l(AudioSystem::gLock);
- gAudioErrorCallback = cb;
-}
-int AudioSystem::getOutput(int streamType)
-{
- // make sure that gA2dpEnabled is valid by calling get_audio_flinger() which in turn
- // will call gAudioFlinger->isA2dpEnabled()
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return NUM_AUDIO_OUTPUT_TYPES;
+ switch (event) {
+ case STREAM_CONFIG_CHANGED:
+ if (param2 == 0) break;
+ stream = *(uint32_t *)param2;
+ LOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %p", stream, ioHandle);
+ if (gStreamOutputMap.indexOfKey(stream) >= 0) {
+ gStreamOutputMap.replaceValueFor(stream, ioHandle);
+ }
+ break;
+ case OUTPUT_OPENED: {
+ if (gOutputs.indexOfKey(ioHandle) >= 0) {
+ LOGV("ioConfigChanged() opening already existing output! %p", ioHandle);
+ break;
+ }
+ if (param2 == 0) break;
+ desc = (OutputDescriptor *)param2;
+
+ OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
+ gOutputs.add(ioHandle, outputDesc);
+ LOGV("ioConfigChanged() new output samplingRate %d, format %d channels %d frameCount %d latency %d",
+ outputDesc->samplingRate, outputDesc->format, outputDesc->channels, outputDesc->frameCount, outputDesc->latency);
+ } break;
+ case OUTPUT_CLOSED: {
+ if (gOutputs.indexOfKey(ioHandle) < 0) {
+ LOGW("ioConfigChanged() closing unknow output! %p", ioHandle);
+ break;
+ }
+ LOGV("ioConfigChanged() output %p closed", ioHandle);
+
+ gOutputs.removeItem(ioHandle);
+ for (int i = gStreamOutputMap.size() - 1; i >= 0 ; i--) {
+ if (gStreamOutputMap.valueAt(i) == ioHandle) {
+ gStreamOutputMap.removeItemsAt(i);
+ }
+ }
+ } break;
+
+ case OUTPUT_CONFIG_CHANGED: {
+ int index = gOutputs.indexOfKey(ioHandle);
+ if (index < 0) {
+ LOGW("ioConfigChanged() modifying unknow output! %p", ioHandle);
+ break;
+ }
+ if (param2 == 0) break;
+ desc = (OutputDescriptor *)param2;
+
+ LOGV("ioConfigChanged() new config for output %p samplingRate %d, format %d channels %d frameCount %d latency %d",
+ ioHandle, desc->samplingRate, desc->format,
+ desc->channels, desc->frameCount, desc->latency);
+ OutputDescriptor *outputDesc = gOutputs.valueAt(index);
+ delete outputDesc;
+ outputDesc = new OutputDescriptor(*desc);
+ gOutputs.replaceValueFor(ioHandle, outputDesc);
+ } break;
+ case INPUT_OPENED:
+ case INPUT_CLOSED:
+ case INPUT_CONFIG_CHANGED:
+ break;
- if (streamType == DEFAULT) {
- streamType = MUSIC;
- }
- if (gA2dpEnabled && routedToA2dpOutput(streamType)) {
- return AUDIO_OUTPUT_A2DP;
- } else {
- return AUDIO_OUTPUT_HARDWARE;
}
}
+void AudioSystem::setErrorCallback(audio_error_callback cb) {
+ Mutex::Autolock _l(gLock);
+ gAudioErrorCallback = cb;
+}
+
bool AudioSystem::routedToA2dpOutput(int streamType) {
switch(streamType) {
case MUSIC:
@@ -379,6 +438,451 @@ bool AudioSystem::routedToA2dpOutput(int streamType) {
}
+// client singleton for AudioPolicyService binder interface
+sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
+sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient;
+
+
+// establish binder interface to AudioPolicyService
+const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
+{
+ gLock.lock();
+ if (gAudioPolicyService.get() == 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_policy"));
+ if (binder != 0)
+ break;
+ LOGW("AudioPolicyService not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while(true);
+ if (gAudioPolicyServiceClient == NULL) {
+ gAudioPolicyServiceClient = new AudioPolicyServiceClient();
+ }
+ binder->linkToDeath(gAudioPolicyServiceClient);
+ gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
+ gLock.unlock();
+ } else {
+ gLock.unlock();
+ }
+ return gAudioPolicyService;
+}
+
+status_t AudioSystem::setDeviceConnectionState(audio_devices device,
+ device_connection_state state,
+ const char *device_address)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ return aps->setDeviceConnectionState(device, state, device_address);
+}
+
+AudioSystem::device_connection_state AudioSystem::getDeviceConnectionState(audio_devices device,
+ const char *device_address)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return DEVICE_STATE_UNAVAILABLE;
+
+ return aps->getDeviceConnectionState(device, device_address);
+}
+
+status_t AudioSystem::setPhoneState(int state)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ return aps->setPhoneState(state);
+}
+
+status_t AudioSystem::setRingerMode(uint32_t mode, uint32_t mask)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setRingerMode(mode, mask);
+}
+
+status_t AudioSystem::setForceUse(force_use usage, forced_config config)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setForceUse(usage, config);
+}
+
+AudioSystem::forced_config AudioSystem::getForceUse(force_use usage)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return FORCE_NONE;
+ return aps->getForceUse(usage);
+}
+
+
+audio_io_handle_t AudioSystem::getOutput(stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ output_flags flags)
+{
+ audio_io_handle_t output = NULL;
+ if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
+ Mutex::Autolock _l(gLock);
+ output = AudioSystem::gStreamOutputMap.valueFor(stream);
+ LOGV_IF((output != NULL), "getOutput() read %p from cache for stream %d", output, stream);
+ }
+ if (output == NULL) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return NULL;
+ output = aps->getOutput(stream, samplingRate, format, channels, flags);
+ if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
+ Mutex::Autolock _l(gLock);
+ AudioSystem::gStreamOutputMap.add(stream, output);
+ }
+ }
+ return output;
+}
+
+status_t AudioSystem::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->startOutput(output, stream);
+}
+
+status_t AudioSystem::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->stopOutput(output, stream);
+}
+
+void AudioSystem::releaseOutput(audio_io_handle_t output)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return;
+ aps->releaseOutput(output);
+}
+
+audio_io_handle_t AudioSystem::getInput(int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ audio_in_acoustics acoustics)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return NULL;
+ return aps->getInput(inputSource, samplingRate, format, channels, acoustics);
+}
+
+status_t AudioSystem::startInput(audio_io_handle_t input)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->startInput(input);
+}
+
+status_t AudioSystem::stopInput(audio_io_handle_t input)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->stopInput(input);
+}
+
+void AudioSystem::releaseInput(audio_io_handle_t input)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return;
+ aps->releaseInput(input);
+}
+
+status_t AudioSystem::initStreamVolume(stream_type stream,
+ int indexMin,
+ int indexMax)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->initStreamVolume(stream, indexMin, indexMax);
+}
+
+status_t AudioSystem::setStreamVolumeIndex(stream_type stream, int index)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setStreamVolumeIndex(stream, index);
+}
+
+status_t AudioSystem::getStreamVolumeIndex(stream_type stream, int *index)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getStreamVolumeIndex(stream, index);
+}
+
+// ---------------------------------------------------------------------------
+
+void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
+ Mutex::Autolock _l(AudioSystem::gLock);
+ AudioSystem::gAudioPolicyService.clear();
+
+ LOGW("AudioPolicyService server died!");
+}
+
+// ---------------------------------------------------------------------------
+
+
+// use emulated popcount optimization
+// http://www.df.lth.se/~john_e/gems/gem002d.html
+uint32_t AudioSystem::popCount(uint32_t u)
+{
+ u = ((u&0x55555555) + ((u>>1)&0x55555555));
+ u = ((u&0x33333333) + ((u>>2)&0x33333333));
+ u = ((u&0x0f0f0f0f) + ((u>>4)&0x0f0f0f0f));
+ u = ((u&0x00ff00ff) + ((u>>8)&0x00ff00ff));
+ u = ( u&0x0000ffff) + (u>>16);
+ return u;
+}
+
+bool AudioSystem::isOutputDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ ((device & ~AudioSystem::DEVICE_OUT_ALL) == 0)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isInputDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ ((device & ~AudioSystem::DEVICE_IN_ALL) == 0)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isA2dpDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ (device & (AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isBluetoothScoDevice(audio_devices device)
+{
+ if ((popCount(device) == 1 ) &&
+ (device & (AudioSystem::DEVICE_OUT_BLUETOOTH_SCO |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+ AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isLowVisibility(stream_type stream)
+{
+ if (stream == AudioSystem::SYSTEM || stream == AudioSystem::NOTIFICATION) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isInputChannel(uint32_t channel)
+{
+ if ((channel & ~AudioSystem::CHANNEL_IN_ALL) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isOutputChannel(uint32_t channel)
+{
+ if ((channel & ~AudioSystem::CHANNEL_OUT_ALL) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AudioSystem::isValidFormat(uint32_t format)
+{
+ switch (format & MAIN_FORMAT_MASK) {
+ case PCM:
+ case MP3:
+ case AMR_NB:
+ case AMR_WB:
+ case AAC:
+ case HE_AAC_V1:
+ case HE_AAC_V2:
+ case VORBIS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool AudioSystem::isLinearPCM(uint32_t format)
+{
+ switch (format) {
+ case PCM_16_BIT:
+ case PCM_8_BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+//------------------------- AudioParameter class implementation ---------------
+
+const char *AudioParameter::keyRouting = "routing";
+const char *AudioParameter::keySamplingRate = "sampling_rate";
+const char *AudioParameter::keyFormat = "format";
+const char *AudioParameter::keyChannels = "channels";
+const char *AudioParameter::keyFrameCount = "frame_count";
+
+AudioParameter::AudioParameter(const String8& keyValuePairs)
+{
+ char *str = new char[keyValuePairs.length()+1];
+ mKeyValuePairs = keyValuePairs;
+
+ strcpy(str, keyValuePairs.string());
+ char *pair = strtok(str, ";");
+ while (pair != NULL) {
+ if (strlen(pair) != 0) {
+ size_t eqIdx = strcspn(pair, "=");
+ String8 key = String8(pair, eqIdx);
+ String8 value;
+ if (eqIdx == strlen(pair)) {
+ value = String8("");
+ } else {
+ value = String8(pair + eqIdx + 1);
+ }
+ if (mParameters.indexOfKey(key) < 0) {
+ mParameters.add(key, value);
+ } else {
+ mParameters.replaceValueFor(key, value);
+ }
+ } else {
+ LOGV("AudioParameter() cstor empty key value pair");
+ }
+ pair = strtok(NULL, ";");
+ }
+
+ delete[] str;
+}
+
+AudioParameter::~AudioParameter()
+{
+ mParameters.clear();
+}
+
+String8 AudioParameter::toString()
+{
+ String8 str = String8("");
+
+ size_t size = mParameters.size();
+ for (size_t i = 0; i < size; i++) {
+ str += mParameters.keyAt(i);
+ str += "=";
+ str += mParameters.valueAt(i);
+ if (i < (size - 1)) str += ";";
+ }
+ return str;
+}
+
+status_t AudioParameter::add(const String8& key, const String8& value)
+{
+ if (mParameters.indexOfKey(key) < 0) {
+ mParameters.add(key, value);
+ return NO_ERROR;
+ } else {
+ mParameters.replaceValueFor(key, value);
+ return ALREADY_EXISTS;
+ }
+}
+
+status_t AudioParameter::addInt(const String8& key, const int value)
+{
+ char str[12];
+ if (snprintf(str, 12, "%d", value) > 0) {
+ String8 str8 = String8(str);
+ return add(key, str8);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::addFloat(const String8& key, const float value)
+{
+ char str[23];
+ if (snprintf(str, 23, "%.10f", value) > 0) {
+ String8 str8 = String8(str);
+ return add(key, str8);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::remove(const String8& key)
+{
+ if (mParameters.indexOfKey(key) >= 0) {
+ mParameters.removeItem(key);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::get(const String8& key, String8& value)
+{
+ if (mParameters.indexOfKey(key) >= 0) {
+ value = mParameters.valueFor(key);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::getInt(const String8& key, int& value)
+{
+ String8 str8;
+ status_t result = get(key, str8);
+ value = 0;
+ if (result == NO_ERROR) {
+ int val;
+ if (sscanf(str8.string(), "%d", &val) == 1) {
+ value = val;
+ } else {
+ result = INVALID_OPERATION;
+ }
+ }
+ return result;
+}
+
+status_t AudioParameter::getFloat(const String8& key, float& value)
+{
+ String8 str8;
+ status_t result = get(key, str8);
+ value = 0;
+ if (result == NO_ERROR) {
+ float val;
+ if (sscanf(str8.string(), "%f", &val) == 1) {
+ value = val;
+ } else {
+ result = INVALID_OPERATION;
+ }
+ }
+ return result;
+}
}; // namespace android
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 7b9eda7..b147d25 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -54,7 +54,7 @@ AudioTrack::AudioTrack(
int streamType,
uint32_t sampleRate,
int format,
- int channelCount,
+ int channels,
int frameCount,
uint32_t flags,
callback_t cbf,
@@ -62,7 +62,7 @@ AudioTrack::AudioTrack(
int notificationFrames)
: mStatus(NO_INIT)
{
- mStatus = set(streamType, sampleRate, format, channelCount,
+ mStatus = set(streamType, sampleRate, format, channels,
frameCount, flags, cbf, user, notificationFrames, 0);
}
@@ -70,7 +70,7 @@ AudioTrack::AudioTrack(
int streamType,
uint32_t sampleRate,
int format,
- int channelCount,
+ int channels,
const sp<IMemory>& sharedBuffer,
uint32_t flags,
callback_t cbf,
@@ -78,7 +78,7 @@ AudioTrack::AudioTrack(
int notificationFrames)
: mStatus(NO_INIT)
{
- mStatus = set(streamType, sampleRate, format, channelCount,
+ mStatus = set(streamType, sampleRate, format, channels,
0, flags, cbf, user, notificationFrames, sharedBuffer);
}
@@ -97,6 +97,7 @@ AudioTrack::~AudioTrack()
}
mAudioTrack.clear();
IPCThreadState::self()->flushCommands();
+ AudioSystem::releaseOutput(getOutput());
}
}
@@ -104,7 +105,7 @@ status_t AudioTrack::set(
int streamType,
uint32_t sampleRate,
int format,
- int channelCount,
+ int channels,
int frameCount,
uint32_t flags,
callback_t cbf,
@@ -150,63 +151,84 @@ status_t AudioTrack::set(
if (format == 0) {
format = AudioSystem::PCM_16_BIT;
}
- if (channelCount == 0) {
- channelCount = 2;
+ if (channels == 0) {
+ channels = AudioSystem::CHANNEL_OUT_STEREO;
}
// validate parameters
- if (((format != AudioSystem::PCM_8_BIT) || sharedBuffer != 0) &&
- (format != AudioSystem::PCM_16_BIT)) {
+ if (!AudioSystem::isValidFormat(format)) {
LOGE("Invalid format");
return BAD_VALUE;
}
- if (channelCount != 1 && channelCount != 2) {
- LOGE("Invalid channel number");
+
+ // force direct flag if format is not linear PCM
+ if (!AudioSystem::isLinearPCM(format)) {
+ flags |= AudioSystem::OUTPUT_FLAG_DIRECT;
+ }
+
+ if (!AudioSystem::isOutputChannel(channels)) {
+ LOGE("Invalid channel mask");
return BAD_VALUE;
}
+ uint32_t channelCount = AudioSystem::popCount(channels);
- // Ensure that buffer depth covers at least audio hardware latency
- uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
- if (minBufCount < 2) minBufCount = 2;
+ audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
+ sampleRate, format, channels, (AudioSystem::output_flags)flags);
- // When playing from shared buffer, playback will start even if last audioflinger
- // block is partly filled.
- if (sharedBuffer != 0 && minBufCount > 1) {
- minBufCount--;
+ if (output == 0) {
+ LOGE("Could not get audio output for stream type %d", streamType);
+ return BAD_VALUE;
}
- int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
-
- if (sharedBuffer == 0) {
- if (frameCount == 0) {
- frameCount = minFrameCount;
- }
- if (notificationFrames == 0) {
- notificationFrames = frameCount/2;
- }
- // Make sure that application is notified with sufficient margin
- // before underrun
- if (notificationFrames > frameCount/2) {
- notificationFrames = frameCount/2;
+ if (!AudioSystem::isLinearPCM(format)) {
+ if (sharedBuffer != 0) {
+ frameCount = sharedBuffer->size();
}
} else {
- // Ensure that buffer alignment matches channelcount
- if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
- LOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
- return BAD_VALUE;
- }
- frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
- }
+ // Ensure that buffer depth covers at least audio hardware latency
+ uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
+ if (minBufCount < 2) minBufCount = 2;
- if (frameCount < minFrameCount) {
- LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
- return BAD_VALUE;
+ int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+
+ if (sharedBuffer == 0) {
+ if (frameCount == 0) {
+ frameCount = minFrameCount;
+ }
+ if (notificationFrames == 0) {
+ notificationFrames = frameCount/2;
+ }
+ // Make sure that application is notified with sufficient margin
+ // before underrun
+ if (notificationFrames > frameCount/2) {
+ notificationFrames = frameCount/2;
+ }
+ if (frameCount < minFrameCount) {
+ LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
+ return BAD_VALUE;
+ }
+ } else {
+ // Ensure that buffer alignment matches channelcount
+ if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
+                LOGE("Invalid buffer alignment: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
+ return BAD_VALUE;
+ }
+ frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
+ }
}
// create the track
status_t status;
sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
- streamType, sampleRate, format, channelCount, frameCount, flags, sharedBuffer, &status);
+ streamType,
+ sampleRate,
+ format,
+ channelCount,
+ frameCount,
+ ((uint16_t)flags) << 16,
+ sharedBuffer,
+ output,
+ &status);
if (track == 0) {
LOGE("AudioFlinger could not create track, status: %d", status);
@@ -245,6 +267,7 @@ status_t AudioTrack::set(
mVolume[RIGHT] = 1.0f;
mStreamType = streamType;
mFormat = format;
+ mChannels = channels;
mChannelCount = channelCount;
mSharedBuffer = sharedBuffer;
mMuted = false;
@@ -259,6 +282,7 @@ status_t AudioTrack::set(
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
+ mFlags = flags;
return NO_ERROR;
}
@@ -297,7 +321,11 @@ uint32_t AudioTrack::frameCount() const
int AudioTrack::frameSize() const
{
- return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ if (AudioSystem::isLinearPCM(mFormat)) {
+ return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ } else {
+ return sizeof(uint8_t);
+ }
}
sp<IMemory>& AudioTrack::sharedBuffer()
@@ -323,6 +351,7 @@ void AudioTrack::start()
}
if (android_atomic_or(1, &mActive) == 0) {
+ AudioSystem::startOutput(getOutput(), (AudioSystem::stream_type)mStreamType);
mNewPosition = mCblk->server + mUpdatePeriod;
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
mCblk->waitTimeMs = 0;
@@ -367,6 +396,7 @@ void AudioTrack::stop()
} else {
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
}
+ AudioSystem::stopOutput(getOutput(), (AudioSystem::stream_type)mStreamType);
}
if (t != 0) {
@@ -382,12 +412,12 @@ bool AudioTrack::stopped() const
void AudioTrack::flush()
{
LOGV("flush");
-
+
// clear playback marker and periodic update counter
mMarkerPosition = 0;
mMarkerReached = false;
mUpdatePeriod = 0;
-
+
if (!mActive) {
mAudioTrack->flush();
@@ -403,6 +433,7 @@ void AudioTrack::pause()
if (android_atomic_and(~1, &mActive) == 1) {
mActive = 0;
mAudioTrack->pause();
+ AudioSystem::stopOutput(getOutput(), (AudioSystem::stream_type)mStreamType);
}
}
@@ -455,7 +486,6 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount
{
audio_track_cblk_t* cblk = mCblk;
-
Mutex::Autolock _l(cblk->lock);
if (loopCount == 0) {
@@ -476,7 +506,7 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount
LOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, framecount %d",
loopStart, loopEnd, mFrameCount);
return BAD_VALUE;
- }
+ }
cblk->loopStart = loopStart;
cblk->loopEnd = loopEnd;
@@ -555,7 +585,7 @@ status_t AudioTrack::setPosition(uint32_t position)
mCblk->server = position;
mCblk->forceReady = 1;
-
+
return NO_ERROR;
}
@@ -571,7 +601,7 @@ status_t AudioTrack::getPosition(uint32_t *position)
status_t AudioTrack::reload()
{
if (!stopped()) return INVALID_OPERATION;
-
+
flush();
mCblk->stepUser(mFrameCount);
@@ -579,6 +609,12 @@ status_t AudioTrack::reload()
return NO_ERROR;
}
+audio_io_handle_t AudioTrack::getOutput()
+{
+ return AudioSystem::getOutput((AudioSystem::stream_type)mStreamType,
+ mCblk->sampleRate, mFormat, mChannels, (AudioSystem::output_flags)mFlags);
+}
+
// -------------------------------------------------------------------------
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
@@ -608,7 +644,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
return WOULD_BLOCK;
timeout = 0;
result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
- if (__builtin_expect(result!=NO_ERROR, false)) {
+ if (__builtin_expect(result!=NO_ERROR, false)) {
cblk->waitTimeMs += waitTimeMs;
if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
// timing out when a loop has been set and we have already written upto loop end
@@ -616,7 +652,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
if (cblk->user < cblk->loopEnd) {
LOGW( "obtainBuffer timed out (is the CPU pegged?) %p "
"user=%08x, server=%08x", this, cblk->user, cblk->server);
- //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
+ //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
cblk->lock.unlock();
mAudioTrack->start();
cblk->lock.lock();
@@ -624,7 +660,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
}
cblk->waitTimeMs = 0;
}
-
+
if (--waitCount == 0) {
return TIMED_OUT;
}
@@ -636,7 +672,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
}
cblk->waitTimeMs = 0;
-
+
if (framesReq > framesAvail) {
framesReq = framesAvail;
}
@@ -653,12 +689,16 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
"but didn't need to be locked. We recovered, but "
"this shouldn't happen (user=%08x, server=%08x)", cblk->user, cblk->server);
- audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
- audioBuffer->channelCount= mChannelCount;
- audioBuffer->format = AudioSystem::PCM_16_BIT;
- audioBuffer->frameCount = framesReq;
- audioBuffer->size = framesReq*mChannelCount*sizeof(int16_t);
- audioBuffer->raw = (int8_t *)cblk->buffer(u);
+ audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
+ audioBuffer->channelCount = mChannelCount;
+ audioBuffer->frameCount = framesReq;
+ audioBuffer->size = framesReq * cblk->frameSize;
+ if (AudioSystem::isLinearPCM(mFormat)) {
+ audioBuffer->format = AudioSystem::PCM_16_BIT;
+ } else {
+ audioBuffer->format = mFormat;
+ }
+ audioBuffer->raw = (int8_t *)cblk->buffer(u);
active = mActive;
return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
@@ -690,10 +730,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
Buffer audioBuffer;
do {
- audioBuffer.frameCount = userSize/mChannelCount;
- if (mFormat == AudioSystem::PCM_16_BIT) {
- audioBuffer.frameCount >>= 1;
- }
+ audioBuffer.frameCount = userSize/frameSize();
+
// Calling obtainBuffer() with a negative wait count causes
// an (almost) infinite wait time.
status_t err = obtainBuffer(&audioBuffer, -1);
@@ -705,6 +743,7 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
}
size_t toWrite;
+
if (mFormat == AudioSystem::PCM_8_BIT) {
// Divide capacity by 2 to take expansion into account
toWrite = audioBuffer.size>>1;
@@ -742,13 +781,13 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
if (mCblk->flowControlFlag == 0) {
mCbf(EVENT_UNDERRUN, mUserData, 0);
if (mCblk->server == mCblk->frameCount) {
- mCbf(EVENT_BUFFER_END, mUserData, 0);
+ mCbf(EVENT_BUFFER_END, mUserData, 0);
}
mCblk->flowControlFlag = 1;
if (mSharedBuffer != 0) return false;
}
}
-
+
// Manage loop end callback
while (mLoopCount > mCblk->loopCount) {
int loopCount = -1;
@@ -767,7 +806,7 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
}
// Manage new position callback
- if(mUpdatePeriod > 0) {
+ if (mUpdatePeriod > 0) {
while (mCblk->server >= mNewPosition) {
mCbf(EVENT_NEW_POS, mUserData, (void *)&mNewPosition);
mNewPosition += mUpdatePeriod;
@@ -784,10 +823,10 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
do {
audioBuffer.frameCount = frames;
-
- // Calling obtainBuffer() with a wait count of 1
- // limits wait time to WAIT_PERIOD_MS. This prevents from being
- // stuck here not being able to handle timed events (position, markers, loops).
+
+ // Calling obtainBuffer() with a wait count of 1
+ // limits wait time to WAIT_PERIOD_MS. This prevents from being
+ // stuck here not being able to handle timed events (position, markers, loops).
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
@@ -832,7 +871,11 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
}
audioBuffer.size = writtenSize;
- audioBuffer.frameCount = writtenSize/mChannelCount/sizeof(int16_t);
+ // NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for
+    // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
+ // 16 bit.
+ audioBuffer.frameCount = writtenSize/mCblk->frameSize;
+
frames -= audioBuffer.frameCount;
releaseBuffer(&audioBuffer);
@@ -949,7 +992,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount)
// we switch to normal obtainBuffer() timeout period
if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
bufferTimeoutMs = MAX_RUN_TIMEOUT_MS - 1;
- }
+ }
// It is possible that we receive a flush()
// while the mixer is processing a block: in this case,
// stepServer() is called After the flush() has reset u & s and
@@ -981,7 +1024,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount)
void* audio_track_cblk_t::buffer(uint32_t offset) const
{
- return (int16_t *)this->buffers + (offset-userBase)*this->channels;
+ return (int8_t *)this->buffers + (offset - userBase) * this->frameSize;
}
uint32_t audio_track_cblk_t::framesAvailable()
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 6fc0cb7..9385367 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -16,6 +16,7 @@
*/
#define LOG_TAG "IAudioFlinger"
+//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <stdint.h>
@@ -44,17 +45,21 @@ enum {
STREAM_VOLUME,
STREAM_MUTE,
SET_MODE,
- GET_MODE,
- SET_ROUTING,
- GET_ROUTING,
SET_MIC_MUTE,
GET_MIC_MUTE,
IS_MUSIC_ACTIVE,
- SET_PARAMETER,
+ SET_PARAMETERS,
+ GET_PARAMETERS,
REGISTER_CLIENT,
GET_INPUTBUFFERSIZE,
- WAKE_UP,
- IS_A2DP_ENABLED
+ OPEN_OUTPUT,
+ OPEN_DUPLICATE_OUTPUT,
+ CLOSE_OUTPUT,
+ SUSPEND_OUTPUT,
+ RESTORE_OUTPUT,
+ OPEN_INPUT,
+ CLOSE_INPUT,
+ SET_STREAM_OUTPUT
};
class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -74,6 +79,7 @@ public:
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
+ void *output,
status_t *status)
{
Parcel data, reply;
@@ -86,6 +92,7 @@ public:
data.writeInt32(frameCount);
data.writeInt32(flags);
data.writeStrongBinder(sharedBuffer->asBinder());
+ data.write(&output, sizeof(void *));
status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
if (lStatus != NO_ERROR) {
LOGE("createTrack error: %s", strerror(-lStatus));
@@ -99,7 +106,7 @@ public:
virtual sp<IAudioRecord> openRecord(
pid_t pid,
- int inputSource,
+ void *input,
uint32_t sampleRate,
int format,
int channelCount,
@@ -110,7 +117,7 @@ public:
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(pid);
- data.writeInt32(inputSource);
+ data.write(&input, sizeof(void *));
data.writeInt32(sampleRate);
data.writeInt32(format);
data.writeInt32(channelCount);
@@ -124,47 +131,47 @@ public:
return interface_cast<IAudioRecord>(reply.readStrongBinder());
}
- virtual uint32_t sampleRate(int output) const
+ virtual uint32_t sampleRate(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(SAMPLE_RATE, data, &reply);
return reply.readInt32();
}
- virtual int channelCount(int output) const
+ virtual int channelCount(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(CHANNEL_COUNT, data, &reply);
return reply.readInt32();
}
- virtual int format(int output) const
+ virtual int format(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(FORMAT, data, &reply);
return reply.readInt32();
}
- virtual size_t frameCount(int output) const
+ virtual size_t frameCount(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(FRAME_COUNT, data, &reply);
return reply.readInt32();
}
- virtual uint32_t latency(int output) const
+ virtual uint32_t latency(void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(output);
+ data.write(&output, sizeof(void *));
remote()->transact(LATENCY, data, &reply);
return reply.readInt32();
}
@@ -203,12 +210,13 @@ public:
return reply.readInt32();
}
- virtual status_t setStreamVolume(int stream, float value)
+ virtual status_t setStreamVolume(int stream, float value, void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(stream);
data.writeFloat(value);
+ data.write(&output, sizeof(void *));
remote()->transact(SET_STREAM_VOLUME, data, &reply);
return reply.readInt32();
}
@@ -223,11 +231,12 @@ public:
return reply.readInt32();
}
- virtual float streamVolume(int stream) const
+ virtual float streamVolume(int stream, void *output) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(stream);
+ data.write(&output, sizeof(void *));
remote()->transact(STREAM_VOLUME, data, &reply);
return reply.readFloat();
}
@@ -241,111 +250,205 @@ public:
return reply.readInt32();
}
- virtual status_t setRouting(int mode, uint32_t routes, uint32_t mask)
+ virtual status_t setMode(int mode)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(mode);
- data.writeInt32(routes);
- data.writeInt32(mask);
- remote()->transact(SET_ROUTING, data, &reply);
+ remote()->transact(SET_MODE, data, &reply);
return reply.readInt32();
}
- virtual uint32_t getRouting(int mode) const
+ virtual status_t setMicMute(bool state)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(mode);
- remote()->transact(GET_ROUTING, data, &reply);
+ data.writeInt32(state);
+ remote()->transact(SET_MIC_MUTE, data, &reply);
return reply.readInt32();
}
- virtual status_t setMode(int mode)
+ virtual bool getMicMute() const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(mode);
- remote()->transact(SET_MODE, data, &reply);
+ remote()->transact(GET_MIC_MUTE, data, &reply);
return reply.readInt32();
}
- virtual int getMode() const
+ virtual bool isMusicActive() const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_MODE, data, &reply);
+ remote()->transact(IS_MUSIC_ACTIVE, data, &reply);
return reply.readInt32();
}
- virtual status_t setMicMute(bool state)
+ virtual status_t setParameters(void *ioHandle, const String8& keyValuePairs)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(state);
- remote()->transact(SET_MIC_MUTE, data, &reply);
+ data.write(&ioHandle, sizeof(void *));
+ data.writeString8(keyValuePairs);
+ remote()->transact(SET_PARAMETERS, data, &reply);
return reply.readInt32();
}
- virtual bool getMicMute() const
+ virtual String8 getParameters(void *ioHandle, const String8& keys)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_MIC_MUTE, data, &reply);
+ data.write(&ioHandle, sizeof(void *));
+ data.writeString8(keys);
+ remote()->transact(GET_PARAMETERS, data, &reply);
+ return reply.readString8();
+ }
+
+ virtual void registerClient(const sp<IAudioFlingerClient>& client)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeStrongBinder(client->asBinder());
+ remote()->transact(REGISTER_CLIENT, data, &reply);
+ }
+
+ virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(sampleRate);
+ data.writeInt32(format);
+ data.writeInt32(channelCount);
+ remote()->transact(GET_INPUTBUFFERSIZE, data, &reply);
return reply.readInt32();
}
- virtual bool isMusicActive() const
+ virtual void *openOutput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ uint32_t flags)
{
Parcel data, reply;
+ uint32_t devices = pDevices ? *pDevices : 0;
+ uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
+ uint32_t format = pFormat ? *pFormat : 0;
+ uint32_t channels = pChannels ? *pChannels : 0;
+ uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
+
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(IS_MUSIC_ACTIVE, data, &reply);
+ data.writeInt32(devices);
+ data.writeInt32(samplingRate);
+ data.writeInt32(format);
+ data.writeInt32(channels);
+ data.writeInt32(latency);
+ data.writeInt32(flags);
+ remote()->transact(OPEN_OUTPUT, data, &reply);
+ void *output;
+ reply.read(&output, sizeof(void *));
+ LOGV("openOutput() returned output, %p", output);
+ devices = reply.readInt32();
+ if (pDevices) *pDevices = devices;
+ samplingRate = reply.readInt32();
+ if (pSamplingRate) *pSamplingRate = samplingRate;
+ format = reply.readInt32();
+ if (pFormat) *pFormat = format;
+ channels = reply.readInt32();
+ if (pChannels) *pChannels = channels;
+ latency = reply.readInt32();
+ if (pLatencyMs) *pLatencyMs = latency;
+ return output;
+ }
+
+ virtual void *openDuplicateOutput(void *output1, void *output2)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.write(&output1, sizeof(void *));
+ data.write(&output2, sizeof(void *));
+ remote()->transact(OPEN_DUPLICATE_OUTPUT, data, &reply);
+ void *output;
+ reply.read(&output, sizeof(void *));
+ return output;
+ }
+
+ virtual status_t closeOutput(void *output)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.write(&output, sizeof(void *));
+ remote()->transact(CLOSE_OUTPUT, data, &reply);
return reply.readInt32();
}
- virtual status_t setParameter(const char* key, const char* value)
+ virtual status_t suspendOutput(void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeCString(key);
- data.writeCString(value);
- remote()->transact(SET_PARAMETER, data, &reply);
+ data.write(&output, sizeof(void *));
+ remote()->transact(SUSPEND_OUTPUT, data, &reply);
return reply.readInt32();
}
-
- virtual void registerClient(const sp<IAudioFlingerClient>& client)
+
+ virtual status_t restoreOutput(void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeStrongBinder(client->asBinder());
- remote()->transact(REGISTER_CLIENT, data, &reply);
+ data.write(&output, sizeof(void *));
+ remote()->transact(RESTORE_OUTPUT, data, &reply);
+ return reply.readInt32();
}
-
- virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
+
+ virtual void *openInput(uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics)
{
Parcel data, reply;
+ uint32_t devices = pDevices ? *pDevices : 0;
+ uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
+ uint32_t format = pFormat ? *pFormat : 0;
+ uint32_t channels = pChannels ? *pChannels : 0;
+
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(sampleRate);
+ data.writeInt32(devices);
+ data.writeInt32(samplingRate);
data.writeInt32(format);
- data.writeInt32(channelCount);
- remote()->transact(GET_INPUTBUFFERSIZE, data, &reply);
- return reply.readInt32();
+ data.writeInt32(channels);
+ data.writeInt32(acoustics);
+ remote()->transact(OPEN_INPUT, data, &reply);
+ void *input;
+ reply.read(&input, sizeof(void *));
+ devices = reply.readInt32();
+ if (pDevices) *pDevices = devices;
+ samplingRate = reply.readInt32();
+ if (pSamplingRate) *pSamplingRate = samplingRate;
+ format = reply.readInt32();
+ if (pFormat) *pFormat = format;
+ channels = reply.readInt32();
+ if (pChannels) *pChannels = channels;
+ return input;
}
-
- virtual void wakeUp()
+
+ virtual status_t closeInput(void *input)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(WAKE_UP, data, &reply, IBinder::FLAG_ONEWAY);
- return;
+ data.write(&input, sizeof(void *));
+ remote()->transact(CLOSE_INPUT, data, &reply);
+ return reply.readInt32();
}
- virtual bool isA2dpEnabled() const
+ virtual status_t setStreamOutput(uint32_t stream, void *output)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(IS_A2DP_ENABLED, data, &reply);
- return (bool)reply.readInt32();
+ data.writeInt32(stream);
+ data.write(&output, sizeof(void *));
+ remote()->transact(SET_STREAM_OUTPUT, data, &reply);
+ return reply.readInt32();
}
};
@@ -367,10 +470,12 @@ status_t BnAudioFlinger::onTransact(
size_t bufferCount = data.readInt32();
uint32_t flags = data.readInt32();
sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
+ void *output;
+ data.read(&output, sizeof(void *));
status_t status;
sp<IAudioTrack> track = createTrack(pid,
streamType, sampleRate, format,
- channelCount, bufferCount, flags, buffer, &status);
+ channelCount, bufferCount, flags, buffer, output, &status);
reply->writeInt32(status);
reply->writeStrongBinder(track->asBinder());
return NO_ERROR;
@@ -378,14 +483,15 @@ status_t BnAudioFlinger::onTransact(
case OPEN_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
pid_t pid = data.readInt32();
- int inputSource = data.readInt32();
+ void *input;
+ data.read(&input, sizeof(void *));
uint32_t sampleRate = data.readInt32();
int format = data.readInt32();
int channelCount = data.readInt32();
size_t bufferCount = data.readInt32();
uint32_t flags = data.readInt32();
status_t status;
- sp<IAudioRecord> record = openRecord(pid, inputSource,
+ sp<IAudioRecord> record = openRecord(pid, input,
sampleRate, format, channelCount, bufferCount, flags, &status);
reply->writeInt32(status);
reply->writeStrongBinder(record->asBinder());
@@ -393,31 +499,36 @@ status_t BnAudioFlinger::onTransact(
} break;
case SAMPLE_RATE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( sampleRate(output) );
return NO_ERROR;
} break;
case CHANNEL_COUNT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( channelCount(output) );
return NO_ERROR;
} break;
case FORMAT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( format(output) );
return NO_ERROR;
} break;
case FRAME_COUNT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( frameCount(output) );
return NO_ERROR;
} break;
case LATENCY: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int output = data.readInt32();
+ void *output;
+ data.read(&output, sizeof(void *));
reply->writeInt32( latency(output) );
return NO_ERROR;
} break;
@@ -444,7 +555,10 @@ status_t BnAudioFlinger::onTransact(
case SET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int stream = data.readInt32();
- reply->writeInt32( setStreamVolume(stream, data.readFloat()) );
+ float volume = data.readFloat();
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32( setStreamVolume(stream, volume, output) );
return NO_ERROR;
} break;
case SET_STREAM_MUTE: {
@@ -456,7 +570,9 @@ status_t BnAudioFlinger::onTransact(
case STREAM_VOLUME: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int stream = data.readInt32();
- reply->writeFloat( streamVolume(stream) );
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeFloat( streamVolume(stream, output) );
return NO_ERROR;
} break;
case STREAM_MUTE: {
@@ -465,31 +581,12 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( streamMute(stream) );
return NO_ERROR;
} break;
- case SET_ROUTING: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int mode = data.readInt32();
- uint32_t routes = data.readInt32();
- uint32_t mask = data.readInt32();
- reply->writeInt32( setRouting(mode, routes, mask) );
- return NO_ERROR;
- } break;
- case GET_ROUTING: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int mode = data.readInt32();
- reply->writeInt32( getRouting(mode) );
- return NO_ERROR;
- } break;
case SET_MODE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int mode = data.readInt32();
reply->writeInt32( setMode(mode) );
return NO_ERROR;
} break;
- case GET_MODE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( getMode() );
- return NO_ERROR;
- } break;
case SET_MIC_MUTE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int state = data.readInt32();
@@ -506,13 +603,23 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( isMusicActive() );
return NO_ERROR;
} break;
- case SET_PARAMETER: {
+ case SET_PARAMETERS: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- const char *key = data.readCString();
- const char *value = data.readCString();
- reply->writeInt32( setParameter(key, value) );
+ void *ioHandle;
+ data.read(&ioHandle, sizeof(void *));
+ String8 keyValuePairs(data.readString8());
+ reply->writeInt32(setParameters(ioHandle, keyValuePairs));
return NO_ERROR;
- } break;
+ } break;
+ case GET_PARAMETERS: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *ioHandle;
+ data.read(&ioHandle, sizeof(void *));
+ String8 keys(data.readString8());
+ reply->writeString8(getParameters(ioHandle, keys));
+ return NO_ERROR;
+ } break;
+
case REGISTER_CLIENT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(data.readStrongBinder());
@@ -527,14 +634,93 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( getInputBufferSize(sampleRate, format, channelCount) );
return NO_ERROR;
} break;
- case WAKE_UP: {
+ case OPEN_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ uint32_t devices = data.readInt32();
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ uint32_t latency = data.readInt32();
+ uint32_t flags = data.readInt32();
+ void *output = openOutput(&devices,
+ &samplingRate,
+ &format,
+ &channels,
+ &latency,
+ flags);
+ LOGV("OPEN_OUTPUT output, %p", output);
+ reply->write(&output, sizeof(void *));
+ reply->writeInt32(devices);
+ reply->writeInt32(samplingRate);
+ reply->writeInt32(format);
+ reply->writeInt32(channels);
+ reply->writeInt32(latency);
+ return NO_ERROR;
+ } break;
+ case OPEN_DUPLICATE_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output1;
+ void *output2;
+ data.read(&output1, sizeof(void *));
+ data.read(&output2, sizeof(void *));
+ void *output = openDuplicateOutput(output1, output2);
+ reply->write(&output, sizeof(void *));
+ return NO_ERROR;
+ } break;
+ case CLOSE_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(closeOutput(output));
+ return NO_ERROR;
+ } break;
+ case SUSPEND_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(suspendOutput(output));
+ return NO_ERROR;
+ } break;
+ case RESTORE_OUTPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ void *output;
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(restoreOutput(output));
+ return NO_ERROR;
+ } break;
+ case OPEN_INPUT: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ uint32_t devices = data.readInt32();
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ uint32_t acoutics = data.readInt32();
+
+ void *input = openInput(&devices,
+ &samplingRate,
+ &format,
+ &channels,
+ acoutics);
+ reply->write(&input, sizeof(void *));
+ reply->writeInt32(devices);
+ reply->writeInt32(samplingRate);
+ reply->writeInt32(format);
+ reply->writeInt32(channels);
+ return NO_ERROR;
+ } break;
+ case CLOSE_INPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- wakeUp();
+ void *input;
+ data.read(&input, sizeof(void *));
+ reply->writeInt32(closeInput(input));
return NO_ERROR;
} break;
- case IS_A2DP_ENABLED: {
+ case SET_STREAM_OUTPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( (int)isA2dpEnabled() );
+ void *output;
+ uint32_t stream = data.readInt32();
+ data.read(&output, sizeof(void *));
+ reply->writeInt32(setStreamOutput(stream, output));
return NO_ERROR;
} break;
default:
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 75699b4..eaae977 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -23,11 +23,12 @@
#include <binder/Parcel.h>
#include <media/IAudioFlingerClient.h>
+#include <media/AudioSystem.h>
namespace android {
enum {
- AUDIO_OUTPUT_CHANGED = IBinder::FIRST_CALL_TRANSACTION
+ IO_CONFIG_CHANGED = IBinder::FIRST_CALL_TRANSACTION
};
class BpAudioFlingerClient : public BpInterface<IAudioFlingerClient>
@@ -38,12 +39,25 @@ public:
{
}
- void a2dpEnabledChanged(bool enabled)
+ void ioConfigChanged(int event, void *param1, void *param2)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlingerClient::getInterfaceDescriptor());
- data.writeInt32((int)enabled);
- remote()->transact(AUDIO_OUTPUT_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
+ data.writeInt32(event);
+ data.write(&param1, sizeof(void *));
+ if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
+ uint32_t stream = *(uint32_t *)param2;
+ LOGV("ioConfigChanged stream %d", stream);
+ data.writeInt32(stream);
+ } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
+ AudioSystem::OutputDescriptor *desc = (AudioSystem::OutputDescriptor *)param2;
+ data.writeInt32(desc->samplingRate);
+ data.writeInt32(desc->format);
+ data.writeInt32(desc->channels);
+ data.writeInt32(desc->frameCount);
+ data.writeInt32(desc->latency);
+ }
+ remote()->transact(IO_CONFIG_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
}
};
@@ -55,10 +69,27 @@ status_t BnAudioFlingerClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
- case AUDIO_OUTPUT_CHANGED: {
+ case IO_CONFIG_CHANGED: {
CHECK_INTERFACE(IAudioFlingerClient, data, reply);
- bool enabled = (bool)data.readInt32();
- a2dpEnabledChanged(enabled);
+ int event = data.readInt32();
+ void *param1;
+ void *param2 = 0;
+ AudioSystem::OutputDescriptor desc;
+ uint32_t stream;
+ data.read(&param1, sizeof(void *));
+ if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
+ stream = data.readInt32();
+ param2 = &stream;
+ LOGV("STREAM_CONFIG_CHANGED stream %d", stream);
+ } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
+ desc.samplingRate = data.readInt32();
+ desc.format = data.readInt32();
+ desc.channels = data.readInt32();
+ desc.frameCount = data.readInt32();
+ desc.latency = data.readInt32();
+ param2 = &desc;
+ }
+ ioConfigChanged(event, param1, param2);
return NO_ERROR;
} break;
default:
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
new file mode 100644
index 0000000..0d8a329
--- /dev/null
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -0,0 +1,423 @@
+/*
+**
+** Copyright 2009, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IAudioPolicyService"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+
+#include <media/IAudioPolicyService.h>
+
+namespace android {
+
+enum {
+ SET_DEVICE_CONNECTION_STATE = IBinder::FIRST_CALL_TRANSACTION,
+ GET_DEVICE_CONNECTION_STATE,
+ SET_PHONE_STATE,
+ SET_RINGER_MODE,
+ SET_FORCE_USE,
+ GET_FORCE_USE,
+ GET_OUTPUT,
+ START_OUTPUT,
+ STOP_OUTPUT,
+ RELEASE_OUTPUT,
+ GET_INPUT,
+ START_INPUT,
+ STOP_INPUT,
+ RELEASE_INPUT,
+ INIT_STREAM_VOLUME,
+ SET_STREAM_VOLUME,
+ GET_STREAM_VOLUME
+};
+
+class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
+{
+public:
+ BpAudioPolicyService(const sp<IBinder>& impl)
+ : BpInterface<IAudioPolicyService>(impl)
+ {
+ }
+
+ virtual status_t setDeviceConnectionState(
+ AudioSystem::audio_devices device,
+ AudioSystem::device_connection_state state,
+ const char *device_address)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(device));
+ data.writeInt32(static_cast <uint32_t>(state));
+ data.writeCString(device_address);
+ remote()->transact(SET_DEVICE_CONNECTION_STATE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(
+ AudioSystem::audio_devices device,
+ const char *device_address)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(device));
+ data.writeCString(device_address);
+ remote()->transact(GET_DEVICE_CONNECTION_STATE, data, &reply);
+ return static_cast <AudioSystem::device_connection_state>(reply.readInt32());
+ }
+
+ virtual status_t setPhoneState(int state)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(state);
+ remote()->transact(SET_PHONE_STATE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t setRingerMode(uint32_t mode, uint32_t mask)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(mode);
+ data.writeInt32(mask);
+ remote()->transact(SET_RINGER_MODE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(usage));
+ data.writeInt32(static_cast <uint32_t>(config));
+ remote()->transact(SET_FORCE_USE, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(usage));
+ remote()->transact(GET_FORCE_USE, data, &reply);
+ return static_cast <AudioSystem::forced_config> (reply.readInt32());
+ }
+
+ virtual audio_io_handle_t getOutput(
+ AudioSystem::stream_type stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::output_flags flags)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ data.writeInt32(samplingRate);
+ data.writeInt32(static_cast <uint32_t>(format));
+ data.writeInt32(channels);
+ data.writeInt32(static_cast <uint32_t>(flags));
+ remote()->transact(GET_OUTPUT, data, &reply);
+ audio_io_handle_t output;
+ reply.read(&output, sizeof(audio_io_handle_t));
+ return output;
+ }
+
+ virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&output, sizeof(audio_io_handle_t));
+ data.writeInt32(stream);
+ remote()->transact(START_OUTPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&output, sizeof(audio_io_handle_t));
+ data.writeInt32(stream);
+ remote()->transact(STOP_OUTPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual void releaseOutput(audio_io_handle_t output)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&output, sizeof(audio_io_handle_t));
+ remote()->transact(RELEASE_OUTPUT, data, &reply);
+ }
+
+ virtual audio_io_handle_t getInput(
+ int inputSource,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ AudioSystem::audio_in_acoustics acoustics)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(inputSource);
+ data.writeInt32(samplingRate);
+ data.writeInt32(static_cast <uint32_t>(format));
+ data.writeInt32(channels);
+ data.writeInt32(static_cast <uint32_t>(acoustics));
+ remote()->transact(GET_INPUT, data, &reply);
+ audio_io_handle_t input;
+ reply.read(&input, sizeof(audio_io_handle_t));
+ return input;
+ }
+
+ virtual status_t startInput(audio_io_handle_t input)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&input, sizeof(audio_io_handle_t));
+ remote()->transact(START_INPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t stopInput(audio_io_handle_t input)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&input, sizeof(audio_io_handle_t));
+ remote()->transact(STOP_INPUT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual void releaseInput(audio_io_handle_t input)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&input, sizeof(audio_io_handle_t));
+ remote()->transact(RELEASE_INPUT, data, &reply);
+ }
+
+ virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ int indexMin,
+ int indexMax)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ data.writeInt32(indexMin);
+ data.writeInt32(indexMax);
+ remote()->transact(INIT_STREAM_VOLUME, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ data.writeInt32(index);
+ remote()->transact(SET_STREAM_VOLUME, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ remote()->transact(GET_STREAM_VOLUME, data, &reply);
+ int lIndex = reply.readInt32();
+ if (index) *index = lIndex;
+ return static_cast <status_t> (reply.readInt32());
+ }
+};
+
+IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
+
+// ----------------------------------------------------------------------
+
+
+status_t BnAudioPolicyService::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch(code) {
+ case SET_DEVICE_CONNECTION_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32());
+ AudioSystem::device_connection_state state = static_cast <AudioSystem::device_connection_state>(data.readInt32());
+ const char *device_address = data.readCString();
+ reply->writeInt32(static_cast <uint32_t>(setDeviceConnectionState(device, state, device_address)));
+ return NO_ERROR;
+ } break;
+
+ case GET_DEVICE_CONNECTION_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32());
+ const char *device_address = data.readCString();
+ reply->writeInt32(static_cast <uint32_t>(getDeviceConnectionState(device, device_address)));
+ return NO_ERROR;
+ } break;
+
+ case SET_PHONE_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ reply->writeInt32(static_cast <uint32_t>(setPhoneState(data.readInt32())));
+ return NO_ERROR;
+ } break;
+
+ case SET_RINGER_MODE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ uint32_t mode = data.readInt32();
+ uint32_t mask = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(setRingerMode(mode, mask)));
+ return NO_ERROR;
+ } break;
+
+ case SET_FORCE_USE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
+ AudioSystem::forced_config config = static_cast <AudioSystem::forced_config>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config)));
+ return NO_ERROR;
+ } break;
+
+ case GET_FORCE_USE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(getForceUse(usage)));
+ return NO_ERROR;
+ } break;
+
+ case GET_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ AudioSystem::output_flags flags = static_cast <AudioSystem::output_flags>(data.readInt32());
+
+ audio_io_handle_t output = getOutput(stream,
+ samplingRate,
+ format,
+ channels,
+ flags);
+ reply->write(&output, sizeof(audio_io_handle_t));
+ return NO_ERROR;
+ } break;
+
+ case START_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t output;
+ data.read(&output, sizeof(audio_io_handle_t));
+ uint32_t stream = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(startOutput(output, (AudioSystem::stream_type)stream)));
+ return NO_ERROR;
+ } break;
+
+ case STOP_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t output;
+ data.read(&output, sizeof(audio_io_handle_t));
+ uint32_t stream = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(stopOutput(output, (AudioSystem::stream_type)stream)));
+ return NO_ERROR;
+ } break;
+
+ case RELEASE_OUTPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t output;
+ data.read(&output, sizeof(audio_io_handle_t));
+ releaseOutput(output);
+ return NO_ERROR;
+ } break;
+
+ case GET_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ int inputSource = data.readInt32();
+ uint32_t samplingRate = data.readInt32();
+ uint32_t format = data.readInt32();
+ uint32_t channels = data.readInt32();
+ AudioSystem::audio_in_acoustics acoustics = static_cast <AudioSystem::audio_in_acoustics>(data.readInt32());
+ audio_io_handle_t input = getInput(inputSource,
+ samplingRate,
+ format,
+ channels,
+ acoustics);
+ reply->write(&input, sizeof(audio_io_handle_t));
+ return NO_ERROR;
+ } break;
+
+ case START_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t input;
+ data.read(&input, sizeof(audio_io_handle_t));
+ reply->writeInt32(static_cast <uint32_t>(startInput(input)));
+ return NO_ERROR;
+ } break;
+
+ case STOP_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t input;
+ data.read(&input, sizeof(audio_io_handle_t));
+ reply->writeInt32(static_cast <uint32_t>(stopInput(input)));
+ return NO_ERROR;
+ } break;
+
+ case RELEASE_INPUT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_io_handle_t input;
+ data.read(&input, sizeof(audio_io_handle_t));
+ releaseInput(input);
+ return NO_ERROR;
+ } break;
+
+ case INIT_STREAM_VOLUME: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ int indexMin = data.readInt32();
+ int indexMax = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(initStreamVolume(stream, indexMin,indexMax)));
+ return NO_ERROR;
+ } break;
+
+ case SET_STREAM_VOLUME: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ int index = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index)));
+ return NO_ERROR;
+ } break;
+
+ case GET_STREAM_VOLUME: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ int index;
+ status_t status = getStreamVolumeIndex(stream, &index);
+ reply->writeInt32(index);
+ reply->writeInt32(static_cast <uint32_t>(status));
+ return NO_ERROR;
+ } break;
+
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 586aacb..ee9e1d8 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -99,7 +99,7 @@ int JetPlayer::init()
mAudioTrack->set(AudioSystem::MUSIC, //TODO parametrize this
pLibConfig->sampleRate,
1, // format = PCM 16bits per sample,
- pLibConfig->numChannels,
+ (pLibConfig->numChannels == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
mTrackBufferSize,
0);
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 5435da7..3ea64ae 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1001,7 +1001,7 @@ bool ToneGenerator::initAudioTrack() {
// Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
mpAudioTrack
- = new AudioTrack(mStreamType, 0, AudioSystem::PCM_16_BIT, 1, 0, 0, audioCallback, this, 0);
+ = new AudioTrack(mStreamType, 0, AudioSystem::PCM_16_BIT, AudioSystem::CHANNEL_OUT_MONO, 0, 0, audioCallback, this, 0);
if (mpAudioTrack == 0) {
LOGE("AudioTrack allocation failed");
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 77f7434..1d960c5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1310,11 +1310,21 @@ status_t MediaPlayerService::AudioOutput::open(
AudioTrack *t;
if (mCallback != NULL) {
t = new AudioTrack(
- mStreamType, sampleRate, format, channelCount, frameCount,
- 0 /* flags */, CallbackWrapper, this);
+ mStreamType,
+ sampleRate,
+ format,
+ (channelCount == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ frameCount,
+ 0 /* flags */,
+ CallbackWrapper,
+ this);
} else {
t = new AudioTrack(
- mStreamType, sampleRate, format, channelCount, frameCount);
+ mStreamType,
+ sampleRate,
+ format,
+ (channelCount == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ frameCount);
}
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index fbea0d4..7094cfa 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -28,6 +28,7 @@
#include <AudioFlinger.h>
#include <CameraService.h>
#include <MediaPlayerService.h>
+#include <AudioPolicyService.h>
#include <private/android_filesystem_config.h>
using namespace android;
@@ -40,6 +41,7 @@ int main(int argc, char** argv)
AudioFlinger::instantiate();
MediaPlayerService::instantiate();
CameraService::instantiate();
+ AudioPolicyService::instantiate();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
diff --git a/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java b/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java
index aefedc3..cea3a5a 100644
--- a/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java
+++ b/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/MediaAudioTrackTest.java
@@ -140,7 +140,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorMono16MusicStream: " + res.mResultLog, res.mResult);
@@ -153,7 +153,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorStereo16MusicStream: " + res.mResultLog, res.mResult);
@@ -166,7 +166,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorMono16MusicStatic: " + res.mResultLog, res.mResult);
@@ -179,7 +179,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorStereo16MusicStatic: " + res.mResultLog, res.mResult);
@@ -196,7 +196,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorMono8MusicStream: " + res.mResultLog, res.mResult);
@@ -208,7 +208,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STREAM,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_INITIALIZED);
assertTrue("testConstructorStereo8MusicStream: " + res.mResultLog, res.mResult);
@@ -220,7 +220,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorMono8MusicStatic: " + res.mResultLog, res.mResult);
@@ -232,7 +232,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
TestResults res = constructorTestMultiSampleRate(
AudioManager.STREAM_MUSIC, AudioTrack.MODE_STATIC,
- AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_8BIT,
+ AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_8BIT,
AudioTrack.STATE_NO_STATIC_DATA);
assertTrue("testConstructorStereo8MusicStatic: " + res.mResultLog, res.mResult);
@@ -248,15 +248,15 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
public void testConstructorStreamType() throws Exception {
// constants for test
final int TYPE_TEST_SR = 22050;
- final int TYPE_TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TYPE_TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TYPE_TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TYPE_TEST_MODE = AudioTrack.MODE_STREAM;
final int[] STREAM_TYPES = { AudioManager.STREAM_ALARM, AudioManager.STREAM_BLUETOOTH_SCO,
AudioManager.STREAM_MUSIC, AudioManager.STREAM_NOTIFICATION,
AudioManager.STREAM_RING, AudioManager.STREAM_SYSTEM,
- AudioManager.STREAM_VOICE_CALL };
+ AudioManager.STREAM_VOICE_CALL, AudioManager.STREAM_DTMF, };
final String[] STREAM_NAMES = { "STREAM_ALARM", "STREAM_BLUETOOTH_SCO", "STREAM_MUSIC",
- "STREAM_NOTIFICATION", "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL" };
+ "STREAM_NOTIFICATION", "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL", "STREAM_DTMF" };
boolean localTestRes = true;
AudioTrack track = null;
@@ -303,7 +303,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterInit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -324,7 +324,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionIncrease";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -352,7 +352,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterFlush";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -382,7 +382,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterStop";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -413,7 +413,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionAfterPause";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -448,7 +448,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetStereoVolumeMax";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -474,7 +474,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetStereoVolumeMin";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -500,7 +500,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetStereoVolumeMid";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -526,7 +526,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRate";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -552,7 +552,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRateZero";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -574,7 +574,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRateTwiceOutputSR";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -601,7 +601,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetGetPlaybackRate";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -628,7 +628,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackRateUninit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -655,7 +655,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionPlaying";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -682,7 +682,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionStopped";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -710,7 +710,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionPaused";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -738,7 +738,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetPlaybackHeadPositionTooFar";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -770,7 +770,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsStream";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -794,7 +794,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsStartAfterEnd";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -818,7 +818,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsSuccess";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -842,7 +842,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsLoopTooLong";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -868,7 +868,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsStartTooFar";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -896,7 +896,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testSetLoopPointsEndTooFar";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STATIC;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -929,7 +929,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteOffsetTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -953,7 +953,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortOffsetTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -977,7 +977,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteSizeTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1001,7 +1001,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortSizeTooBig";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1025,7 +1025,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteNegativeOffset";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1049,7 +1049,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortNegativeOffset";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1073,7 +1073,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByteNegativeSize";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1097,7 +1097,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShortNegativeSize";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1121,7 +1121,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByte";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1145,7 +1145,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShort";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1169,7 +1169,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteByte8bit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1193,7 +1193,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constants for test
final String TEST_NAME = "testWriteShort8bit";
final int TEST_SR = 22050;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1221,7 +1221,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
// constant for test
final String TEST_NAME = "testGetMinBufferSizeTooLowSR";
final int TEST_SR = 3999;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
@@ -1238,7 +1238,7 @@ public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaF
        // constant for test
final String TEST_NAME = "testGetMinBufferSizeTooHighSR";
final int TEST_SR = 48001;
- final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
+ final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;