-rw-r--r--  CleanSpec.mk | 52
-rw-r--r--  camera/CameraParameters.cpp | 5
-rw-r--r--  cmds/stagefright/Android.mk | 8
-rw-r--r--  cmds/stagefright/SimplePlayer.cpp | 6
-rw-r--r--  cmds/stagefright/stagefright.cpp | 6
-rw-r--r--  include/camera/CameraParameters.h | 10
-rw-r--r--  include/media/AudioRecord.h | 95
-rw-r--r--  include/media/AudioSystem.h | 42
-rw-r--r--  include/media/AudioTrack.h | 131
-rw-r--r--  include/media/EffectsFactoryApi.h | 18
-rw-r--r--  include/media/IAudioFlinger.h | 17
-rw-r--r--  include/media/IAudioPolicyService.h | 5
-rw-r--r--  include/media/SoundPool.h | 6
-rw-r--r--  include/media/ToneGenerator.h | 2
-rw-r--r--  include/media/nbaio/NBAIO.h | 14
-rw-r--r--  include/media/stagefright/MediaDefs.h | 1
-rw-r--r--  include/private/media/AudioTrackShared.h | 89
-rwxr-xr-x  libvideoeditor/lvpp/Android.mk | 1
-rwxr-xr-x  libvideoeditor/lvpp/VideoEditorPlayer.cpp | 4
-rw-r--r--  media/libeffects/downmix/Android.mk | 2
-rw-r--r--  media/libeffects/downmix/EffectDownmix.c | 23
-rw-r--r--  media/libeffects/downmix/EffectDownmix.h | 3
-rw-r--r--  media/libeffects/lvm/lib/Android.mk | 9
-rw-r--r--  media/libeffects/lvm/wrapper/Android.mk | 11
-rw-r--r--  media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp | 40
-rw-r--r-- [-rwxr-xr-x]  media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp | 28
-rw-r--r-- [-rwxr-xr-x]  media/libeffects/preprocessing/Android.mk | 2
-rw-r--r-- [-rwxr-xr-x]  media/libeffects/preprocessing/PreProcessing.cpp | 28
-rw-r--r--  media/libeffects/testlibs/EffectEqualizer.cpp | 19
-rw-r--r--  media/libeffects/testlibs/EffectReverb.c | 19
-rw-r--r--  media/libeffects/testlibs/EffectReverb.h | 3
-rw-r--r--  media/libeffects/visualizer/Android.mk | 2
-rw-r--r--  media/libeffects/visualizer/EffectVisualizer.cpp | 22
-rw-r--r--  media/libmedia/Android.mk | 2
-rw-r--r--  media/libmedia/AudioEffect.cpp | 21
-rw-r--r--  media/libmedia/AudioRecord.cpp | 189
-rw-r--r--  media/libmedia/AudioSystem.cpp | 44
-rw-r--r--  media/libmedia/AudioTrack.cpp | 601
-rw-r--r--  media/libmedia/IAudioFlinger.cpp | 40
-rw-r--r--  media/libmedia/IAudioFlingerClient.cpp | 3
-rw-r--r--  media/libmedia/IAudioPolicyService.cpp | 9
-rw-r--r--  media/libmedia/SoundPool.cpp | 6
-rw-r--r--  media/libmedia/ToneGenerator.cpp | 2
-rw-r--r--  media/libmedia/Visualizer.cpp | 6
-rw-r--r--  media/libmedia_native/Android.mk | 11
-rw-r--r--  media/libmediaplayerservice/Android.mk | 1
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 4
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayer.cpp | 2
-rw-r--r--  media/libmediaplayerservice/nuplayer/RTSPSource.cpp | 7
-rw-r--r--  media/libnbaio/NBAIO.cpp | 124
-rw-r--r--  media/libstagefright/ACodec.cpp | 12
-rw-r--r--  media/libstagefright/Android.mk | 1
-rw-r--r--  media/libstagefright/AudioSource.cpp | 2
-rw-r--r-- [-rwxr-xr-x]  media/libstagefright/CameraSource.cpp | 0
-rw-r--r-- [-rwxr-xr-x]  media/libstagefright/MPEG4Writer.cpp | 0
-rw-r--r--  media/libstagefright/MediaDefs.cpp | 1
-rw-r--r-- [-rwxr-xr-x]  media/libstagefright/OMXCodec.cpp | 2
-rw-r--r-- [-rwxr-xr-x]  media/libstagefright/SkipCutBuffer.cpp | 0
-rw-r--r--  media/libstagefright/StagefrightMediaScanner.cpp | 2
-rw-r--r--  media/libstagefright/ThrottledSource.cpp | 12
-rw-r--r--  media/libstagefright/WAVExtractor.cpp | 61
-rw-r--r--  media/libstagefright/codecs/gsm/Android.mk | 4
-rw-r--r--  media/libstagefright/codecs/gsm/dec/Android.mk | 21
-rw-r--r--  media/libstagefright/codecs/gsm/dec/MODULE_LICENSE_APACHE2 | 0
-rw-r--r--  media/libstagefright/codecs/gsm/dec/NOTICE | 190
-rw-r--r--  media/libstagefright/codecs/gsm/dec/SoftGSM.cpp | 269
-rw-r--r--  media/libstagefright/codecs/gsm/dec/SoftGSM.h | 65
-rw-r--r--  media/libstagefright/codecs/on2/dec/SoftVPX.cpp | 2
-rw-r--r--  media/libstagefright/include/FragmentedMP4Parser.h | 2
-rw-r--r--  media/libstagefright/include/ThrottledSource.h | 36
-rw-r--r--  media/libstagefright/matroska/MatroskaExtractor.cpp | 74
-rw-r--r--  media/libstagefright/mp4/FragmentedMP4Parser.cpp | 4
-rw-r--r--  media/libstagefright/omx/SoftOMXPlugin.cpp | 1
-rw-r--r--  media/libstagefright/wifi-display/sink/TunnelRenderer.cpp | 5
-rw-r--r--  services/audioflinger/Android.mk | 9
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 7856
-rw-r--r--  services/audioflinger/AudioFlinger.h | 1584
-rw-r--r--  services/audioflinger/AudioMixer.cpp | 56
-rw-r--r--  services/audioflinger/AudioMixer.h | 33
-rw-r--r--  services/audioflinger/AudioPolicyService.cpp | 21
-rw-r--r--  services/audioflinger/AudioPolicyService.h | 10
-rw-r--r--  services/audioflinger/Effects.cpp | 1684
-rw-r--r--  services/audioflinger/Effects.h | 359
-rw-r--r--  services/audioflinger/PlaybackTracks.h | 285
-rw-r--r--  services/audioflinger/RecordTracks.h | 62
-rw-r--r--  services/audioflinger/StateQueue.h | 68
-rw-r--r--  services/audioflinger/Threads.cpp | 4426
-rw-r--r--  services/audioflinger/Threads.h | 801
-rw-r--r--  services/audioflinger/TrackBase.h | 139
-rw-r--r--  services/audioflinger/Tracks.cpp | 1789
-rw-r--r--  services/camera/libcameraservice/Android.mk | 4
-rw-r--r--  services/camera/libcameraservice/Camera2Client.cpp | 13
-rw-r--r--  services/camera/libcameraservice/Camera2Device.cpp | 14
-rw-r--r--  services/camera/libcameraservice/CameraClient.cpp | 4
-rw-r--r--  services/camera/libcameraservice/CameraHardwareInterface.h | 5
-rw-r--r--  services/camera/libcameraservice/camera2/BurstCapture.cpp | 9
-rw-r--r--  services/camera/libcameraservice/camera2/CallbackProcessor.cpp | 4
-rw-r--r--  services/camera/libcameraservice/camera2/CaptureSequencer.cpp | 14
-rw-r--r--  services/camera/libcameraservice/camera2/FrameProcessor.cpp | 5
-rw-r--r--  services/camera/libcameraservice/camera2/JpegCompressor.cpp | 6
-rw-r--r--  services/camera/libcameraservice/camera2/JpegProcessor.cpp | 5
-rw-r--r--  services/camera/libcameraservice/camera2/Parameters.cpp | 19
-rw-r--r--  services/camera/libcameraservice/camera2/Parameters.h | 2
-rw-r--r--  services/camera/libcameraservice/camera2/StreamingProcessor.cpp | 3
-rw-r--r--  services/camera/libcameraservice/camera2/ZslProcessor.cpp | 5
105 files changed, 11595 insertions, 10260 deletions
diff --git a/CleanSpec.mk b/CleanSpec.mk
new file mode 100644
index 0000000..e6d9ebf
--- /dev/null
+++ b/CleanSpec.mk
@@ -0,0 +1,52 @@
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If you don't need to do a full clean build but would like to touch
+# a file or delete some intermediate files, add a clean step to the end
+# of the list. These steps will only be run once, if they haven't been
+# run before.
+#
+# E.g.:
+# $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
+# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
+#
+# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
+# files that are missing or have been moved.
+#
+# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
+# Use $(OUT_DIR) to refer to the "out" directory.
+#
+# If you need to re-do something that's already mentioned, just copy
+# the command and add it to the bottom of the list. E.g., if a change
+# that you made last week required touching a file and a change you
+# made today requires touching the same file, just copy the old
+# touch step and add it to the end of the list.
+#
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+# For example:
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
+#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
+#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libmedia_native_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/libmedia_native.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/system/lib/libmedia_native.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libmedia_native.so)
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index fd91bf2..d10f2e5 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -90,6 +90,7 @@ const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint";
const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
+const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
const char CameraParameters::TRUE[] = "true";
const char CameraParameters::FALSE[] = "false";
@@ -167,6 +168,10 @@ const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
+// Values for light fx settings
+const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
+const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
+
CameraParameters::CameraParameters()
: mMap()
{
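The new light-fx key and values are plain string parameters, so a client drives them through the usual CameraParameters string map. A minimal sketch (not part of this change), assuming the standard set()/get() accessors:

    #include <camera/CameraParameters.h>
    using namespace android;

    void enableHdrLightFx(CameraParameters &params) {
        // KEY_LIGHTFX / LIGHTFX_HDR are the constants added by this change.
        params.set(CameraParameters::KEY_LIGHTFX, CameraParameters::LIGHTFX_HDR);
        const char *mode = params.get(CameraParameters::KEY_LIGHTFX);
        // mode is now "high-dynamic-range", or NULL if the HAL rejected the key.
    }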
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 1247588..f60b1a4 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -8,7 +8,7 @@ LOCAL_SRC_FILES:= \
SineSource.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libmedia_native libutils libbinder libstagefright_foundation \
+ libstagefright libmedia libutils libbinder libstagefright_foundation \
libjpeg libgui
LOCAL_C_INCLUDES:= \
@@ -104,7 +104,7 @@ LOCAL_SRC_FILES:= \
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libgui \
- libstagefright_foundation libmedia libmedia_native libcutils
+ libstagefright_foundation libmedia libcutils
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -127,7 +127,7 @@ LOCAL_SRC_FILES:= \
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libmedia_native libgui libcutils libui
+ libmedia libgui libcutils libui
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -151,7 +151,7 @@ LOCAL_SRC_FILES:= \
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libmedia_native libgui libcutils libui
+ libmedia libgui libcutils libui
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 7636906..eb3296e 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -297,9 +297,11 @@ status_t SimplePlayer::onPrepare() {
AString mime;
CHECK(format->findString("mime", &mime));
+ bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
+
if (!haveAudio && !strncasecmp(mime.c_str(), "audio/", 6)) {
haveAudio = true;
- } else if (!haveVideo && !strncasecmp(mime.c_str(), "video/", 6)) {
+ } else if (!haveVideo && isVideo) {
haveVideo = true;
} else {
continue;
@@ -320,7 +322,7 @@ status_t SimplePlayer::onPrepare() {
err = state->mCodec->configure(
format,
- mNativeWindow->getSurfaceTextureClient(),
+ isVideo ? mNativeWindow->getSurfaceTextureClient() : NULL,
NULL /* crypto */,
0 /* flags */);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index b92a8a0..1e0e7f8 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -589,7 +589,7 @@ static void performSeekTest(const sp<MediaSource> &source) {
}
static void usage(const char *me) {
- fprintf(stderr, "usage: %s\n", me);
+ fprintf(stderr, "usage: %s [options] [input_filename]\n", me);
fprintf(stderr, " -h(elp)\n");
fprintf(stderr, " -a(udio)\n");
fprintf(stderr, " -n repetitions\n");
@@ -607,8 +607,8 @@ static void usage(const char *me) {
"(video only)\n");
fprintf(stderr, " -S allocate buffers from a surface\n");
fprintf(stderr, " -T allocate buffers from a surface texture\n");
- fprintf(stderr, " -d(ump) filename (raw stream data to a file)\n");
- fprintf(stderr, " -D(ump) filename (decoded PCM data to a file)\n");
+ fprintf(stderr, " -d(ump) output_filename (raw stream data to a file)\n");
+ fprintf(stderr, " -D(ump) output_filename (decoded PCM data to a file)\n");
}
static void dumpCodecProfiles(const sp<IOMX>& omx, bool queryDecoders) {
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index 5540d32..d521543 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -525,6 +525,10 @@ public:
// stream and record stabilized videos.
static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
+ // Supported modes for special effects with light.
+ // Example values: "lowlight,hdr".
+ static const char KEY_LIGHTFX[];
+
// Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
static const char TRUE[];
static const char FALSE[];
@@ -664,6 +668,12 @@ public:
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+ // Values for light special effects
+ // Low-light enhancement mode
+ static const char LIGHTFX_LOWLIGHT[];
+ // High-dynamic range mode
+ static const char LIGHTFX_HDR[];
+
private:
DefaultKeyedVector<String8,String8> mMap;
};
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 156c592..ae444c3 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -43,46 +43,43 @@ public:
*/
enum event_type {
EVENT_MORE_DATA = 0, // Request to read more data from PCM buffer.
- EVENT_OVERRUN = 1, // PCM buffer overrun occured.
+ EVENT_OVERRUN = 1, // PCM buffer overrun occurred.
EVENT_MARKER = 2, // Record head is at the specified marker position
// (See setMarkerPosition()).
EVENT_NEW_POS = 3, // Record head is at a new position
// (See setPositionUpdatePeriod()).
};
- /* Create Buffer on the stack and pass it to obtainBuffer()
- * and releaseBuffer().
+ /* Client should declare Buffer on the stack and pass address to obtainBuffer()
+ * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA.
*/
class Buffer
{
public:
- enum {
- MUTE = 0x00000001
- };
- uint32_t flags;
- int channelCount;
- audio_format_t format;
- size_t frameCount;
+ size_t frameCount; // number of sample frames corresponding to size;
+ // on input it is the number of frames available,
+ // on output is the number of frames actually drained
+
size_t size; // total size in bytes == frameCount * frameSize
union {
void* raw;
- short* i16;
- int8_t* i8;
+ short* i16; // signed 16-bit
+ int8_t* i8; // unsigned 8-bit, offset by 0x80
};
};
/* As a convenience, if a callback is supplied, a handler thread
* is automatically created with the appropriate priority. This thread
- * invokes the callback when a new buffer becomes ready or an overrun condition occurs.
+ * invokes the callback when a new buffer becomes ready or various conditions occur.
* Parameters:
*
* event: type of event notified (see enum AudioRecord::event_type).
* user: Pointer to context for use by the callback receiver.
* info: Pointer to optional parameter according to event type:
* - EVENT_MORE_DATA: pointer to AudioRecord::Buffer struct. The callback must not read
- * more bytes than indicated by 'size' field and update 'size' if less bytes are
- * read.
+ * more bytes than indicated by 'size' field and update 'size' if fewer bytes are
+ * consumed.
* - EVENT_OVERRUN: unused.
* - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
* - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
@@ -98,7 +95,7 @@ public:
* - BAD_VALUE: unsupported configuration
*/
- static status_t getMinFrameCount(int* frameCount,
+ static status_t getMinFrameCount(size_t* frameCount,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask);
@@ -108,7 +105,7 @@ public:
*/
AudioRecord();
- /* Creates an AudioRecord track and registers it with AudioFlinger.
+ /* Creates an AudioRecord object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
* Unspecified values are set to the audio hardware's current
* values.
@@ -120,10 +117,13 @@ public:
* format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
* channelMask: Channel mask.
- * frameCount: Total size of track PCM buffer in frames. This defines the
- * latency of the track.
+ * frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the
+ * latency of the track. The actual size selected by the AudioRecord could
+ * be larger if the requested size is not compatible with current audio HAL
+ * latency. Zero means to use a default value.
* cbf: Callback function. If not null, this function is called periodically
- * to provide new PCM data.
+ * to consume new PCM data.
* user: Context for use by the callback receiver.
* notificationFrames: The callback function is called each time notificationFrames PCM
* frames are ready in record track output buffer.
@@ -154,7 +154,7 @@ public:
* - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
* - PERMISSION_DENIED: recording is not allowed for the requesting process
- * */
+ */
status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
uint32_t sampleRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
@@ -168,14 +168,14 @@ public:
/* Result of constructing the AudioRecord. This must be checked
- * before using any AudioRecord API (except for set()), using
+ * before using any AudioRecord API (except for set()), because using
* an uninitialized AudioRecord produces undefined results.
* See set() method above for possible return codes.
*/
status_t initCheck() const;
- /* Returns this track's latency in milliseconds.
- * This includes the latency due to AudioRecord buffer size
+ /* Returns this track's estimated latency in milliseconds.
+ * This includes the latency due to AudioRecord buffer size,
* and audio hardware driver.
*/
uint32_t latency() const;
@@ -183,15 +183,15 @@ public:
/* getters, see constructor and set() */
audio_format_t format() const;
- int channelCount() const;
- uint32_t frameCount() const;
- size_t frameSize() const;
+ uint32_t channelCount() const;
+ size_t frameCount() const;
+ size_t frameSize() const { return mFrameSize; }
audio_source_t inputSource() const;
/* After it's created the track is not active. Call start() to
* make it active. If set, the callback will start being called.
- * if event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
+ * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
* the specified event occurs on the specified trigger session.
*/
status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
@@ -199,12 +199,12 @@ public:
/* Stop a track. If set, the callback will cease being called and
* obtainBuffer returns STOPPED. Note that obtainBuffer() still works
- * and will fill up buffers until the pool is exhausted.
+ * and will drain buffers until the pool is exhausted.
*/
void stop();
bool stopped() const;
- /* get sample rate for this record track
+ /* Get sample rate for this record track in Hz.
*/
uint32_t getSampleRate() const;
@@ -258,7 +258,7 @@ public:
*/
status_t getPosition(uint32_t *position) const;
- /* returns a handle on the audio input used by this AudioRecord.
+ /* Returns a handle on the audio input used by this AudioRecord.
*
* Parameters:
* none.
@@ -268,7 +268,7 @@ public:
*/
audio_io_handle_t getInput() const;
- /* returns the audio session ID associated with this AudioRecord.
+ /* Returns the audio session ID associated with this AudioRecord.
*
* Parameters:
* none.
@@ -278,22 +278,30 @@ public:
*/
int getSessionId() const;
- /* obtains a buffer of "frameCount" frames. The buffer must be
- * filled entirely. If the track is stopped, obtainBuffer() returns
+ /* Obtains a buffer of "frameCount" frames. The buffer must be
+ * drained entirely, and then released with releaseBuffer().
+ * If the track is stopped, obtainBuffer() returns
* STOPPED instead of NO_ERROR as long as there are buffers available,
* at which point NO_MORE_BUFFERS is returned.
- * Buffers will be returned until the pool (buffercount())
+ * Buffers will be returned until the pool
* is exhausted, at which point obtainBuffer() will either block
* or return WOULD_BLOCK depending on the value of the "blocking"
* parameter.
+ *
+ * Interpretation of waitCount:
+ * +n limits wait time to n * WAIT_PERIOD_MS,
+ * -1 causes an (almost) infinite wait time,
+ * 0 non-blocking.
*/
enum {
- NO_MORE_BUFFERS = 0x80000001,
+ NO_MORE_BUFFERS = 0x80000001, // same name in AudioFlinger.h, ok to be different value
STOPPED = 1
};
status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+
+ /* Release an emptied buffer of "frameCount" frames for AudioFlinger to re-fill. */
void releaseBuffer(Buffer* audioBuffer);
@@ -302,16 +310,16 @@ public:
*/
ssize_t read(void* buffer, size_t size);
- /* Return the amount of input frames lost in the audio driver since the last call of this
+ /* Return the number of input frames lost in the audio driver since the last call of this
* function. Audio driver is expected to reset the value to 0 and restart counting upon
* returning the current value by this function call. Such loss typically occurs when the
* user space process is blocked longer than the capacity of audio driver buffers.
- * Unit: the number of input audio frames
+ * Units: the number of input audio frames.
*/
unsigned int getInputFramesLost() const;
private:
- /* copying audio tracks is not allowed */
+ /* copying audio record objects is not allowed */
AudioRecord(const AudioRecord& other);
AudioRecord& operator = (const AudioRecord& other);
@@ -343,8 +351,7 @@ private:
status_t openRecord_l(uint32_t sampleRate,
audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
audio_io_handle_t input);
audio_io_handle_t getInput_l();
status_t restoreRecord_l(audio_track_cblk_t*& cblk);
@@ -355,7 +362,7 @@ private:
bool mActive; // protected by mLock
// for client callback handler
- callback_t mCbf;
+ callback_t mCbf; // callback handler for events, or NULL
void* mUserData;
// for notification APIs
@@ -367,9 +374,10 @@ private:
uint32_t mUpdatePeriod; // in ms
// constant after constructor or set()
- uint32_t mFrameCount;
+ size_t mFrameCount;
audio_format_t mFormat;
uint8_t mChannelCount;
+ size_t mFrameSize; // app-level frame size == AudioFlinger frame size
audio_source_t mInputSource;
status_t mStatus;
uint32_t mLatency;
@@ -381,6 +389,7 @@ private:
sp<IAudioRecord> mAudioRecord;
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk;
+ void* mBuffers; // starting address of buffers in shared memory
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
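Taken together, the revised AudioRecord comments describe the typical capture flow: size the buffer with getMinFrameCount(), configure with set(), then start() and pull PCM with read(). A hedged sketch using only the calls documented above; the 44.1 kHz mono 16-bit format is an illustrative assumption, not from this patch:

    #include <media/AudioRecord.h>
    using namespace android;

    status_t captureOnce() {
        size_t minFrames = 0;   // size_t*, per the new getMinFrameCount() signature
        status_t err = AudioRecord::getMinFrameCount(&minFrames, 44100,
                AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO);
        if (err != NO_ERROR) return err;

        AudioRecord record;
        err = record.set(AUDIO_SOURCE_MIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_IN_MONO, minFrames);
        if (err != NO_ERROR) return err;
        if (record.initCheck() != NO_ERROR) return record.initCheck();

        record.start();
        int16_t pcm[1024];
        ssize_t bytes = record.read(pcm, sizeof(pcm));  // blocking read, returns bytes
        record.stop();
        return bytes < 0 ? (status_t) bytes : NO_ERROR;
    }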
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 49e1afc..126ef12 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -87,29 +87,26 @@ public:
static float linearToLog(int volume);
static int logToLinear(float volume);
- static status_t getOutputSamplingRate(int* samplingRate, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
- static status_t getOutputFrameCount(int* frameCount, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
- static status_t getOutputLatency(uint32_t* latency, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputSamplingRate(uint32_t* samplingRate,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputFrameCount(size_t* frameCount,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputLatency(uint32_t* latency,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
static status_t getSamplingRate(audio_io_handle_t output,
audio_stream_type_t streamType,
- int* samplingRate);
+ uint32_t* samplingRate);
// returns the number of frames per audio HAL write buffer. Corresponds to
// audio_stream->get_buffer_size()/audio_stream_frame_size()
static status_t getFrameCount(audio_io_handle_t output,
audio_stream_type_t stream,
- int* frameCount);
+ size_t* frameCount);
// returns the audio output stream latency in ms. Corresponds to
// audio_stream_out->get_latency()
static status_t getLatency(audio_io_handle_t output,
audio_stream_type_t stream,
uint32_t* latency);
- // DEPRECATED
- static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT);
-
- // DEPRECATED
- static status_t getOutputFrameCount(int* frameCount, int stream = AUDIO_STREAM_DEFAULT);
-
static bool routedToA2dpOutput(audio_stream_type_t streamType);
static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
@@ -126,10 +123,11 @@ public:
// - BAD_VALUE: invalid parameter
// NOTE: this feature is not supported on all hardware platforms and it is
// necessary to check returned status before using the returned values.
- static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
// return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
- static unsigned int getInputFramesLost(audio_io_handle_t ioHandle);
+ static size_t getInputFramesLost(audio_io_handle_t ioHandle);
static int newAudioSessionId();
static void acquireAudioSessionId(int audioSession);
@@ -147,8 +145,8 @@ public:
NUM_CONFIG_EVENTS
};
- // audio output descriptor used to cache output configurations in client process to avoid frequent calls
- // through IAudioFlinger
+ // audio output descriptor used to cache output configurations in client process to avoid
+ // frequent calls through IAudioFlinger
class OutputDescriptor {
public:
OutputDescriptor()
@@ -162,8 +160,8 @@ public:
};
// Events used to synchronize actions between audio sessions.
- // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until playback
- // is complete on another audio session.
+ // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
+ // playback is complete on another audio session.
// See definitions in MediaSyncEvent.java
enum sync_event_t {
SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event
@@ -183,8 +181,10 @@ public:
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
- static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address);
- static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address);
+ static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
+ const char *device_address);
+ static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+ const char *device_address);
static status_t setPhoneState(audio_mode_t state);
static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
@@ -237,8 +237,8 @@ public:
static const sp<IAudioPolicyService>& get_audio_policy_service();
// helpers for android.media.AudioManager.getProperty(), see description there for meaning
- static int32_t getPrimaryOutputSamplingRate();
- static int32_t getPrimaryOutputFrameCount();
+ static uint32_t getPrimaryOutputSamplingRate();
+ static size_t getPrimaryOutputFrameCount();
// ----------------------------------------------------------------------------
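Since these queries are what AudioTrack/AudioRecord clients use to size their buffers, a short sketch of the updated out-parameter types (illustrative only; the stream choice is an assumption):

    #include <media/AudioSystem.h>
    using namespace android;

    void queryPrimaryOutputConfig() {
        uint32_t rate = 0;       // previously passed as int*
        size_t frames = 0;       // previously passed as int*
        uint32_t latencyMs = 0;
        AudioSystem::getOutputSamplingRate(&rate, AUDIO_STREAM_MUSIC);
        AudioSystem::getOutputFrameCount(&frames, AUDIO_STREAM_MUSIC);
        AudioSystem::getOutputLatency(&latencyMs, AUDIO_STREAM_MUSIC);
    }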
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 34108b3..f1b77ab 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -49,13 +49,17 @@ public:
};
/* Events used by AudioTrack callback function (audio_track_cblk_t).
+ * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
*/
enum event_type {
EVENT_MORE_DATA = 0, // Request to write more data to PCM buffer.
- EVENT_UNDERRUN = 1, // PCM buffer underrun occured.
- EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from loop start if loop count was not 0.
- EVENT_MARKER = 3, // Playback head is at the specified marker position (See setMarkerPosition()).
- EVENT_NEW_POS = 4, // Playback head is at a new position (See setPositionUpdatePeriod()).
+ EVENT_UNDERRUN = 1, // PCM buffer underrun occurred.
+ EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from
+ // loop start if loop count was not 0.
+ EVENT_MARKER = 3, // Playback head is at the specified marker position
+ // (See setMarkerPosition()).
+ EVENT_NEW_POS = 4, // Playback head is at a new position
+ // (See setPositionUpdatePeriod()).
EVENT_BUFFER_END = 5 // Playback head is at the end of the buffer.
};
@@ -66,14 +70,6 @@ public:
class Buffer
{
public:
- enum {
- MUTE = 0x00000001
- };
- uint32_t flags; // 0 or MUTE
- audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT
- // accessed directly by WebKit ANP callback
- int channelCount; // will be removed in the future, do not use
-
size_t frameCount; // number of sample frames corresponding to size;
// on input it is the number of frames desired,
// on output is the number of frames actually filled
@@ -114,7 +110,7 @@ public:
* - NO_INIT: audio server or audio hardware not initialized
*/
- static status_t getMinFrameCount(int* frameCount,
+ static status_t getMinFrameCount(size_t* frameCount,
audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
uint32_t sampleRate = 0);
@@ -123,7 +119,7 @@ public:
*/
AudioTrack();
- /* Creates an audio track and registers it with AudioFlinger.
+ /* Creates an AudioTrack object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
* Unspecified values are set to the audio hardware's current
* values.
@@ -137,12 +133,13 @@ public:
* 16 bits per sample).
* channelMask: Channel mask.
* frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the
* latency of the track. The actual size selected by the AudioTrack could be
* larger if the requested size is not compatible with current audio HAL
* latency. Zero means to use a default value.
* flags: See comments on audio_output_flags_t in <system/audio.h>.
* cbf: Callback function. If not null, this function is called periodically
- * to request new PCM data.
+ * to provide new PCM data.
* user: Context for use by the callback receiver.
* notificationFrames: The callback function is called each time notificationFrames PCM
* frames have been consumed from track input buffer.
@@ -162,18 +159,6 @@ public:
int notificationFrames = 0,
int sessionId = 0);
- // DEPRECATED
- explicit AudioTrack( int streamType,
- uint32_t sampleRate = 0,
- int format = AUDIO_FORMAT_DEFAULT,
- int channelMask = 0,
- int frameCount = 0,
- uint32_t flags = (uint32_t) AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = 0,
- void* user = 0,
- int notificationFrames = 0,
- int sessionId = 0);
-
/* Creates an audio track and registers it with AudioFlinger. With this constructor,
* the PCM data to be rendered by AudioTrack is passed in a shared memory buffer
* identified by the argument sharedBuffer. This prototype is for static buffer playback.
@@ -206,7 +191,7 @@ public:
* - INVALID_OPERATION: AudioTrack is already initialized
* - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
- * */
+ */
status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
uint32_t sampleRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
@@ -238,13 +223,13 @@ public:
audio_stream_type_t streamType() const;
audio_format_t format() const;
- int channelCount() const;
+ uint32_t channelCount() const;
uint32_t frameCount() const;
/* Return channelCount * (bit depth per channel / 8).
* channelCount is determined from channelMask, and bit depth comes from format.
*/
- size_t frameSize() const;
+ size_t frameSize() const { return mFrameSize; }
sp<IMemory>& sharedBuffer();
@@ -280,9 +265,14 @@ public:
/* Set volume for this track, mostly used for games' sound effects
* left and right volumes. Levels must be >= 0.0 and <= 1.0.
+ * This is the older API. New applications should use setVolume(float) when possible.
*/
status_t setVolume(float left, float right);
- void getVolume(float* left, float* right) const;
+
+ /* Set volume for all channels. This is the preferred API for new applications,
+ * especially for multi-channel content.
+ */
+ status_t setVolume(float volume);
/* Set the send level for this track. An auxiliary effect should be attached
* to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
@@ -290,9 +280,11 @@ public:
status_t setAuxEffectSendLevel(float level);
void getAuxEffectSendLevel(float* level) const;
- /* Set sample rate for this track, mostly used for games' sound effects
+ /* Set sample rate for this track in Hz, mostly used for games' sound effects
*/
- status_t setSampleRate(int sampleRate);
+ status_t setSampleRate(uint32_t sampleRate);
+
+ /* Return current sample rate in Hz, or 0 if unknown */
uint32_t getSampleRate() const;
/* Enables looping and sets the start and end points of looping.
@@ -312,7 +304,8 @@ public:
/* Sets marker position. When playback reaches the number of frames specified, a callback with
* event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
* notification callback.
- * If the AudioTrack has been opened with no callback function associated, the operation will fail.
+ * If the AudioTrack has been opened with no callback function associated, the operation will
+ * fail.
*
* Parameters:
*
@@ -330,7 +323,8 @@ public:
* a callback with event type EVENT_NEW_POS is called.
* Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
* callback.
- * If the AudioTrack has been opened with no callback function associated, the operation will fail.
+ * If the AudioTrack has been opened with no callback function associated, the operation will
+ * fail.
*
* Parameters:
*
@@ -359,7 +353,8 @@ public:
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation
* - INVALID_OPERATION: the AudioTrack is not stopped.
- * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack buffer
+ * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack
+ * buffer
*/
status_t setPosition(uint32_t position);
status_t getPosition(uint32_t *position);
@@ -413,7 +408,7 @@ public:
* If the track is stopped, obtainBuffer() returns
* STOPPED instead of NO_ERROR as long as there are buffers available,
* at which point NO_MORE_BUFFERS is returned.
- * Buffers will be returned until the pool (buffercount())
+ * Buffers will be returned until the pool
* is exhausted, at which point obtainBuffer() will either block
* or return WOULD_BLOCK depending on the value of the "blocking"
* parameter.
@@ -422,6 +417,18 @@ public:
* +n limits wait time to n * WAIT_PERIOD_MS,
* -1 causes an (almost) infinite wait time,
* 0 non-blocking.
+ *
+ * Buffer fields
+ * On entry:
+ * frameCount number of frames requested
+ * After error return:
+ * frameCount 0
+ * size 0
+ * raw undefined
+ * After successful return:
+ * frameCount actual number of frames available, <= number requested
+ * size actual number of bytes available
+ * raw pointer to the buffer
*/
enum {
@@ -482,11 +489,11 @@ protected:
// body of AudioTrackThread::threadLoop()
bool processAudioBuffer(const sp<AudioTrackThread>& thread);
+ // caller must hold lock on mLock for all _l methods
status_t createTrack_l(audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
audio_output_flags_t flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output);
@@ -502,37 +509,63 @@ protected:
float mVolume[2];
float mSendLevel;
- uint32_t mFrameCount;
+ size_t mFrameCount; // corresponds to current IAudioTrack
+ size_t mReqFrameCount; // frame count to request the next time a new
+ // IAudioTrack is needed
+
+ audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
- audio_track_cblk_t* mCblk;
- audio_format_t mFormat;
+ // Starting address of buffers in shared memory. If there is a shared buffer, mBuffers
+ // is the value of pointer() for the shared buffer, otherwise mBuffers points
+ // immediately after the control block. This address is for the mapping within client
+ // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
+ void* mBuffers;
+
+ audio_format_t mFormat; // as requested by client, not forced to 16-bit
audio_stream_type_t mStreamType;
uint8_t mChannelCount;
uint8_t mMuted;
uint8_t mReserved;
audio_channel_mask_t mChannelMask;
+
+ // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.
+ // For 8-bit PCM data, mFrameSizeAF is
+ // twice as large because data is expanded to 16-bit before being stored in buffer.
+ size_t mFrameSize; // app-level frame size
+ size_t mFrameSizeAF; // AudioFlinger frame size
+
status_t mStatus;
uint32_t mLatency;
bool mActive; // protected by mLock
callback_t mCbf; // callback handler for events, or NULL
- void* mUserData;
- uint32_t mNotificationFramesReq; // requested number of frames between each notification callback
- uint32_t mNotificationFramesAct; // actual number of frames between each notification callback
+ void* mUserData; // for client callback handler
+
+ // for notification APIs
+ uint32_t mNotificationFramesReq; // requested number of frames between each
+ // notification callback
+ uint32_t mNotificationFramesAct; // actual number of frames between each
+ // notification callback
sp<IMemory> mSharedBuffer;
int mLoopCount;
uint32_t mRemainingFrames;
- uint32_t mMarkerPosition;
+ uint32_t mMarkerPosition; // in frames
bool mMarkerReached;
- uint32_t mNewPosition;
- uint32_t mUpdatePeriod;
+ uint32_t mNewPosition; // in frames
+ uint32_t mUpdatePeriod; // in frames
+
bool mFlushed; // FIXME will be made obsolete by making flush() synchronous
audio_output_flags_t mFlags;
int mSessionId;
int mAuxEffectId;
+
+ // When locking both mLock and mCblk->lock, must lock in this order to avoid deadlock:
+ // 1. mLock
+ // 2. mCblk->lock
+ // It is OK to lock only mCblk->lock.
mutable Mutex mLock;
- status_t mRestoreStatus;
+
bool mIsTimed;
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
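The new "Buffer fields" comment above pins down the obtainBuffer() contract. A hedged sketch of one fill/release cycle on an already-initialized track; the silence fill is purely illustrative:

    #include <string.h>
    #include <media/AudioTrack.h>
    using namespace android;

    status_t writeSilentBuffer(AudioTrack &track, size_t framesWanted) {
        AudioTrack::Buffer buf;
        buf.frameCount = framesWanted;               // on entry: frames requested
        status_t err = track.obtainBuffer(&buf, -1); // -1: (almost) infinite wait
        if (err != NO_ERROR) return err;             // e.g. STOPPED or NO_MORE_BUFFERS
        memset(buf.raw, 0, buf.size);                // frameCount/size now reflect
                                                     // what was actually granted
        track.releaseBuffer(&buf);
        return NO_ERROR;
    }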
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index 65c26f4..b1ed7b0 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -74,7 +74,8 @@ int EffectQueryNumberEffects(uint32_t *pNumEffects);
// -ENOENT no more effect available
// -ENODEV factory failed to initialize
// -EINVAL invalid pDescriptor
-// -ENOSYS effect list has changed since last execution of EffectQueryNumberEffects()
+// -ENOSYS effect list has changed since last execution of
+// EffectQueryNumberEffects()
// *pDescriptor: updated with the effect descriptor.
//
////////////////////////////////////////////////////////////////////////////////
@@ -91,12 +92,12 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor);
//
// Input:
// pEffectUuid: pointer to the effect uuid.
-// sessionId: audio session to which this effect instance will be attached. All effects created
-// with the same session ID are connected in series and process the same signal stream.
-// Knowing that two effects are part of the same effect chain can help the library implement
-// some kind of optimizations.
-// ioId: identifies the output or input stream this effect is directed to at audio HAL. For future
-// use especially with tunneled HW accelerated effects
+// sessionId: audio session to which this effect instance will be attached. All effects
+// created with the same session ID are connected in series and process the same signal
+// stream. Knowing that two effects are part of the same effect chain can help the
+// library implement some kind of optimizations.
+// ioId: identifies the output or input stream this effect is directed to at audio HAL.
+// For future use especially with tunneled HW accelerated effects
//
// Input/Output:
// pHandle: address where to return the effect handle.
@@ -109,7 +110,8 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor);
// *pHandle: updated with the effect handle.
//
////////////////////////////////////////////////////////////////////////////////
-int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle);
+int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+ effect_handle_t *pHandle);
////////////////////////////////////////////////////////////////////////////////
//
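The factory contract described above (query the count, iterate descriptors, create by UUID) takes only a few calls to exercise. A hypothetical helper, with error handling abbreviated:

    #include <errno.h>
    #include <media/EffectsFactoryApi.h>

    int createFirstEffect(int32_t sessionId, int32_t ioId, effect_handle_t *handle) {
        uint32_t count = 0;
        if (EffectQueryNumberEffects(&count) != 0 || count == 0) return -ENOENT;

        effect_descriptor_t desc;
        if (EffectQueryEffect(0, &desc) != 0) return -EINVAL;

        // EffectCreate() as re-wrapped above: uuid, session, io handle, out handle.
        return EffectCreate(&desc.uuid, sessionId, ioId, handle);
    }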
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5170a87..9727143 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -61,8 +61,8 @@ public:
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
- track_flags_t flags,
+ size_t frameCount,
+ track_flags_t *flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid, // -1 means unused, otherwise must be valid non-0
@@ -75,7 +75,7 @@ public:
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
track_flags_t flags,
pid_t tid, // -1 means unused, otherwise must be valid non-0
int *sessionId,
@@ -123,7 +123,8 @@ public:
virtual status_t setParameters(audio_io_handle_t ioHandle,
const String8& keyValuePairs) = 0;
- virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const = 0;
+ virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
+ const = 0;
// register a current process for audio output change notifications
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
@@ -156,10 +157,10 @@ public:
virtual status_t setVoiceVolume(float volume) = 0;
- virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+ virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
audio_io_handle_t output) const = 0;
- virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
+ virtual size_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
virtual int newAudioSessionId() = 0;
@@ -191,8 +192,8 @@ public:
// helpers for android.media.AudioManager.getProperty(), see description there for meaning
// FIXME move these APIs to AudioPolicy to permit a more accurate implementation
// that looks on primary device for a stream with fast flag, primary flag, or first one.
- virtual int32_t getPrimaryOutputSamplingRate() = 0;
- virtual int32_t getPrimaryOutputFrameCount() = 0;
+ virtual uint32_t getPrimaryOutputSamplingRate() = 0;
+ virtual size_t getPrimaryOutputFrameCount() = 0;
};
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index cc2e069..f5b0604 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -44,9 +44,10 @@ public:
audio_policy_dev_state_t state,
const char *device_address) = 0;
virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
- const char *device_address) = 0;
+ const char *device_address) = 0;
virtual status_t setPhoneState(audio_mode_t state) = 0;
- virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0;
+ virtual status_t setForceUse(audio_policy_force_use_t usage,
+ audio_policy_forced_cfg_t config) = 0;
virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 002b045..7bf3069 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -65,8 +65,10 @@ public:
sp<IMemory> getIMemory() { return mData; }
// hack
- void init(int numChannels, int sampleRate, audio_format_t format, size_t size, sp<IMemory> data ) {
- mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size; mData = data; }
+ void init(int numChannels, int sampleRate, audio_format_t format, size_t size,
+ sp<IMemory> data ) {
+ mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size;
+ mData = data; }
private:
void init();
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 29c8fd9..0529bcd 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -263,7 +263,7 @@ private:
unsigned short mLoopCounter; // Current tone loopback count
- int mSamplingRate; // AudioFlinger Sampling rate
+ uint32_t mSamplingRate; // AudioFlinger Sampling rate
AudioTrack *mpAudioTrack; // Pointer to audio track used for playback
Mutex mLock; // Mutex to control concurent access to ToneGenerator object from audio callback and application API
Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index 81f42ed..f5d6eb5 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -45,17 +45,15 @@ enum {
// Negotiation of format is based on the data provider and data sink, or the data consumer and
// data source, exchanging prioritized arrays of offers and counter-offers until a single offer is
// mutually agreed upon. Each offer is an NBAIO_Format. For simplicity and performance,
-// NBAIO_Format is an enum that ties together the most important combinations of the various
+// NBAIO_Format is a typedef that ties together the most important combinations of the various
// attributes, rather than a struct with separate fields for format, sample rate, channel count,
// interleave, packing, alignment, etc. The reason is that NBAIO_Format tries to abstract out only
-// the combinations that are actually needed within AudioFligner. If the list of combinations grows
+// the combinations that are actually needed within AudioFlinger. If the list of combinations grows
// too large, then this decision should be re-visited.
-enum NBAIO_Format {
- Format_Invalid,
- Format_SR44_1_C2_I16, // 44.1 kHz PCM stereo interleaved 16-bit signed
- Format_SR48_C2_I16, // 48 kHz PCM stereo interleaved 16-bit signed
- Format_SR44_1_C1_I16, // 44.1 kHz PCM mono interleaved 16-bit signed
- Format_SR48_C1_I16, // 48 kHz PCM mono interleaved 16-bit signed
+// Sample rate and channel count are explicit, PCM interleaved 16-bit is assumed.
+typedef unsigned NBAIO_Format;
+enum {
+ Format_Invalid
};
// Return the frame size of an NBAIO_Format in bytes
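Because NBAIO_Format is now just an unsigned value from which sample rate and channel count are implied, here is a purely hypothetical illustration of the idea; the real encoding and helpers live in libnbaio and are not shown in this hunk:

    // Hypothetical packing, for illustration only; 16-bit interleaved PCM assumed.
    typedef unsigned SketchFormat;

    static inline SketchFormat sketchFormat(unsigned sampleRate, unsigned channelCount) {
        return (sampleRate << 2) | (channelCount & 3);
    }
    static inline size_t sketchFrameSize(SketchFormat f) {
        return (f & 3) * sizeof(int16_t);
    }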
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 457d5d7..81de6e4 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -42,6 +42,7 @@ extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5b133f3..48b6b21 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -27,33 +27,16 @@ namespace android {
// ----------------------------------------------------------------------------
// Maximum cumulated timeout milliseconds before restarting audioflinger thread
-#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP init time
+#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP
+ // init time
#define MAX_RUN_TIMEOUT_MS 1000
#define WAIT_PERIOD_MS 10
-#define RESTORE_TIMEOUT_MS 5000 // Maximum waiting time for a track to be restored
-
-#define CBLK_UNDERRUN_MSK 0x0001
-#define CBLK_UNDERRUN_ON 0x0001 // underrun (out) or overrrun (in) indication
-#define CBLK_UNDERRUN_OFF 0x0000 // no underrun
-#define CBLK_DIRECTION_MSK 0x0002
-#define CBLK_DIRECTION_OUT 0x0002 // this cblk is for an AudioTrack
-#define CBLK_DIRECTION_IN 0x0000 // this cblk is for an AudioRecord
-#define CBLK_FORCEREADY_MSK 0x0004
-#define CBLK_FORCEREADY_ON 0x0004 // track is considered ready immediately by AudioFlinger
-#define CBLK_FORCEREADY_OFF 0x0000 // track is ready when buffer full
-#define CBLK_INVALID_MSK 0x0008
-#define CBLK_INVALID_ON 0x0008 // track buffer is invalidated by AudioFlinger:
-#define CBLK_INVALID_OFF 0x0000 // must be re-created
-#define CBLK_DISABLED_MSK 0x0010
-#define CBLK_DISABLED_ON 0x0010 // track disabled by AudioFlinger due to underrun:
-#define CBLK_DISABLED_OFF 0x0000 // must be re-started
-#define CBLK_RESTORING_MSK 0x0020
-#define CBLK_RESTORING_ON 0x0020 // track is being restored after invalidation
-#define CBLK_RESTORING_OFF 0x0000 // by AudioFlinger
-#define CBLK_RESTORED_MSK 0x0040
-#define CBLK_RESTORED_ON 0x0040 // track has been restored after invalidation
-#define CBLK_RESTORED_OFF 0x0040 // by AudioFlinger
-#define CBLK_FAST 0x0080 // AudioFlinger successfully created a fast track
+
+#define CBLK_UNDERRUN 0x01 // set: underrun (out) or overrrun (in), clear: no underrun or overrun
+#define CBLK_FORCEREADY 0x02 // set: track is considered ready immediately by AudioFlinger,
+ // clear: track is ready when buffer full
+#define CBLK_INVALID 0x04 // track buffer invalidated by AudioFlinger, need to re-create
+#define CBLK_DISABLED 0x08 // track disabled by AudioFlinger due to underrun, need to re-start
// Important: do not add any virtual methods, including ~
struct audio_track_cblk_t
@@ -70,12 +53,14 @@ struct audio_track_cblk_t
uint32_t userBase;
uint32_t serverBase;
- // if there is a shared buffer, "buffers" is the value of pointer() for the shared
- // buffer, otherwise "buffers" points immediately after the control block
- void* buffers;
- uint32_t frameCount;
+ int mPad1; // unused, but preserves cache line alignment
- // Cache line boundary
+ size_t frameCount_; // used during creation to pass actual track buffer size
+ // from AudioFlinger to client, and not referenced again
+ // FIXME remove here and replace by createTrack() in/out parameter
+ // renamed to "_" to detect incorrect use
+
+ // Cache line boundary (32 bytes)
uint32_t loopStart;
uint32_t loopEnd; // read-only for server, read/write for client
@@ -91,16 +76,14 @@ public:
uint32_t sampleRate;
- // NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
- // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
- // 16 bit because data is converted to 16 bit before being stored in buffer
+ uint8_t mPad2; // unused
// read-only for client, server writes once at initialization and is then read-only
- uint8_t frameSize; // would normally be size_t, but 8 bits is plenty
uint8_t mName; // normal tracks: track name, fast tracks: track index
// used by client only
- uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
+ uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting
+ // audioflinger
uint16_t waitTimeMs; // Cumulated wait time, used by client only
private:
@@ -114,13 +97,29 @@ public:
// Since the control block is always located in shared memory, this constructor
// is only used for placement new(). It is never used for regular new() or stack.
audio_track_cblk_t();
- uint32_t stepUser(uint32_t frameCount); // called by client only, where
- // client includes regular AudioTrack and AudioFlinger::PlaybackThread::OutputTrack
- bool stepServer(uint32_t frameCount); // called by server only
- void* buffer(uint32_t offset) const;
- uint32_t framesAvailable();
- uint32_t framesAvailable_l();
- uint32_t framesReady(); // called by server only
+
+ // called by client only, where client includes regular
+ // AudioTrack and AudioFlinger::PlaybackThread::OutputTrack
+ uint32_t stepUserIn(size_t stepCount, size_t frameCount) { return stepUser(stepCount, frameCount, false); }
+ uint32_t stepUserOut(size_t stepCount, size_t frameCount) { return stepUser(stepCount, frameCount, true); }
+
+ bool stepServer(size_t stepCount, size_t frameCount, bool isOut);
+
+ // if there is a shared buffer, "buffers" is the value of pointer() for the shared
+ // buffer, otherwise "buffers" points immediately after the control block
+ void* buffer(void *buffers, uint32_t frameSize, uint32_t offset) const;
+
+ uint32_t framesAvailableIn(size_t frameCount)
+ { return framesAvailable(frameCount, false); }
+ uint32_t framesAvailableOut(size_t frameCount)
+ { return framesAvailable(frameCount, true); }
+ uint32_t framesAvailableIn_l(size_t frameCount)
+ { return framesAvailable_l(frameCount, false); }
+ uint32_t framesAvailableOut_l(size_t frameCount)
+ { return framesAvailable_l(frameCount, true); }
+ uint32_t framesReadyIn() { return framesReady(false); }
+ uint32_t framesReadyOut() { return framesReady(true); }
+
bool tryLock();
// No barriers on the following operations, so the ordering of loads/stores
@@ -146,6 +145,12 @@ public:
return mVolumeLR;
}
+private:
+ // isOut == true means AudioTrack, isOut == false means AudioRecord
+ uint32_t stepUser(size_t stepCount, size_t frameCount, bool isOut);
+ uint32_t framesAvailable(size_t frameCount, bool isOut);
+ uint32_t framesAvailable_l(size_t frameCount, bool isOut);
+ uint32_t framesReady(bool isOut);
};
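With the MSK/ON/OFF triples collapsed into single bits, client-side checks reduce to plain bit tests. A sketch only, assuming the control block keeps its 'flags' word from earlier releases (that field is not visible in this hunk):

    static inline bool trackNeedsRecreate(const audio_track_cblk_t *cblk) {
        return (cblk->flags & CBLK_INVALID) != 0;   // formerly CBLK_INVALID_MSK/_ON
    }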
diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk
index 0ed7e6c..778c5ac 100755
--- a/libvideoeditor/lvpp/Android.mk
+++ b/libvideoeditor/lvpp/Android.mk
@@ -54,7 +54,6 @@ LOCAL_SHARED_LIBRARIES := \
libGLESv2 \
libgui \
libmedia \
- libmedia_native \
libdrmframework \
libstagefright \
libstagefright_foundation \
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
index fc9fb49..a47fc15 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -406,8 +406,8 @@ status_t VideoEditorPlayer::VeAudioOutput::open(
}
ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
if (mTrack) close();
- int afSampleRate;
- int afFrameCount;
+ uint32_t afSampleRate;
+ size_t afFrameCount;
int frameCount;
if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) !=
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
index 95ca6fd..3052ad9 100644
--- a/media/libeffects/downmix/Android.mk
+++ b/media/libeffects/downmix/Android.mk
@@ -25,4 +25,6 @@ LOCAL_C_INCLUDES := \
LOCAL_PRELINK_MODULE := false
+LOCAL_CFLAGS += -fvisibility=hidden
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 5bf052a..f17a6e8 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -58,13 +58,13 @@ const struct effect_interface_s gDownmixInterface = {
NULL /* no process_reverse function, no reference stream needed */
};
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
tag : AUDIO_EFFECT_LIBRARY_TAG,
version : EFFECT_LIBRARY_API_VERSION,
name : "Downmix Library",
implementor : "The Android Open Source Project",
- query_num_effects : DownmixLib_QueryNumberEffects,
- query_effect : DownmixLib_QueryEffect,
create_effect : DownmixLib_Create,
release_effect : DownmixLib_Release,
get_descriptor : DownmixLib_GetDescriptor,
@@ -159,25 +159,6 @@ void Downmix_testIndexComputation(uint32_t mask) {
/*--- Effect Library Interface Implementation ---*/
-int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects) {
- ALOGV("DownmixLib_QueryNumberEffects()");
- *pNumEffects = kNbEffects;
- return 0;
-}
-
-int32_t DownmixLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) {
- ALOGV("DownmixLib_QueryEffect() index=%d", index);
- if (pDescriptor == NULL) {
- return -EINVAL;
- }
- if (index >= (uint32_t)kNbEffects) {
- return -EINVAL;
- }
- memcpy(pDescriptor, gDescriptors[index], sizeof(effect_descriptor_t));
- return 0;
-}
-
-
int32_t DownmixLib_Create(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h
index be3ca3f..cb6b957 100644
--- a/media/libeffects/downmix/EffectDownmix.h
+++ b/media/libeffects/downmix/EffectDownmix.h
@@ -65,9 +65,6 @@ const uint32_t kUnsupported =
* Effect API
*------------------------------------
*/
-int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects);
-int32_t DownmixLib_QueryEffect(uint32_t index,
- effect_descriptor_t *pDescriptor);
int32_t DownmixLib_Create(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
diff --git a/media/libeffects/lvm/lib/Android.mk b/media/libeffects/lvm/lib/Android.mk
index f49267e..bb56c75 100644
--- a/media/libeffects/lvm/lib/Android.mk
+++ b/media/libeffects/lvm/lib/Android.mk
@@ -105,8 +105,6 @@ LOCAL_SRC_FILES:= \
LOCAL_MODULE:= libmusicbundle
-
-
LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/Eq/lib \
$(LOCAL_PATH)/Eq/src \
@@ -121,8 +119,12 @@ LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/StereoWidening/src \
$(LOCAL_PATH)/StereoWidening/lib
+LOCAL_CFLAGS += -fvisibility=hidden
+
include $(BUILD_STATIC_LIBRARY)
+
+
# Reverb library
include $(CLEAR_VARS)
@@ -168,12 +170,11 @@ LOCAL_SRC_FILES:= \
LOCAL_MODULE:= libreverb
-
-
LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/Reverb/lib \
$(LOCAL_PATH)/Reverb/src \
$(LOCAL_PATH)/Common/lib \
$(LOCAL_PATH)/Common/src
+LOCAL_CFLAGS += -fvisibility=hidden
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libeffects/lvm/wrapper/Android.mk b/media/libeffects/lvm/wrapper/Android.mk
index 4313424..f1af389 100644
--- a/media/libeffects/lvm/wrapper/Android.mk
+++ b/media/libeffects/lvm/wrapper/Android.mk
@@ -9,28 +9,27 @@ LOCAL_ARM_MODE := arm
LOCAL_SRC_FILES:= \
Bundle/EffectBundle.cpp
+LOCAL_CFLAGS += -fvisibility=hidden
+
LOCAL_MODULE:= libbundlewrapper
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx
-
-
LOCAL_STATIC_LIBRARIES += libmusicbundle
LOCAL_SHARED_LIBRARIES := \
libcutils \
libdl
-
LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/Bundle \
$(LOCAL_PATH)/../lib/Common/lib/ \
$(LOCAL_PATH)/../lib/Bundle/lib/ \
$(call include-path-for, audio-effects)
-
include $(BUILD_SHARED_LIBRARY)
+
# reverb wrapper
include $(CLEAR_VARS)
@@ -39,12 +38,12 @@ LOCAL_ARM_MODE := arm
LOCAL_SRC_FILES:= \
Reverb/EffectReverb.cpp
+LOCAL_CFLAGS += -fvisibility=hidden
+
LOCAL_MODULE:= libreverbwrapper
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx
-
-
LOCAL_STATIC_LIBRARIES += libreverb
LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index d706c2d..94b9acf 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -158,42 +158,6 @@ int Volume_getParameter (EffectContext *pContext,
int Effect_setEnabled(EffectContext *pContext, bool enabled);
/* Effect Library Interface Implementation */
-extern "C" int EffectQueryNumberEffects(uint32_t *pNumEffects){
- ALOGV("\n\tEffectQueryNumberEffects start");
- *pNumEffects = 4;
- ALOGV("\tEffectQueryNumberEffects creating %d effects", *pNumEffects);
- ALOGV("\tEffectQueryNumberEffects end\n");
- return 0;
-} /* end EffectQueryNumberEffects */
-
-extern "C" int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor){
- ALOGV("\n\tEffectQueryEffect start");
- ALOGV("\tEffectQueryEffect processing index %d", index);
-
- if (pDescriptor == NULL){
- ALOGV("\tLVM_ERROR : EffectQueryEffect was passed NULL pointer");
- return -EINVAL;
- }
- if (index > 3){
- ALOGV("\tLVM_ERROR : EffectQueryEffect index out of range %d", index);
- return -ENOENT;
- }
- if(index == LVM_BASS_BOOST){
- ALOGV("\tEffectQueryEffect processing LVM_BASS_BOOST");
- *pDescriptor = gBassBoostDescriptor;
- }else if(index == LVM_VIRTUALIZER){
- ALOGV("\tEffectQueryEffect processing LVM_VIRTUALIZER");
- *pDescriptor = gVirtualizerDescriptor;
- } else if(index == LVM_EQUALIZER){
- ALOGV("\tEffectQueryEffect processing LVM_EQUALIZER");
- *pDescriptor = gEqualizerDescriptor;
- } else if(index == LVM_VOLUME){
- ALOGV("\tEffectQueryEffect processing LVM_VOLUME");
- *pDescriptor = gVolumeDescriptor;
- }
- ALOGV("\tEffectQueryEffect end\n");
- return 0;
-} /* end EffectQueryEffect */
extern "C" int EffectCreate(const effect_uuid_t *uuid,
int32_t sessionId,
@@ -3299,13 +3263,13 @@ const struct effect_interface_s gLvmEffectInterface = {
NULL,
}; /* end gLvmEffectInterface */
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
tag : AUDIO_EFFECT_LIBRARY_TAG,
version : EFFECT_LIBRARY_API_VERSION,
name : "Effect Bundle Library",
implementor : "NXP Software Ltd.",
- query_num_effects : android::EffectQueryNumberEffects,
- query_effect : android::EffectQueryEffect,
create_effect : android::EffectCreate,
release_effect : android::EffectRelease,
get_descriptor : android::EffectGetDescriptor,
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index 941d651..87e2c85 100755..100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -186,30 +186,6 @@ int Reverb_getParameter (ReverbContext *pContext,
int Reverb_LoadPreset (ReverbContext *pContext);
/* Effect Library Interface Implementation */
-extern "C" int EffectQueryNumberEffects(uint32_t *pNumEffects){
- ALOGV("\n\tEffectQueryNumberEffects start");
- *pNumEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
- ALOGV("\tEffectQueryNumberEffects creating %d effects", *pNumEffects);
- ALOGV("\tEffectQueryNumberEffects end\n");
- return 0;
-} /* end EffectQueryNumberEffects */
-
-extern "C" int EffectQueryEffect(uint32_t index,
- effect_descriptor_t *pDescriptor){
- ALOGV("\n\tEffectQueryEffect start");
- ALOGV("\tEffectQueryEffect processing index %d", index);
- if (pDescriptor == NULL){
- ALOGV("\tLVM_ERROR : EffectQueryEffect was passed NULL pointer");
- return -EINVAL;
- }
- if (index >= sizeof(gDescriptors) / sizeof(const effect_descriptor_t *)) {
- ALOGV("\tLVM_ERROR : EffectQueryEffect index out of range %d", index);
- return -ENOENT;
- }
- *pDescriptor = *gDescriptors[index];
- ALOGV("\tEffectQueryEffect end\n");
- return 0;
-} /* end EffectQueryEffect */
extern "C" int EffectCreate(const effect_uuid_t *uuid,
int32_t sessionId,
@@ -2170,13 +2146,13 @@ const struct effect_interface_s gReverbInterface = {
NULL,
}; /* end gReverbInterface */
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
tag : AUDIO_EFFECT_LIBRARY_TAG,
version : EFFECT_LIBRARY_API_VERSION,
name : "Reverb Library",
implementor : "NXP Software Ltd.",
- query_num_effects : android::EffectQueryNumberEffects,
- query_effect : android::EffectQueryEffect,
create_effect : android::EffectCreate,
release_effect : android::EffectRelease,
get_descriptor : android::EffectGetDescriptor,
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index c13b9d4..dfa1711 100755..100644
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -29,4 +29,6 @@ else
LOCAL_SHARED_LIBRARIES += libdl
endif
+LOCAL_CFLAGS += -fvisibility=hidden
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 597866a..25586e8 100755..100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -1818,30 +1818,6 @@ const struct effect_interface_s sEffectInterfaceReverse = {
// Effect Library Interface Implementation
//------------------------------------------------------------------------------
-int PreProcessingLib_QueryNumberEffects(uint32_t *pNumEffects)
-{
- if (PreProc_Init() != 0) {
- return sInitStatus;
- }
- if (pNumEffects == NULL) {
- return -EINVAL;
- }
- *pNumEffects = PREPROC_NUM_EFFECTS;
- return sInitStatus;
-}
-
-int PreProcessingLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor)
-{
- if (PreProc_Init() != 0) {
- return sInitStatus;
- }
- if (index >= PREPROC_NUM_EFFECTS) {
- return -EINVAL;
- }
- *pDescriptor = *sDescriptors[index];
- return 0;
-}
-
int PreProcessingLib_Create(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
@@ -1913,13 +1889,13 @@ int PreProcessingLib_GetDescriptor(const effect_uuid_t *uuid,
return 0;
}
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
tag : AUDIO_EFFECT_LIBRARY_TAG,
version : EFFECT_LIBRARY_API_VERSION,
name : "Audio Preprocessing Library",
implementor : "The Android Open Source Project",
- query_num_effects : PreProcessingLib_QueryNumberEffects,
- query_effect : PreProcessingLib_QueryEffect,
create_effect : PreProcessingLib_Create,
release_effect : PreProcessingLib_Release,
get_descriptor : PreProcessingLib_GetDescriptor
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index 90ebe1f..c35453b 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -123,23 +123,6 @@ int Equalizer_setParameter(AudioEqualizer * pEqualizer, int32_t *pParam, void *p
//--- Effect Library Interface Implementation
//
-extern "C" int EffectQueryNumberEffects(uint32_t *pNumEffects) {
- *pNumEffects = 1;
- return 0;
-} /* end EffectQueryNumberEffects */
-
-extern "C" int EffectQueryEffect(uint32_t index,
- effect_descriptor_t *pDescriptor) {
- if (pDescriptor == NULL) {
- return -EINVAL;
- }
- if (index > 0) {
- return -EINVAL;
- }
- *pDescriptor = gEqualizerDescriptor;
- return 0;
-} /* end EffectQueryNext */
-
extern "C" int EffectCreate(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
@@ -771,8 +754,6 @@ audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
version : EFFECT_LIBRARY_API_VERSION,
name : "Test Equalizer Library",
implementor : "The Android Open Source Project",
- query_num_effects : android::EffectQueryNumberEffects,
- query_effect : android::EffectQueryEffect,
create_effect : android::EffectCreate,
release_effect : android::EffectRelease,
get_descriptor : android::EffectGetDescriptor,
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index a87a834..c37f392 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -94,23 +94,6 @@ static const effect_descriptor_t * const gDescriptors[] = {
/*--- Effect Library Interface Implementation ---*/
-int EffectQueryNumberEffects(uint32_t *pNumEffects) {
- *pNumEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
- return 0;
-}
-
-int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) {
- if (pDescriptor == NULL) {
- return -EINVAL;
- }
- if (index >= sizeof(gDescriptors) / sizeof(const effect_descriptor_t *)) {
- return -EINVAL;
- }
- memcpy(pDescriptor, gDescriptors[index],
- sizeof(effect_descriptor_t));
- return 0;
-}
-
int EffectCreate(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
@@ -2222,8 +2205,6 @@ audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
.version = EFFECT_LIBRARY_API_VERSION,
.name = "Test Equalizer Library",
.implementor = "The Android Open Source Project",
- .query_num_effects = EffectQueryNumberEffects,
- .query_effect = EffectQueryEffect,
.create_effect = EffectCreate,
.release_effect = EffectRelease,
.get_descriptor = EffectGetDescriptor,
diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h
index 1fb14a7..e5248fe 100644
--- a/media/libeffects/testlibs/EffectReverb.h
+++ b/media/libeffects/testlibs/EffectReverb.h
@@ -300,9 +300,6 @@ typedef struct reverb_module_s {
* Effect API
*------------------------------------
*/
-int EffectQueryNumberEffects(uint32_t *pNumEffects);
-int EffectQueryEffect(uint32_t index,
- effect_descriptor_t *pDescriptor);
int EffectCreate(const effect_uuid_t *effectUID,
int32_t sessionId,
int32_t ioId,
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index 76b5110..49cf4fa 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -6,7 +6,7 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
EffectVisualizer.cpp
-LOCAL_CFLAGS+= -O2
+LOCAL_CFLAGS+= -O2 -fvisibility=hidden
LOCAL_SHARED_LIBRARIES := \
libcutils \
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 44baf93..e7eccf1 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -177,23 +177,6 @@ int Visualizer_init(VisualizerContext *pContext)
//--- Effect Library Interface Implementation
//
-int VisualizerLib_QueryNumberEffects(uint32_t *pNumEffects) {
- *pNumEffects = 1;
- return 0;
-}
-
-int VisualizerLib_QueryEffect(uint32_t index,
- effect_descriptor_t *pDescriptor) {
- if (pDescriptor == NULL) {
- return -EINVAL;
- }
- if (index > 0) {
- return -EINVAL;
- }
- *pDescriptor = gVisualizerDescriptor;
- return 0;
-}
-
int VisualizerLib_Create(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
@@ -574,14 +557,13 @@ const struct effect_interface_s gVisualizerInterface = {
NULL,
};
-
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
tag : AUDIO_EFFECT_LIBRARY_TAG,
version : EFFECT_LIBRARY_API_VERSION,
name : "Visualizer Library",
implementor : "The Android Open Source Project",
- query_num_effects : VisualizerLib_QueryNumberEffects,
- query_effect : VisualizerLib_QueryEffect,
create_effect : VisualizerLib_Create,
release_effect : VisualizerLib_Release,
get_descriptor : VisualizerLib_GetDescriptor,
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 54666fb..f2b6441 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -54,7 +54,7 @@ LOCAL_SRC_FILES:= \
LOCAL_SHARED_LIBRARIES := \
libui libcutils libutils libbinder libsonivox libicuuc libexpat \
libcamera_client libstagefright_foundation \
- libgui libdl libaudioutils libmedia_native
+ libgui libdl libaudioutils
LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 680604b..3317d57 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -152,7 +152,8 @@ status_t AudioEffect::set(const effect_uuid_t *type,
mCblk->buffer = (uint8_t *)mCblk + bufOffset;
iEffect->asBinder()->linkToDeath(mIEffectClient);
- ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, mStatus, mEnabled);
+ ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId,
+ mStatus, mEnabled);
return mStatus;
}
@@ -266,9 +267,11 @@ status_t AudioEffect::setParameter(effect_param_t *param)
uint32_t size = sizeof(int);
uint32_t psize = ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize;
- ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data, (param->psize == 8) ? *((int *)param->data + 1): -1);
+ ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data,
+ (param->psize == 8) ? *((int *)param->data + 1): -1);
- return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size, &param->status);
+ return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size,
+ &param->status);
}
status_t AudioEffect::setParameterDeferred(effect_param_t *param)
@@ -321,11 +324,14 @@ status_t AudioEffect::getParameter(effect_param_t *param)
return BAD_VALUE;
}
- ALOGV("getParameter: param: %d, param2: %d", *(int *)param->data, (param->psize == 8) ? *((int *)param->data + 1): -1);
+ ALOGV("getParameter: param: %d, param2: %d", *(int *)param->data,
+ (param->psize == 8) ? *((int *)param->data + 1): -1);
- uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize;
+ uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) +
+ param->vsize;
- return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param, &psize, param);
+ return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param,
+ &psize, param);
}
@@ -346,7 +352,8 @@ void AudioEffect::binderDied()
void AudioEffect::controlStatusChanged(bool controlGranted)
{
- ALOGV("controlStatusChanged %p control %d callback %p mUserData %p", this, controlGranted, mCbf, mUserData);
+ ALOGV("controlStatusChanged %p control %d callback %p mUserData %p", this, controlGranted, mCbf,
+ mUserData);
if (controlGranted) {
if (mStatus == ALREADY_EXISTS) {
mStatus = NO_ERROR;
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 8ea6306..c2ef68c 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -36,7 +36,7 @@ namespace android {
// static
status_t AudioRecord::getMinFrameCount(
- int* frameCount,
+ size_t* frameCount,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask)
@@ -54,7 +54,7 @@ status_t AudioRecord::getMinFrameCount(
}
if (size == 0) {
- ALOGE("Unsupported configuration: sampleRate %d, format %d, channelMask %#x",
+ ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
sampleRate, format, channelMask);
return BAD_VALUE;
}
@@ -63,7 +63,7 @@ status_t AudioRecord::getMinFrameCount(
size <<= 1;
if (audio_is_linear_pcm(format)) {
- int channelCount = popcount(channelMask);
+ uint32_t channelCount = popcount(channelMask);
size /= channelCount * audio_bytes_per_sample(format);
}
@@ -119,15 +119,22 @@ status_t AudioRecord::set(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ int frameCountInt,
callback_t cbf,
void* user,
int notificationFrames,
bool threadCanCallJava,
int sessionId)
{
+ // FIXME "int" here is legacy and will be replaced by size_t later
+ if (frameCountInt < 0) {
+ ALOGE("Invalid frame count %d", frameCountInt);
+ return BAD_VALUE;
+ }
+ size_t frameCount = frameCountInt;
- ALOGV("set(): sampleRate %d, channelMask %#x, frameCount %d",sampleRate, channelMask, frameCount);
+ ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
+ frameCount);
AutoMutex lock(mLock);
@@ -155,8 +162,9 @@ status_t AudioRecord::set(
if (!audio_is_input_channel(channelMask)) {
return BAD_VALUE;
}
-
- int channelCount = popcount(channelMask);
+ mChannelMask = channelMask;
+ uint32_t channelCount = popcount(channelMask);
+ mChannelCount = channelCount;
if (sessionId == 0 ) {
mSessionId = AudioSystem::newAudioSessionId();
@@ -176,7 +184,7 @@ status_t AudioRecord::set(
}
// validate framecount
- int minFrameCount = 0;
+ size_t minFrameCount = 0;
status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask);
if (status != NO_ERROR) {
return status;
@@ -194,8 +202,7 @@ status_t AudioRecord::set(
}
// create the IAudioRecord
- status = openRecord_l(sampleRate, format, channelMask,
- frameCount, input);
+ status = openRecord_l(sampleRate, format, frameCount, input);
if (status != NO_ERROR) {
return status;
}
@@ -209,9 +216,14 @@ status_t AudioRecord::set(
mFormat = format;
// Update buffer size in case it has been limited by AudioFlinger during track creation
- mFrameCount = mCblk->frameCount;
- mChannelCount = (uint8_t)channelCount;
- mChannelMask = channelMask;
+ mFrameCount = mCblk->frameCount_;
+
+ if (audio_is_linear_pcm(mFormat)) {
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ } else {
+ mFrameSize = sizeof(uint8_t);
+ }
+
mActive = false;
mCbf = cbf;
mNotificationFrames = notificationFrames;
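
The hunk above stops recomputing the frame size on every call and caches it in mFrameSize while the record object is being configured. The rule itself is small: one frame is one sample per channel for linear PCM, and a single byte for anything treated as an opaque stream. A hedged standalone sketch, with hypothetical Format and helper names standing in for the platform's audio_format_t utilities:

#include <cstddef>
#include <cstdint>

enum class Format { PCM_16_BIT, PCM_8_BIT, COMPRESSED };

constexpr bool isLinearPcm(Format f) { return f == Format::PCM_16_BIT || f == Format::PCM_8_BIT; }
constexpr size_t bytesPerSample(Format f) { return f == Format::PCM_16_BIT ? 2 : 1; }

// Frame size in bytes: samples-per-frame times bytes-per-sample for PCM;
// otherwise the buffer is byte-addressed.
constexpr size_t frameSize(Format f, uint32_t channelCount) {
    return isLinearPcm(f) ? channelCount * bytesPerSample(f) : sizeof(uint8_t);
}

static_assert(frameSize(Format::PCM_16_BIT, 2) == 4, "stereo 16-bit PCM is 4 bytes per frame");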
@@ -247,25 +259,16 @@ audio_format_t AudioRecord::format() const
return mFormat;
}
-int AudioRecord::channelCount() const
+uint32_t AudioRecord::channelCount() const
{
return mChannelCount;
}
-uint32_t AudioRecord::frameCount() const
+size_t AudioRecord::frameCount() const
{
return mFrameCount;
}
-size_t AudioRecord::frameSize() const
-{
- if (audio_is_linear_pcm(mFormat)) {
- return channelCount()*audio_bytes_per_sample(mFormat);
- } else {
- return sizeof(uint8_t);
- }
-}
-
audio_source_t AudioRecord::inputSource() const
{
return mInputSource;
@@ -291,17 +294,19 @@ status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
mActive = true;
cblk->lock.lock();
- if (!(cblk->flags & CBLK_INVALID_MSK)) {
+ if (!(cblk->flags & CBLK_INVALID)) {
cblk->lock.unlock();
ALOGV("mAudioRecord->start()");
ret = mAudioRecord->start(event, triggerSession);
cblk->lock.lock();
if (ret == DEAD_OBJECT) {
- android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
+ android_atomic_or(CBLK_INVALID, &cblk->flags);
}
}
- if (cblk->flags & CBLK_INVALID_MSK) {
- ret = restoreRecord_l(cblk);
+ if (cblk->flags & CBLK_INVALID) {
+ audio_track_cblk_t* temp = cblk;
+ ret = restoreRecord_l(temp);
+ cblk = temp;
}
cblk->lock.unlock();
if (ret == NO_ERROR) {
@@ -425,13 +430,13 @@ unsigned int AudioRecord::getInputFramesLost() const
status_t AudioRecord::openRecord_l(
uint32_t sampleRate,
audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
audio_io_handle_t input)
{
status_t status;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
+ ALOGE("Could not get audioflinger");
return NO_INIT;
}
@@ -441,7 +446,7 @@ status_t AudioRecord::openRecord_l(
int originalSessionId = mSessionId;
sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), input,
sampleRate, format,
- channelMask,
+ mChannelMask,
frameCount,
IAudioFlinger::TRACK_DEFAULT,
tid,
@@ -454,20 +459,20 @@ status_t AudioRecord::openRecord_l(
ALOGE("AudioFlinger could not create record track, status: %d", status);
return status;
}
- sp<IMemory> cblk = record->getCblk();
- if (cblk == 0) {
+ sp<IMemory> iMem = record->getCblk();
+ if (iMem == 0) {
ALOGE("Could not get control block");
return NO_INIT;
}
mAudioRecord.clear();
mAudioRecord = record;
mCblkMemory.clear();
- mCblkMemory = cblk;
- mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
- mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
- android_atomic_and(~CBLK_DIRECTION_MSK, &mCblk->flags);
- mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
- mCblk->waitTimeMs = 0;
+ mCblkMemory = iMem;
+ audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
+ mCblk = cblk;
+ mBuffers = (char*)cblk + sizeof(audio_track_cblk_t);
+ cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
+ cblk->waitTimeMs = 0;
return NO_ERROR;
}
@@ -483,7 +488,7 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
audioBuffer->frameCount = 0;
audioBuffer->size = 0;
- uint32_t framesReady = cblk->framesReady();
+ uint32_t framesReady = cblk->framesReadyIn();
if (framesReady == 0) {
cblk->lock.lock();
@@ -498,17 +503,22 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
cblk->lock.unlock();
return WOULD_BLOCK;
}
- if (!(cblk->flags & CBLK_INVALID_MSK)) {
+ if (!(cblk->flags & CBLK_INVALID)) {
mLock.unlock();
+ // this condition is in shared memory, so if IAudioRecord and control block
+ // are replaced due to mediaserver death or IAudioRecord invalidation then
+ // cv won't be signalled, but fortunately the timeout will limit the wait
result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
cblk->lock.unlock();
mLock.lock();
if (!mActive) {
return status_t(STOPPED);
}
+ // IAudioRecord may have been re-created while mLock was unlocked
+ cblk = mCblk;
cblk->lock.lock();
}
- if (cblk->flags & CBLK_INVALID_MSK) {
+ if (cblk->flags & CBLK_INVALID) {
goto create_new_record;
}
if (CC_UNLIKELY(result != NO_ERROR)) {
@@ -521,9 +531,11 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
cblk->lock.lock();
if (result == DEAD_OBJECT) {
- android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
+ android_atomic_or(CBLK_INVALID, &cblk->flags);
create_new_record:
- result = AudioRecord::restoreRecord_l(cblk);
+ audio_track_cblk_t* temp = cblk;
+ result = AudioRecord::restoreRecord_l(temp);
+ cblk = temp;
}
if (result != NO_ERROR) {
ALOGW("obtainBuffer create Track error %d", result);
@@ -539,7 +551,7 @@ create_new_record:
}
// read the server count again
start_loop_here:
- framesReady = cblk->framesReady();
+ framesReady = cblk->framesReadyIn();
}
cblk->lock.unlock();
}
@@ -553,18 +565,15 @@ create_new_record:
}
uint32_t u = cblk->user;
- uint32_t bufferEnd = cblk->userBase + cblk->frameCount;
+ uint32_t bufferEnd = cblk->userBase + mFrameCount;
if (framesReq > bufferEnd - u) {
framesReq = bufferEnd - u;
}
- audioBuffer->flags = 0;
- audioBuffer->channelCount= mChannelCount;
- audioBuffer->format = mFormat;
audioBuffer->frameCount = framesReq;
- audioBuffer->size = framesReq*cblk->frameSize;
- audioBuffer->raw = (int8_t*)cblk->buffer(u);
+ audioBuffer->size = framesReq * mFrameSize;
+ audioBuffer->raw = cblk->buffer(mBuffers, mFrameSize, u);
active = mActive;
return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
@@ -572,7 +581,7 @@ create_new_record:
void AudioRecord::releaseBuffer(Buffer* audioBuffer)
{
AutoMutex lock(mLock);
- mCblk->stepUser(audioBuffer->frameCount);
+ mCblk->stepUserIn(audioBuffer->frameCount, mFrameCount);
}
audio_io_handle_t AudioRecord::getInput() const
@@ -631,10 +640,12 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize)
status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS));
if (err < 0) {
// out of buffers, return #bytes written
- if (err == status_t(NO_MORE_BUFFERS))
+ if (err == status_t(NO_MORE_BUFFERS)) {
break;
- if (err == status_t(TIMED_OUT))
+ }
+ if (err == status_t(TIMED_OUT)) {
err = 0;
+ }
return ssize_t(err);
}
@@ -701,7 +712,8 @@ bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
- ALOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up.");
+ ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
+ "Error obtaining an audio buffer, giving up.");
return false;
}
break;
@@ -733,11 +745,11 @@ bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
// Manage overrun callback
- if (active && (cblk->framesAvailable() == 0)) {
+ if (active && (cblk->framesAvailableIn(mFrameCount) == 0)) {
// The value of active is stale, but we are almost sure to be active here because
// otherwise we would have exited when obtainBuffer returned STOPPED earlier.
ALOGV("Overrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
- if (!(android_atomic_or(CBLK_UNDERRUN_ON, &cblk->flags) & CBLK_UNDERRUN_MSK)) {
+ if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) {
mCbf(EVENT_OVERRUN, mUserData, NULL);
}
}
@@ -753,57 +765,40 @@ bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
// must be called with mLock and cblk.lock held. Callers must also hold strong references on
// the IAudioRecord and IMemory in case they are recreated here.
// If the IAudioRecord is successfully restored, the cblk pointer is updated
-status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& cblk)
+status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& refCblk)
{
status_t result;
- if (!(android_atomic_or(CBLK_RESTORING_ON, &cblk->flags) & CBLK_RESTORING_MSK)) {
- ALOGW("dead IAudioRecord, creating a new one");
- // signal old cblk condition so that other threads waiting for available buffers stop
- // waiting now
- cblk->cv.broadcast();
- cblk->lock.unlock();
+ audio_track_cblk_t* cblk = refCblk;
+ audio_track_cblk_t* newCblk = cblk;
+ ALOGW("dead IAudioRecord, creating a new one");
- // if the new IAudioRecord is created, openRecord_l() will modify the
- // following member variables: mAudioRecord, mCblkMemory and mCblk.
- // It will also delete the strong references on previous IAudioRecord and IMemory
- result = openRecord_l(cblk->sampleRate, mFormat, mChannelMask,
- mFrameCount, getInput_l());
- if (result == NO_ERROR) {
- // callback thread or sync event hasn't changed
- result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
- }
- if (result != NO_ERROR) {
- mActive = false;
- }
+ // signal old cblk condition so that other threads waiting for available buffers stop
+ // waiting now
+ cblk->cv.broadcast();
+ cblk->lock.unlock();
- // signal old cblk condition for other threads waiting for restore completion
- android_atomic_or(CBLK_RESTORED_ON, &cblk->flags);
- cblk->cv.broadcast();
- } else {
- if (!(cblk->flags & CBLK_RESTORED_MSK)) {
- ALOGW("dead IAudioRecord, waiting for a new one to be created");
- mLock.unlock();
- result = cblk->cv.waitRelative(cblk->lock, milliseconds(RESTORE_TIMEOUT_MS));
- cblk->lock.unlock();
- mLock.lock();
- } else {
- ALOGW("dead IAudioRecord, already restored");
- result = NO_ERROR;
- cblk->lock.unlock();
- }
- if (result != NO_ERROR || !mActive) {
- result = status_t(STOPPED);
- }
+ // if the new IAudioRecord is created, openRecord_l() will modify the
+ // following member variables: mAudioRecord, mCblkMemory and mCblk.
+ // It will also delete the strong references on previous IAudioRecord and IMemory
+ result = openRecord_l(cblk->sampleRate, mFormat, mFrameCount, getInput_l());
+ if (result == NO_ERROR) {
+ newCblk = mCblk;
+ // callback thread or sync event hasn't changed
+ result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+ }
+ if (result != NO_ERROR) {
+ mActive = false;
}
+
ALOGV("restoreRecord_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
- result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
+ result, mActive, newCblk, cblk, newCblk->flags, cblk->flags);
if (result == NO_ERROR) {
// from now on we switch to the newly created cblk
- cblk = mCblk;
+ refCblk = newCblk;
}
- cblk->lock.lock();
+ newCblk->lock.lock();
ALOGW_IF(result != NO_ERROR, "restoreRecord_l() error %d", result);
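
restoreRecord_l() now takes the control-block pointer by reference and redirects it to the newly created block only when re-opening the IAudioRecord succeeds; on failure the caller keeps a pointer to the old, still-mapped block so the error path can safely log and unlock it. A minimal sketch of that calling convention, with hypothetical Cblk and Proxy types and the error handling reduced to a bool:

#include <cstdint>

struct Cblk { uint32_t flags = 0; };

// Stand-in for the service proxy: re-creates the server-side object and hands
// back a fresh control block, or reports failure (e.g. the server is still down).
struct Proxy {
    Cblk* reopen(bool& ok) {
        static Cblk fresh;   // stand-in for newly mapped shared memory
        ok = true;
        return &fresh;
    }
};

// Swap the caller's pointer only on success.
bool restore(Proxy& proxy, Cblk*& refCblk) {
    bool ok = false;
    Cblk* newCblk = proxy.reopen(ok);
    if (ok) {
        refCblk = newCblk;
    }
    return ok;
}

The call sites in the hunks above follow the same shape with an explicit temporary (audio_track_cblk_t* temp = cblk; ... cblk = temp;), so a stale local pointer is never dereferenced after a successful restore.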
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 207f96f..028e4a3 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -205,12 +205,7 @@ int AudioSystem::logToLinear(float volume)
return volume ? 100 - int(dBConvertInverse * log(volume) + 0.5) : 0;
}
-// DEPRECATED
-status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) {
- return getOutputSamplingRate(samplingRate, (audio_stream_type_t)streamType);
-}
-
-status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type_t streamType)
+status_t AudioSystem::getOutputSamplingRate(uint32_t* samplingRate, audio_stream_type_t streamType)
{
audio_io_handle_t output;
@@ -228,7 +223,7 @@ status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type
status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
audio_stream_type_t streamType,
- int* samplingRate)
+ uint32_t* samplingRate)
{
OutputDescriptor *outputDesc;
@@ -246,17 +241,13 @@ status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
gLock.unlock();
}
- ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate);
+ ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %u", streamType, output,
+ *samplingRate);
return NO_ERROR;
}
-// DEPRECATED
-status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) {
- return getOutputFrameCount(frameCount, (audio_stream_type_t)streamType);
-}
-
-status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t streamType)
+status_t AudioSystem::getOutputFrameCount(size_t* frameCount, audio_stream_type_t streamType)
{
audio_io_handle_t output;
@@ -274,7 +265,7 @@ status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t s
status_t AudioSystem::getFrameCount(audio_io_handle_t output,
audio_stream_type_t streamType,
- int* frameCount)
+ size_t* frameCount)
{
OutputDescriptor *outputDesc;
@@ -290,7 +281,8 @@ status_t AudioSystem::getFrameCount(audio_io_handle_t output,
gLock.unlock();
}
- ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output, *frameCount);
+ ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output,
+ *frameCount);
return NO_ERROR;
}
@@ -369,7 +361,8 @@ status_t AudioSystem::setVoiceVolume(float value)
return af->setVoiceVolume(value);
}
-status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream)
+status_t AudioSystem::getRenderPosition(size_t *halFrames, size_t *dspFrames,
+ audio_stream_type_t stream)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
@@ -381,7 +374,7 @@ status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames
return af->getRenderPosition(halFrames, dspFrames, getOutput(stream));
}
-unsigned int AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
+size_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
unsigned int result = 0;
if (af == 0) return result;
@@ -449,8 +442,10 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle
OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
gOutputs.add(ioHandle, outputDesc);
- ALOGV("ioConfigChanged() new output samplingRate %d, format %d channels %#x frameCount %d latency %d",
- outputDesc->samplingRate, outputDesc->format, outputDesc->channels, outputDesc->frameCount, outputDesc->latency);
+ ALOGV("ioConfigChanged() new output samplingRate %u, format %d channels %#x frameCount %u "
+ "latency %d",
+ outputDesc->samplingRate, outputDesc->format, outputDesc->channels,
+ outputDesc->frameCount, outputDesc->latency);
} break;
case OUTPUT_CLOSED: {
if (gOutputs.indexOfKey(ioHandle) < 0) {
@@ -471,7 +466,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle
if (param2 == NULL) break;
desc = (const OutputDescriptor *)param2;
- ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %#x frameCount %d latency %d",
+ ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %d channels %#x "
+ "frameCount %d latency %d",
ioHandle, desc->samplingRate, desc->format,
desc->channels, desc->frameCount, desc->latency);
OutputDescriptor *outputDesc = gOutputs.valueAt(index);
@@ -510,7 +506,7 @@ sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient;
-// establish binder interface to AudioFlinger service
+// establish binder interface to AudioPolicy service
const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
{
gLock.lock();
@@ -744,14 +740,14 @@ status_t AudioSystem::isSourceActive(audio_source_t stream, bool* state)
return NO_ERROR;
}
-int32_t AudioSystem::getPrimaryOutputSamplingRate()
+uint32_t AudioSystem::getPrimaryOutputSamplingRate()
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return 0;
return af->getPrimaryOutputSamplingRate();
}
-int32_t AudioSystem::getPrimaryOutputFrameCount()
+size_t AudioSystem::getPrimaryOutputFrameCount()
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return 0;
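
Most of the AudioSystem changes are type tightening: sample rates become uint32_t, frame counts and frame positions become size_t, and the old int-typed deprecated overloads are removed. Callers only need to adjust their local variable types, as the VideoEditorPlayer hunk earlier in this diff shows. A small usage sketch against the new signatures; it assumes the platform headers and AUDIO_STREAM_MUSIC, and is meant to compile inside the tree rather than stand alone:

#include <media/AudioSystem.h>
#include <system/audio.h>
#include <utils/Errors.h>

using namespace android;

status_t queryOutputConfig(uint32_t& sampleRate, size_t& frameCount) {
    if (AudioSystem::getOutputSamplingRate(&sampleRate, AUDIO_STREAM_MUSIC) != NO_ERROR) {
        return NO_INIT;
    }
    if (AudioSystem::getOutputFrameCount(&frameCount, AUDIO_STREAM_MUSIC) != NO_ERROR) {
        return NO_INIT;
    }
    return NO_ERROR;
}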
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index aec8c4a..e40895a 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -50,11 +50,13 @@ namespace android {
// static
status_t AudioTrack::getMinFrameCount(
- int* frameCount,
+ size_t* frameCount,
audio_stream_type_t streamType,
uint32_t sampleRate)
{
- if (frameCount == NULL) return BAD_VALUE;
+ if (frameCount == NULL) {
+ return BAD_VALUE;
+ }
// default to 0 in case of error
*frameCount = 0;
@@ -65,11 +67,11 @@ status_t AudioTrack::getMinFrameCount(
// audio_format_t format
// audio_channel_mask_t channelMask
// audio_output_flags_t flags
- int afSampleRate;
+ uint32_t afSampleRate;
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
return NO_INIT;
}
- int afFrameCount;
+ size_t afFrameCount;
if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
return NO_INIT;
}
@@ -120,28 +122,6 @@ AudioTrack::AudioTrack(
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId);
}
-// DEPRECATED
-AudioTrack::AudioTrack(
- int streamType,
- uint32_t sampleRate,
- int format,
- int channelMask,
- int frameCount,
- uint32_t flags,
- callback_t cbf,
- void* user,
- int notificationFrames,
- int sessionId)
- : mStatus(NO_INIT),
- mIsTimed(false),
- mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
-{
- mStatus = set((audio_stream_type_t)streamType, sampleRate, (audio_format_t)format,
- (audio_channel_mask_t) channelMask,
- frameCount, (audio_output_flags_t)flags, cbf, user, notificationFrames,
- 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId);
-}
-
AudioTrack::AudioTrack(
audio_stream_type_t streamType,
uint32_t sampleRate,
@@ -188,7 +168,7 @@ status_t AudioTrack::set(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ int frameCountInt,
audio_output_flags_t flags,
callback_t cbf,
void* user,
@@ -197,10 +177,17 @@ status_t AudioTrack::set(
bool threadCanCallJava,
int sessionId)
{
+ // FIXME "int" here is legacy and will be replaced by size_t later
+ if (frameCountInt < 0) {
+ ALOGE("Invalid frame count %d", frameCountInt);
+ return BAD_VALUE;
+ }
+ size_t frameCount = frameCountInt;
- ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
+ ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+ sharedBuffer->size());
- ALOGV("set() streamType %d frameCount %d flags %04x", streamType, frameCount, flags);
+ ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
AutoMutex lock(mLock);
if (mAudioTrack != 0) {
@@ -214,7 +201,7 @@ status_t AudioTrack::set(
}
if (sampleRate == 0) {
- int afSampleRate;
+ uint32_t afSampleRate;
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
return NO_INIT;
}
@@ -256,7 +243,9 @@ status_t AudioTrack::set(
ALOGE("Invalid channel mask %#x", channelMask);
return BAD_VALUE;
}
+ mChannelMask = channelMask;
uint32_t channelCount = popcount(channelMask);
+ mChannelCount = channelCount;
audio_io_handle_t output = AudioSystem::getOutput(
streamType,
@@ -272,6 +261,7 @@ status_t AudioTrack::set(
mVolume[RIGHT] = 1.0f;
mSendLevel = 0.0f;
mFrameCount = frameCount;
+ mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
mSessionId = sessionId;
mAuxEffectId = 0;
@@ -287,7 +277,6 @@ status_t AudioTrack::set(
status_t status = createTrack_l(streamType,
sampleRate,
format,
- channelMask,
frameCount,
flags,
sharedBuffer,
@@ -305,8 +294,15 @@ status_t AudioTrack::set(
mStreamType = streamType;
mFormat = format;
- mChannelMask = channelMask;
- mChannelCount = channelCount;
+
+ if (audio_is_linear_pcm(format)) {
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ mFrameSizeAF = channelCount * sizeof(int16_t);
+ } else {
+ mFrameSize = sizeof(uint8_t);
+ mFrameSizeAF = sizeof(uint8_t);
+ }
+
mSharedBuffer = sharedBuffer;
mMuted = false;
mActive = false;
@@ -318,7 +314,6 @@ status_t AudioTrack::set(
mUpdatePeriod = 0;
mFlushed = false;
AudioSystem::acquireAudioSessionId(mSessionId);
- mRestoreStatus = NO_ERROR;
return NO_ERROR;
}
@@ -344,23 +339,14 @@ audio_format_t AudioTrack::format() const
return mFormat;
}
-int AudioTrack::channelCount() const
+uint32_t AudioTrack::channelCount() const
{
return mChannelCount;
}
-uint32_t AudioTrack::frameCount() const
+size_t AudioTrack::frameCount() const
{
- return mCblk->frameCount;
-}
-
-size_t AudioTrack::frameSize() const
-{
- if (audio_is_linear_pcm(mFormat)) {
- return channelCount()*audio_bytes_per_sample(mFormat);
- } else {
- return sizeof(uint8_t);
- }
+ return mFrameCount;
}
sp<IMemory>& AudioTrack::sharedBuffer()
@@ -390,7 +376,7 @@ void AudioTrack::start()
cblk->lock.lock();
cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
cblk->waitTimeMs = 0;
- android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags);
+ android_atomic_and(~CBLK_DISABLED, &cblk->flags);
if (t != 0) {
t->resume();
} else {
@@ -399,19 +385,21 @@ void AudioTrack::start()
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
- ALOGV("start %p before lock cblk %p", this, mCblk);
+ ALOGV("start %p before lock cblk %p", this, cblk);
status_t status = NO_ERROR;
- if (!(cblk->flags & CBLK_INVALID_MSK)) {
+ if (!(cblk->flags & CBLK_INVALID)) {
cblk->lock.unlock();
ALOGV("mAudioTrack->start()");
status = mAudioTrack->start();
cblk->lock.lock();
if (status == DEAD_OBJECT) {
- android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
+ android_atomic_or(CBLK_INVALID, &cblk->flags);
}
}
- if (cblk->flags & CBLK_INVALID_MSK) {
- status = restoreTrack_l(cblk, true);
+ if (cblk->flags & CBLK_INVALID) {
+ audio_track_cblk_t* temp = cblk;
+ status = restoreTrack_l(temp, true /*fromStart*/);
+ cblk = temp;
}
cblk->lock.unlock();
if (status != NO_ERROR) {
@@ -528,14 +516,9 @@ status_t AudioTrack::setVolume(float left, float right)
return NO_ERROR;
}
-void AudioTrack::getVolume(float* left, float* right) const
+status_t AudioTrack::setVolume(float volume)
{
- if (left != NULL) {
- *left = mVolume[LEFT];
- }
- if (right != NULL) {
- *right = mVolume[RIGHT];
- }
+ return setVolume(volume, volume);
}
status_t AudioTrack::setAuxEffectSendLevel(float level)
@@ -560,9 +543,9 @@ void AudioTrack::getAuxEffectSendLevel(float* level) const
}
}
-status_t AudioTrack::setSampleRate(int rate)
+status_t AudioTrack::setSampleRate(uint32_t rate)
{
- int afSamplingRate;
+ uint32_t afSamplingRate;
if (mIsTimed) {
return INVALID_OPERATION;
@@ -572,7 +555,9 @@ status_t AudioTrack::setSampleRate(int rate)
return NO_INIT;
}
// Resampler implementation limits input sampling rate to 2 x output sampling rate.
- if (rate <= 0 || rate > afSamplingRate*2 ) return BAD_VALUE;
+ if (rate == 0 || rate > afSamplingRate*2 ) {
+ return BAD_VALUE;
+ }
AutoMutex lock(mLock);
mCblk->sampleRate = rate;
@@ -582,7 +567,7 @@ status_t AudioTrack::setSampleRate(int rate)
uint32_t AudioTrack::getSampleRate() const
{
if (mIsTimed) {
- return INVALID_OPERATION;
+ return 0;
}
AutoMutex lock(mLock);
@@ -615,15 +600,17 @@ status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCou
}
if (loopStart >= loopEnd ||
- loopEnd - loopStart > cblk->frameCount ||
+ loopEnd - loopStart > mFrameCount ||
cblk->server > loopStart) {
- ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, user %d", loopStart, loopEnd, loopCount, cblk->frameCount, cblk->user);
+ ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, "
+ "user %d", loopStart, loopEnd, loopCount, mFrameCount, cblk->user);
return BAD_VALUE;
}
- if ((mSharedBuffer != 0) && (loopEnd > cblk->frameCount)) {
- ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, framecount %d",
- loopStart, loopEnd, cblk->frameCount);
+ if ((mSharedBuffer != 0) && (loopEnd > mFrameCount)) {
+ ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, "
+ "framecount %d",
+ loopStart, loopEnd, mFrameCount);
return BAD_VALUE;
}
@@ -637,7 +624,9 @@ status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCou
status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
- if (mCbf == NULL) return INVALID_OPERATION;
+ if (mCbf == NULL) {
+ return INVALID_OPERATION;
+ }
mMarkerPosition = marker;
mMarkerReached = false;
@@ -647,7 +636,9 @@ status_t AudioTrack::setMarkerPosition(uint32_t marker)
status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
- if (marker == NULL) return BAD_VALUE;
+ if (marker == NULL) {
+ return BAD_VALUE;
+ }
*marker = mMarkerPosition;
@@ -656,7 +647,9 @@ status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
- if (mCbf == NULL) return INVALID_OPERATION;
+ if (mCbf == NULL) {
+ return INVALID_OPERATION;
+ }
uint32_t curPosition;
getPosition(&curPosition);
@@ -668,7 +661,9 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
- if (updatePeriod == NULL) return BAD_VALUE;
+ if (updatePeriod == NULL) {
+ return BAD_VALUE;
+ }
*updatePeriod = mUpdatePeriod;
@@ -677,25 +672,34 @@ status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
status_t AudioTrack::setPosition(uint32_t position)
{
- if (mIsTimed) return INVALID_OPERATION;
+ if (mIsTimed) {
+ return INVALID_OPERATION;
+ }
AutoMutex lock(mLock);
- if (!stopped_l()) return INVALID_OPERATION;
+ if (!stopped_l()) {
+ return INVALID_OPERATION;
+ }
- Mutex::Autolock _l(mCblk->lock);
+ audio_track_cblk_t* cblk = mCblk;
+ Mutex::Autolock _l(cblk->lock);
- if (position > mCblk->user) return BAD_VALUE;
+ if (position > cblk->user) {
+ return BAD_VALUE;
+ }
- mCblk->server = position;
- android_atomic_or(CBLK_FORCEREADY_ON, &mCblk->flags);
+ cblk->server = position;
+ android_atomic_or(CBLK_FORCEREADY, &cblk->flags);
return NO_ERROR;
}
status_t AudioTrack::getPosition(uint32_t *position)
{
- if (position == NULL) return BAD_VALUE;
+ if (position == NULL) {
+ return BAD_VALUE;
+ }
AutoMutex lock(mLock);
*position = mFlushed ? 0 : mCblk->server;
@@ -706,11 +710,14 @@ status_t AudioTrack::reload()
{
AutoMutex lock(mLock);
- if (!stopped_l()) return INVALID_OPERATION;
+ if (!stopped_l()) {
+ return INVALID_OPERATION;
+ }
flush_l();
- mCblk->stepUser(mCblk->frameCount);
+ audio_track_cblk_t* cblk = mCblk;
+ cblk->stepUserOut(mFrameCount, mFrameCount);
return NO_ERROR;
}
@@ -750,8 +757,7 @@ status_t AudioTrack::createTrack_l(
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
audio_output_flags_t flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output)
@@ -791,7 +797,7 @@ status_t AudioTrack::createTrack_l(
// Same comment as below about ignoring frameCount parameter for set()
frameCount = sharedBuffer->size();
} else if (frameCount == 0) {
- int afFrameCount;
+ size_t afFrameCount;
if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
return NO_INIT;
}
@@ -800,17 +806,16 @@ status_t AudioTrack::createTrack_l(
} else if (sharedBuffer != 0) {
- // Ensure that buffer alignment matches channelCount
- int channelCount = popcount(channelMask);
+ // Ensure that buffer alignment matches channel count
// 8-bit data in shared memory is not currently supported by AudioFlinger
size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
- if (channelCount > 1) {
+ if (mChannelCount > 1) {
// More than 2 channels does not require stronger alignment than stereo
alignment <<= 1;
}
- if (((uint32_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
- ALOGE("Invalid buffer alignment: address %p, channelCount %d",
- sharedBuffer->pointer(), channelCount);
+ if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+ ALOGE("Invalid buffer alignment: address %p, channel count %u",
+ sharedBuffer->pointer(), mChannelCount);
return BAD_VALUE;
}
@@ -818,16 +823,16 @@ status_t AudioTrack::createTrack_l(
// there's no frameCount parameter.
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
- frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
+ frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
} else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
// FIXME move these calculations and associated checks to server
- int afSampleRate;
+ uint32_t afSampleRate;
if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
return NO_INIT;
}
- int afFrameCount;
+ size_t afFrameCount;
if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
return NO_INIT;
}
@@ -836,8 +841,8 @@ status_t AudioTrack::createTrack_l(
uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
if (minBufCount < 2) minBufCount = 2;
- int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
- ALOGV("minFrameCount: %d, afFrameCount=%d, minBufCount=%d, sampleRate=%d, afSampleRate=%d"
+ size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+ ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
", afLatency=%d",
minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
@@ -849,7 +854,7 @@ status_t AudioTrack::createTrack_l(
}
// Make sure that application is notified with sufficient margin
// before underrun
- if (mNotificationFramesAct > (uint32_t)frameCount/2) {
+ if (mNotificationFramesAct > frameCount/2) {
mNotificationFramesAct = frameCount/2;
}
if (frameCount < minFrameCount) {
@@ -879,10 +884,12 @@ status_t AudioTrack::createTrack_l(
sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
streamType,
sampleRate,
- format,
- channelMask,
+ // AudioFlinger only sees 16-bit PCM
+ format == AUDIO_FORMAT_PCM_8_BIT ?
+ AUDIO_FORMAT_PCM_16_BIT : format,
+ mChannelMask,
frameCount,
- trackFlags,
+ &trackFlags,
sharedBuffer,
output,
tid,
@@ -893,49 +900,58 @@ status_t AudioTrack::createTrack_l(
ALOGE("AudioFlinger could not create track, status: %d", status);
return status;
}
- sp<IMemory> cblk = track->getCblk();
- if (cblk == 0) {
+ sp<IMemory> iMem = track->getCblk();
+ if (iMem == 0) {
ALOGE("Could not get control block");
return NO_INIT;
}
mAudioTrack = track;
- mCblkMemory = cblk;
- mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
- // old has the previous value of mCblk->flags before the "or" operation
- int32_t old = android_atomic_or(CBLK_DIRECTION_OUT, &mCblk->flags);
+ mCblkMemory = iMem;
+ audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
+ mCblk = cblk;
+ size_t temp = cblk->frameCount_;
+ if (temp < frameCount || (frameCount == 0 && temp == 0)) {
+ // In current design, AudioTrack client checks and ensures frame count validity before
+ // passing it to AudioFlinger so AudioFlinger should not return a different value except
+ // for fast track as it uses a special method of assigning frame count.
+ ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
+ }
+ frameCount = temp;
if (flags & AUDIO_OUTPUT_FLAG_FAST) {
- if (old & CBLK_FAST) {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", mCblk->frameCount);
+ if (trackFlags & IAudioFlinger::TRACK_FAST) {
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
} else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", mCblk->frameCount);
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
// once denied, do not request again if IAudioTrack is re-created
flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
mFlags = flags;
}
if (sharedBuffer == 0) {
- mNotificationFramesAct = mCblk->frameCount/2;
+ mNotificationFramesAct = frameCount/2;
}
}
if (sharedBuffer == 0) {
- mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
+ mBuffers = (char*)cblk + sizeof(audio_track_cblk_t);
} else {
- mCblk->buffers = sharedBuffer->pointer();
+ mBuffers = sharedBuffer->pointer();
// Force buffer full condition as data is already present in shared memory
- mCblk->stepUser(mCblk->frameCount);
+ cblk->stepUserOut(frameCount, frameCount);
}
- mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000));
- mCblk->setSendLevel(mSendLevel);
+ cblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
+ uint16_t(mVolume[LEFT] * 0x1000));
+ cblk->setSendLevel(mSendLevel);
mAudioTrack->attachAuxEffect(mAuxEffectId);
- mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
- mCblk->waitTimeMs = 0;
+ cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
+ cblk->waitTimeMs = 0;
mRemainingFrames = mNotificationFramesAct;
// FIXME don't believe this lie
- mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
+ mLatency = afLatency + (1000*frameCount) / sampleRate;
+ mFrameCount = frameCount;
// If IAudioTrack is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
- if (mCblk->frameCount > mFrameCount) {
- mFrameCount = mCblk->frameCount;
+ if (frameCount > mReqFrameCount) {
+ mReqFrameCount = frameCount;
}
return NO_ERROR;
}
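
In the createTrack_l() rewrite above, the client now passes trackFlags by pointer so AudioFlinger can strip capabilities it refuses (the fast-track request), and it reads the granted frame count back from the control block instead of trusting what it asked for. A toy request/grant sketch of that negotiation; the server stub, flag constant, and minimum size below are all hypothetical:

#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr uint32_t kFlagFast  = 1u << 0;
constexpr size_t   kMinFrames = 256;

// Hypothetical server: may clear flags it denies and may resize the buffer.
size_t createTrackStub(size_t requestedFrames, uint32_t* inOutFlags) {
    *inOutFlags &= ~kFlagFast;                      // pretend the fast path is denied
    return std::max(requestedFrames, kMinFrames);   // and the buffer is enlarged
}

void clientSketch() {
    uint32_t flags = kFlagFast;                     // ask for the fast path
    size_t granted = createTrackStub(128, &flags);  // request 128 frames
    bool fastGranted = (flags & kFlagFast) != 0;    // false: denied by the stub
    // Only the granted values are used from here on, mirroring the diff's
    // "frameCount = temp;" readback and its trackFlags check.
    (void)granted; (void)fastGranted;
}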
@@ -952,10 +968,10 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
audioBuffer->frameCount = 0;
audioBuffer->size = 0;
- uint32_t framesAvail = cblk->framesAvailable();
+ uint32_t framesAvail = cblk->framesAvailableOut(mFrameCount);
cblk->lock.lock();
- if (cblk->flags & CBLK_INVALID_MSK) {
+ if (cblk->flags & CBLK_INVALID) {
goto create_new_track;
}
cblk->lock.unlock();
@@ -974,18 +990,23 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
cblk->lock.unlock();
return WOULD_BLOCK;
}
- if (!(cblk->flags & CBLK_INVALID_MSK)) {
+ if (!(cblk->flags & CBLK_INVALID)) {
mLock.unlock();
+ // this condition is in shared memory, so if IAudioTrack and control block
+ // are replaced due to mediaserver death or IAudioTrack invalidation then
+ // cv won't be signalled, but fortunately the timeout will limit the wait
result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
cblk->lock.unlock();
mLock.lock();
if (!mActive) {
return status_t(STOPPED);
}
+ // IAudioTrack may have been re-created while mLock was unlocked
+ cblk = mCblk;
cblk->lock.lock();
}
- if (cblk->flags & CBLK_INVALID_MSK) {
+ if (cblk->flags & CBLK_INVALID) {
goto create_new_track;
}
if (CC_UNLIKELY(result != NO_ERROR)) {
@@ -994,16 +1015,18 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
// timing out when a loop has been set and we have already written upto loop end
// is a normal condition: no need to wake AudioFlinger up.
if (cblk->user < cblk->loopEnd) {
- ALOGW( "obtainBuffer timed out (is the CPU pegged?) %p name=%#x"
- "user=%08x, server=%08x", this, cblk->mName, cblk->user, cblk->server);
+ ALOGW("obtainBuffer timed out (is the CPU pegged?) %p name=%#x user=%08x, "
+ "server=%08x", this, cblk->mName, cblk->user, cblk->server);
//unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
cblk->lock.unlock();
result = mAudioTrack->start();
cblk->lock.lock();
if (result == DEAD_OBJECT) {
- android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
+ android_atomic_or(CBLK_INVALID, &cblk->flags);
create_new_track:
- result = restoreTrack_l(cblk, false);
+ audio_track_cblk_t* temp = cblk;
+ result = restoreTrack_l(temp, false /*fromStart*/);
+ cblk = temp;
}
if (result != NO_ERROR) {
ALOGW("obtainBuffer create Track error %d", result);
@@ -1021,7 +1044,7 @@ create_new_track:
}
// read the server count again
start_loop_here:
- framesAvail = cblk->framesAvailable_l();
+ framesAvail = cblk->framesAvailableOut_l(mFrameCount);
}
cblk->lock.unlock();
}
@@ -1033,22 +1056,15 @@ create_new_track:
}
uint32_t u = cblk->user;
- uint32_t bufferEnd = cblk->userBase + cblk->frameCount;
+ uint32_t bufferEnd = cblk->userBase + mFrameCount;
if (framesReq > bufferEnd - u) {
framesReq = bufferEnd - u;
}
- audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
- audioBuffer->channelCount = mChannelCount;
audioBuffer->frameCount = framesReq;
- audioBuffer->size = framesReq * cblk->frameSize;
- if (audio_is_linear_pcm(mFormat)) {
- audioBuffer->format = AUDIO_FORMAT_PCM_16_BIT;
- } else {
- audioBuffer->format = mFormat;
- }
- audioBuffer->raw = (int8_t *)cblk->buffer(u);
+ audioBuffer->size = framesReq * mFrameSizeAF;
+ audioBuffer->raw = cblk->buffer(mBuffers, mFrameSizeAF, u);
active = mActive;
return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
@@ -1056,12 +1072,13 @@ create_new_track:
void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
AutoMutex lock(mLock);
- mCblk->stepUser(audioBuffer->frameCount);
+ audio_track_cblk_t* cblk = mCblk;
+ cblk->stepUserOut(audioBuffer->frameCount, mFrameCount);
if (audioBuffer->frameCount > 0) {
// restart track if it was disabled by audioflinger due to previous underrun
- if (mActive && (mCblk->flags & CBLK_DISABLED_MSK)) {
- android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags);
- ALOGW("releaseBuffer() track %p name=%#x disabled, restarting", this, mCblk->mName);
+ if (mActive && (cblk->flags & CBLK_DISABLED)) {
+ android_atomic_and(~CBLK_DISABLED, &cblk->flags);
+ ALOGW("releaseBuffer() track %p name=%#x disabled, restarting", this, cblk->mName);
mAudioTrack->start();
}
}
@@ -1072,8 +1089,12 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer)
ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
- if (mSharedBuffer != 0) return INVALID_OPERATION;
- if (mIsTimed) return INVALID_OPERATION;
+ if (mSharedBuffer != 0) {
+ return INVALID_OPERATION;
+ }
+ if (mIsTimed) {
+ return INVALID_OPERATION;
+ }
if (ssize_t(userSize) < 0) {
// Sanity-check: user is most-likely passing an error code, and it would
@@ -1096,6 +1117,9 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
sp<IMemory> iMem = mCblkMemory;
mLock.unlock();
+ // since mLock is unlocked the IAudioTrack and shared memory may be re-created,
+ // so all cblk references might still refer to old shared memory, but that should be benign
+
ssize_t written = 0;
const int8_t *src = (const int8_t *)buffer;
Buffer audioBuffer;
@@ -1107,8 +1131,9 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
status_t err = obtainBuffer(&audioBuffer, -1);
if (err < 0) {
// out of buffers, return #bytes written
- if (err == status_t(NO_MORE_BUFFERS))
+ if (err == status_t(NO_MORE_BUFFERS)) {
break;
+ }
return ssize_t(err);
}
@@ -1121,8 +1146,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
} else {
toWrite = audioBuffer.size;
memcpy(audioBuffer.i8, src, toWrite);
- src += toWrite;
}
+ src += toWrite;
userSize -= toWrite;
written += toWrite;
@@ -1140,27 +1165,37 @@ TimedAudioTrack::TimedAudioTrack() {
status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
+ AutoMutex lock(mLock);
status_t result = UNKNOWN_ERROR;
+ // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
+ // while we are accessing the cblk
+ sp<IAudioTrack> audioTrack = mAudioTrack;
+ sp<IMemory> iMem = mCblkMemory;
+
// If the track is not invalid already, try to allocate a buffer. If the alloc
// fails, indicating that the server is dead, flag the track as invalid so
// we can attempt to restore it in just a bit.
- if (!(mCblk->flags & CBLK_INVALID_MSK)) {
+ audio_track_cblk_t* cblk = mCblk;
+ if (!(cblk->flags & CBLK_INVALID)) {
result = mAudioTrack->allocateTimedBuffer(size, buffer);
if (result == DEAD_OBJECT) {
- android_atomic_or(CBLK_INVALID_ON, &mCblk->flags);
+ android_atomic_or(CBLK_INVALID, &cblk->flags);
}
}
// If the track is invalid at this point, attempt to restore it and try the
// allocation one more time.
- if (mCblk->flags & CBLK_INVALID_MSK) {
- mCblk->lock.lock();
- result = restoreTrack_l(mCblk, false);
- mCblk->lock.unlock();
+ if (cblk->flags & CBLK_INVALID) {
+ cblk->lock.lock();
+ audio_track_cblk_t* temp = cblk;
+ result = restoreTrack_l(temp, false /*fromStart*/);
+ cblk = temp;
+ cblk->lock.unlock();
- if (result == OK)
+ if (result == OK) {
result = mAudioTrack->allocateTimedBuffer(size, buffer);
+ }
}
return result;
@@ -1172,10 +1207,11 @@ status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
{
AutoMutex lock(mLock);
+ audio_track_cblk_t* cblk = mCblk;
// restart track if it was disabled by audioflinger due to previous underrun
if (buffer->size() != 0 && status == NO_ERROR &&
- mActive && (mCblk->flags & CBLK_DISABLED_MSK)) {
- android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags);
+ mActive && (cblk->flags & CBLK_DISABLED)) {
+ android_atomic_and(~CBLK_DISABLED, &cblk->flags);
ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
mAudioTrack->start();
}
@@ -1206,15 +1242,20 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
bool active = mActive;
mLock.unlock();
+ // since mLock is unlocked the IAudioTrack and shared memory may be re-created,
+ // so all cblk references might still refer to old shared memory, but that should be benign
+
// Manage underrun callback
- if (active && (cblk->framesAvailable() == cblk->frameCount)) {
+ if (active && (cblk->framesAvailableOut(mFrameCount) == mFrameCount)) {
ALOGV("Underrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
- if (!(android_atomic_or(CBLK_UNDERRUN_ON, &cblk->flags) & CBLK_UNDERRUN_MSK)) {
+ if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) {
mCbf(EVENT_UNDERRUN, mUserData, 0);
- if (cblk->server == cblk->frameCount) {
+ if (cblk->server == mFrameCount) {
mCbf(EVENT_BUFFER_END, mUserData, 0);
}
- if (mSharedBuffer != 0) return false;
+ if (mSharedBuffer != 0) {
+ return false;
+ }
}
}
@@ -1265,12 +1306,15 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
status_t err = obtainBuffer(&audioBuffer, waitCount);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
- ALOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up.");
+ ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
+ "Error obtaining an audio buffer, giving up.");
return false;
}
break;
}
- if (err == status_t(STOPPED)) return false;
+ if (err == status_t(STOPPED)) {
+ return false;
+ }
// Divide buffer size by 2 to take into account the expansion
// due to 8 to 16 bit conversion: the callback must fill only half
@@ -1293,7 +1337,9 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
break;
}
- if (writtenSize > reqSize) writtenSize = reqSize;
+ if (writtenSize > reqSize) {
+ writtenSize = reqSize;
+ }
if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
// 8 to 16 bit conversion, note that source and destination are the same address
@@ -1302,10 +1348,10 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
}
audioBuffer.size = writtenSize;
- // NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for
- // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
+ // NOTE: cblk->frameSize is not equal to AudioTrack::frameSize() for
+ // 8 bit PCM data: in this case, cblk->frameSize is based on a sample size of
// 16 bit.
- audioBuffer.frameCount = writtenSize/mCblk->frameSize;
+ audioBuffer.frameCount = writtenSize / mFrameSizeAF;
frames -= audioBuffer.frameCount;
@@ -1321,112 +1367,91 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
return true;
}
-// must be called with mLock and cblk.lock held. Callers must also hold strong references on
+// must be called with mLock and refCblk.lock held. Callers must also hold strong references on
// the IAudioTrack and IMemory in case they are recreated here.
-// If the IAudioTrack is successfully restored, the cblk pointer is updated
-status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart)
+// If the IAudioTrack is successfully restored, the refCblk pointer is updated
+// FIXME Don't depend on caller to hold strong references.
+status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& refCblk, bool fromStart)
{
status_t result;
- if (!(android_atomic_or(CBLK_RESTORING_ON, &cblk->flags) & CBLK_RESTORING_MSK)) {
- ALOGW("dead IAudioTrack, creating a new one from %s TID %d",
- fromStart ? "start()" : "obtainBuffer()", gettid());
+ audio_track_cblk_t* cblk = refCblk;
+ audio_track_cblk_t* newCblk = cblk;
+ ALOGW("dead IAudioTrack, creating a new one from %s",
+ fromStart ? "start()" : "obtainBuffer()");
- // signal old cblk condition so that other threads waiting for available buffers stop
- // waiting now
- cblk->cv.broadcast();
- cblk->lock.unlock();
+ // signal old cblk condition so that other threads waiting for available buffers stop
+ // waiting now
+ cblk->cv.broadcast();
+ cblk->lock.unlock();
- // refresh the audio configuration cache in this process to make sure we get new
- // output parameters in getOutput_l() and createTrack_l()
- AudioSystem::clearAudioConfigCache();
-
- // if the new IAudioTrack is created, createTrack_l() will modify the
- // following member variables: mAudioTrack, mCblkMemory and mCblk.
- // It will also delete the strong references on previous IAudioTrack and IMemory
- result = createTrack_l(mStreamType,
- cblk->sampleRate,
- mFormat,
- mChannelMask,
- mFrameCount,
- mFlags,
- mSharedBuffer,
- getOutput_l());
-
- if (result == NO_ERROR) {
- uint32_t user = cblk->user;
- uint32_t server = cblk->server;
- // restore write index and set other indexes to reflect empty buffer status
- mCblk->user = user;
- mCblk->server = user;
- mCblk->userBase = user;
- mCblk->serverBase = user;
- // restore loop: this is not guaranteed to succeed if new frame count is not
- // compatible with loop length
- setLoop_l(cblk->loopStart, cblk->loopEnd, cblk->loopCount);
- if (!fromStart) {
- mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
- // Make sure that a client relying on callback events indicating underrun or
- // the actual amount of audio frames played (e.g SoundPool) receives them.
- if (mSharedBuffer == 0) {
- uint32_t frames = 0;
- if (user > server) {
- frames = ((user - server) > mCblk->frameCount) ?
- mCblk->frameCount : (user - server);
- memset(mCblk->buffers, 0, frames * mCblk->frameSize);
- }
- // restart playback even if buffer is not completely filled.
- android_atomic_or(CBLK_FORCEREADY_ON, &mCblk->flags);
- // stepUser() clears CBLK_UNDERRUN_ON flag enabling underrun callbacks to
- // the client
- mCblk->stepUser(frames);
+ // refresh the audio configuration cache in this process to make sure we get new
+ // output parameters in getOutput_l() and createTrack_l()
+ AudioSystem::clearAudioConfigCache();
+
+ // if the new IAudioTrack is created, createTrack_l() will modify the
+ // following member variables: mAudioTrack, mCblkMemory and mCblk.
+ // It will also delete the strong references on previous IAudioTrack and IMemory
+ result = createTrack_l(mStreamType,
+ cblk->sampleRate,
+ mFormat,
+ mReqFrameCount, // so that frame count never goes down
+ mFlags,
+ mSharedBuffer,
+ getOutput_l());
+
+ if (result == NO_ERROR) {
+ uint32_t user = cblk->user;
+ uint32_t server = cblk->server;
+ // restore write index and set other indexes to reflect empty buffer status
+ newCblk = mCblk;
+ newCblk->user = user;
+ newCblk->server = user;
+ newCblk->userBase = user;
+ newCblk->serverBase = user;
+ // restore loop: this is not guaranteed to succeed if new frame count is not
+ // compatible with loop length
+ setLoop_l(cblk->loopStart, cblk->loopEnd, cblk->loopCount);
+ if (!fromStart) {
+ newCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
+ // Make sure that a client relying on callback events indicating underrun or
+ // the actual amount of audio frames played (e.g SoundPool) receives them.
+ if (mSharedBuffer == 0) {
+ uint32_t frames = 0;
+ if (user > server) {
+ frames = ((user - server) > mFrameCount) ?
+ mFrameCount : (user - server);
+ memset(mBuffers, 0, frames * mFrameSizeAF);
}
- }
- if (mSharedBuffer != 0) {
- mCblk->stepUser(mCblk->frameCount);
- }
- if (mActive) {
- result = mAudioTrack->start();
- ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result);
- }
- if (fromStart && result == NO_ERROR) {
- mNewPosition = mCblk->server + mUpdatePeriod;
+ // restart playback even if buffer is not completely filled.
+ android_atomic_or(CBLK_FORCEREADY, &newCblk->flags);
+ // stepUser() clears CBLK_UNDERRUN flag enabling underrun callbacks to
+ // the client
+ newCblk->stepUserOut(frames, mFrameCount);
}
}
- if (result != NO_ERROR) {
- android_atomic_and(~CBLK_RESTORING_ON, &cblk->flags);
- ALOGW_IF(result != NO_ERROR, "restoreTrack_l() failed status %d", result);
+ if (mSharedBuffer != 0) {
+ newCblk->stepUserOut(mFrameCount, mFrameCount);
}
- mRestoreStatus = result;
- // signal old cblk condition for other threads waiting for restore completion
- android_atomic_or(CBLK_RESTORED_ON, &cblk->flags);
- cblk->cv.broadcast();
- } else {
- if (!(cblk->flags & CBLK_RESTORED_MSK)) {
- ALOGW("dead IAudioTrack, waiting for a new one TID %d", gettid());
- mLock.unlock();
- result = cblk->cv.waitRelative(cblk->lock, milliseconds(RESTORE_TIMEOUT_MS));
- if (result == NO_ERROR) {
- result = mRestoreStatus;
- }
- cblk->lock.unlock();
- mLock.lock();
- } else {
- ALOGW("dead IAudioTrack, already restored TID %d", gettid());
- result = mRestoreStatus;
- cblk->lock.unlock();
+ if (mActive) {
+ result = mAudioTrack->start();
+ ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result);
+ }
+ if (fromStart && result == NO_ERROR) {
+ mNewPosition = newCblk->server + mUpdatePeriod;
}
}
+ ALOGW_IF(result != NO_ERROR, "restoreTrack_l() failed status %d", result);
ALOGV("restoreTrack_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
- result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
+ result, mActive, newCblk, cblk, newCblk->flags, cblk->flags);
if (result == NO_ERROR) {
// from now on we switch to the newly created cblk
- cblk = mCblk;
+ refCblk = newCblk;
}
- cblk->lock.lock();
+ newCblk->lock.lock();
- ALOGW_IF(result != NO_ERROR, "restoreTrack_l() error %d TID %d", result, gettid());
+ ALOGW_IF(result != NO_ERROR, "restoreTrack_l() error %d", result);
return result;
}
@@ -1438,12 +1463,16 @@ status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
char buffer[SIZE];
String8 result;
+ audio_track_cblk_t* cblk = mCblk;
result.append(" AudioTrack::dump\n");
- snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType, mVolume[0], mVolume[1]);
+ snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
+ mVolume[0], mVolume[1]);
result.append(buffer);
- snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat, mChannelCount, (mCblk == 0) ? 0 : mCblk->frameCount);
+ snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat,
+ mChannelCount, mFrameCount);
result.append(buffer);
- snprintf(buffer, 255, " sample rate(%d), status(%d), muted(%d)\n", (mCblk == 0) ? 0 : mCblk->sampleRate, mStatus, mMuted);
+ snprintf(buffer, 255, " sample rate(%u), status(%d), muted(%d)\n",
+ (cblk == 0) ? 0 : cblk->sampleRate, mStatus, mMuted);
result.append(buffer);
snprintf(buffer, 255, " active(%d), latency (%d)\n", mActive, mLatency);
result.append(buffer);
@@ -1505,20 +1534,20 @@ void AudioTrack::AudioTrackThread::resume()
audio_track_cblk_t::audio_track_cblk_t()
: lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0),
- userBase(0), serverBase(0), buffers(NULL), frameCount(0),
+ userBase(0), serverBase(0), frameCount_(0),
loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000),
mSendLevel(0), flags(0)
{
}
-uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount)
+uint32_t audio_track_cblk_t::stepUser(size_t stepCount, size_t frameCount, bool isOut)
{
- ALOGV("stepuser %08x %08x %d", user, server, frameCount);
+ ALOGV("stepuser %08x %08x %d", user, server, stepCount);
uint32_t u = user;
- u += frameCount;
+ u += stepCount;
// Ensure that user is never ahead of server for AudioRecord
- if (flags & CBLK_DIRECTION_MSK) {
+ if (isOut) {
// If stepServer() has been called once, switch to normal obtainBuffer() timeout period
if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
@@ -1528,30 +1557,29 @@ uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount)
u = server;
}
- uint32_t fc = this->frameCount;
- if (u >= fc) {
+ if (u >= frameCount) {
// common case, user didn't just wrap
- if (u - fc >= userBase ) {
- userBase += fc;
+ if (u - frameCount >= userBase ) {
+ userBase += frameCount;
}
- } else if (u >= userBase + fc) {
+ } else if (u >= userBase + frameCount) {
// user just wrapped
- userBase += fc;
+ userBase += frameCount;
}
user = u;
// Clear flow control error condition as new data has been written/read to/from buffer.
- if (flags & CBLK_UNDERRUN_MSK) {
- android_atomic_and(~CBLK_UNDERRUN_MSK, &flags);
+ if (flags & CBLK_UNDERRUN) {
+ android_atomic_and(~CBLK_UNDERRUN, &flags);
}
return u;
}
-bool audio_track_cblk_t::stepServer(uint32_t frameCount)
+bool audio_track_cblk_t::stepServer(size_t stepCount, size_t frameCount, bool isOut)
{
- ALOGV("stepserver %08x %08x %d", user, server, frameCount);
+ ALOGV("stepserver %08x %08x %d", user, server, stepCount);
if (!tryLock()) {
ALOGW("stepServer() could not lock cblk");
@@ -1561,8 +1589,8 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount)
uint32_t s = server;
bool flushed = (s == user);
- s += frameCount;
- if (flags & CBLK_DIRECTION_MSK) {
+ s += stepCount;
+ if (isOut) {
// Mark that we have read the first buffer so that next time stepUser() is called
// we switch to normal obtainBuffer() timeout period
if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
@@ -1587,43 +1615,42 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount)
}
}
- uint32_t fc = this->frameCount;
- if (s >= fc) {
+ if (s >= frameCount) {
// common case, server didn't just wrap
- if (s - fc >= serverBase ) {
- serverBase += fc;
+ if (s - frameCount >= serverBase ) {
+ serverBase += frameCount;
}
- } else if (s >= serverBase + fc) {
+ } else if (s >= serverBase + frameCount) {
// server just wrapped
- serverBase += fc;
+ serverBase += frameCount;
}
server = s;
- if (!(flags & CBLK_INVALID_MSK)) {
+ if (!(flags & CBLK_INVALID)) {
cv.signal();
}
lock.unlock();
return true;
}
-void* audio_track_cblk_t::buffer(uint32_t offset) const
+void* audio_track_cblk_t::buffer(void *buffers, size_t frameSize, uint32_t offset) const
{
return (int8_t *)buffers + (offset - userBase) * frameSize;
}
-uint32_t audio_track_cblk_t::framesAvailable()
+uint32_t audio_track_cblk_t::framesAvailable(size_t frameCount, bool isOut)
{
Mutex::Autolock _l(lock);
- return framesAvailable_l();
+ return framesAvailable_l(frameCount, isOut);
}
-uint32_t audio_track_cblk_t::framesAvailable_l()
+uint32_t audio_track_cblk_t::framesAvailable_l(size_t frameCount, bool isOut)
{
uint32_t u = user;
uint32_t s = server;
- if (flags & CBLK_DIRECTION_MSK) {
+ if (isOut) {
uint32_t limit = (s < loopStart) ? s : loopStart;
return limit + frameCount - u;
} else {
@@ -1631,12 +1658,12 @@ uint32_t audio_track_cblk_t::framesAvailable_l()
}
}
-uint32_t audio_track_cblk_t::framesReady()
+uint32_t audio_track_cblk_t::framesReady(bool isOut)
{
uint32_t u = user;
uint32_t s = server;
- if (flags & CBLK_DIRECTION_MSK) {
+ if (isOut) {
if (u < loopEnd) {
return u - s;
} else {
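Editor's sketch (not part of the patch): the stepUser()/stepServer() rewrites above keep the pre-existing wrap-around index bookkeeping but now take the step count and frame count as parameters instead of reading frameCount from the control block. The hypothetical, self-contained function below restates that index math so the hunks are easier to review; names are illustrative only.

    #include <stdint.h>
    #include <stddef.h>

    // 'user' is a free-running 32-bit frame index; 'userBase' marks the start of
    // the current pass over the buffer, so (user - userBase) stays within
    // [0, frameCount) and can be turned into a byte offset by cblk->buffer().
    static uint32_t advanceUser(uint32_t user, uint32_t &userBase,
                                size_t stepCount, size_t frameCount) {
        uint32_t u = user + stepCount;
        if (u >= frameCount) {
            // common case: the index did not just wrap around zero
            if (u - frameCount >= userBase) {
                userBase += frameCount;
            }
        } else if (u >= userBase + frameCount) {
            // the 32-bit index itself just wrapped
            userBase += frameCount;
        }
        return u;
    }

With userBase maintained this way, buffer(buffers, frameSize, offset) above reduces to buffers + (offset - userBase) * frameSize.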
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index ce8ffc4..a010bb6 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -89,8 +89,8 @@ public:
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
- track_flags_t flags,
+ size_t frameCount,
+ track_flags_t *flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid,
@@ -106,7 +106,8 @@ public:
data.writeInt32(format);
data.writeInt32(channelMask);
data.writeInt32(frameCount);
- data.writeInt32((int32_t) flags);
+ track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
+ data.writeInt32(lFlags);
data.writeStrongBinder(sharedBuffer->asBinder());
data.writeInt32((int32_t) output);
data.writeInt32((int32_t) tid);
@@ -119,6 +120,10 @@ public:
if (lStatus != NO_ERROR) {
ALOGE("createTrack error: %s", strerror(-lStatus));
} else {
+ lFlags = reply.readInt32();
+ if (flags != NULL) {
+ *flags = lFlags;
+ }
lSessionId = reply.readInt32();
if (sessionId != NULL) {
*sessionId = lSessionId;
@@ -138,7 +143,7 @@ public:
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
track_flags_t flags,
pid_t tid,
int *sessionId,
@@ -501,7 +506,7 @@ public:
return reply.readInt32();
}
- virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+ virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
audio_io_handle_t output) const
{
Parcel data, reply;
@@ -522,7 +527,7 @@ public:
return status;
}
- virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const
+ virtual size_t getInputFramesLost(audio_io_handle_t ioHandle) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -690,7 +695,7 @@ public:
return (audio_module_handle_t) reply.readInt32();
}
- virtual int32_t getPrimaryOutputSamplingRate()
+ virtual uint32_t getPrimaryOutputSamplingRate()
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -698,7 +703,7 @@ public:
return reply.readInt32();
}
- virtual int32_t getPrimaryOutputFrameCount()
+ virtual size_t getPrimaryOutputFrameCount()
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -723,7 +728,7 @@ status_t BnAudioFlinger::onTransact(
uint32_t sampleRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
audio_channel_mask_t channelMask = data.readInt32();
- size_t bufferCount = data.readInt32();
+ size_t frameCount = data.readInt32();
track_flags_t flags = (track_flags_t) data.readInt32();
sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
@@ -732,7 +737,8 @@ status_t BnAudioFlinger::onTransact(
status_t status;
sp<IAudioTrack> track = createTrack(pid,
(audio_stream_type_t) streamType, sampleRate, format,
- channelMask, bufferCount, flags, buffer, output, tid, &sessionId, &status);
+ channelMask, frameCount, &flags, buffer, output, tid, &sessionId, &status);
+ reply->writeInt32(flags);
reply->writeInt32(sessionId);
reply->writeInt32(status);
reply->writeStrongBinder(track->asBinder());
@@ -745,13 +751,13 @@ status_t BnAudioFlinger::onTransact(
uint32_t sampleRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
audio_channel_mask_t channelMask = data.readInt32();
- size_t bufferCount = data.readInt32();
+ size_t frameCount = data.readInt32();
track_flags_t flags = (track_flags_t) data.readInt32();
pid_t tid = (pid_t) data.readInt32();
int sessionId = data.readInt32();
status_t status;
sp<IAudioRecord> record = openRecord(pid, input,
- sampleRate, format, channelMask, bufferCount, flags, tid, &sessionId, &status);
+ sampleRate, format, channelMask, frameCount, flags, tid, &sessionId, &status);
reply->writeInt32(sessionId);
reply->writeInt32(status);
reply->writeStrongBinder(record->asBinder());
@@ -865,7 +871,8 @@ status_t BnAudioFlinger::onTransact(
case REGISTER_CLIENT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(data.readStrongBinder());
+ sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(
+ data.readStrongBinder());
registerClient(client);
return NO_ERROR;
} break;
@@ -965,8 +972,8 @@ status_t BnAudioFlinger::onTransact(
case GET_RENDER_POSITION: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- uint32_t halFrames;
- uint32_t dspFrames;
+ size_t halFrames;
+ size_t dspFrames;
status_t status = getRenderPosition(&halFrames, &dspFrames, output);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1043,7 +1050,8 @@ status_t BnAudioFlinger::onTransact(
int id;
int enabled;
- sp<IEffect> effect = createEffect(pid, &desc, client, priority, output, sessionId, &status, &id, &enabled);
+ sp<IEffect> effect = createEffect(pid, &desc, client, priority, output, sessionId,
+ &status, &id, &enabled);
reply->writeInt32(status);
reply->writeInt32(id);
reply->writeInt32(enabled);
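Editor's sketch (not part of the patch): createTrack() now takes the track flags by pointer, and BnAudioFlinger writes the (possibly adjusted) flags back into the reply just before the session id, so a client can learn which flags the server actually granted. A hedged illustration of the resulting calling pattern; variable names are illustrative and the surrounding setup (binder proxy, validated parameters) is assumed:

    status_t status;
    int sessionId = mSessionId;
    IAudioFlinger::track_flags_t flags = requestedFlags;  // e.g. TRACK_DEFAULT
    sp<IAudioTrack> track = audioFlinger->createTrack(
            getpid(), streamType, sampleRate, format, channelMask, frameCount,
            &flags,                    // in/out as of this change
            sharedBuffer, output, tid, &sessionId, &status);
    if (track != 0 && flags != requestedFlags) {
        // The server did not honour every requested flag; 'flags' now reports
        // what was actually granted, so the client can fall back accordingly.
    }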
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 4178b29..2d1e0f8 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -50,7 +50,8 @@ public:
ALOGV("ioConfigChanged stream %d", stream);
data.writeInt32(stream);
} else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
- const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2;
+ const AudioSystem::OutputDescriptor *desc =
+ (const AudioSystem::OutputDescriptor *)param2;
data.writeInt32(desc->samplingRate);
data.writeInt32(desc->format);
data.writeInt32(desc->channels);
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 401437c..769deae 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -399,13 +399,15 @@ status_t BnAudioPolicyService::onTransact(
case SET_PHONE_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- reply->writeInt32(static_cast <uint32_t>(setPhoneState((audio_mode_t) data.readInt32())));
+ reply->writeInt32(static_cast <uint32_t>(setPhoneState(
+ (audio_mode_t) data.readInt32())));
return NO_ERROR;
} break;
case SET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32());
+ audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(
+ data.readInt32());
audio_policy_forced_cfg_t config =
static_cast <audio_policy_forced_cfg_t>(data.readInt32());
reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config)));
@@ -414,7 +416,8 @@ status_t BnAudioPolicyService::onTransact(
case GET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32());
+ audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(
+ data.readInt32());
reply->writeInt32(static_cast <uint32_t>(getForceUse(usage)));
return NO_ERROR;
} break;
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index abc8899..ee70ef7 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -489,7 +489,7 @@ Sample::~Sample()
::close(mFd);
}
mData.clear();
- delete mUrl;
+ free(mUrl);
}
status_t Sample::doLoad()
@@ -568,8 +568,8 @@ void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftV
}
// initialize track
- int afFrameCount;
- int afSampleRate;
+ size_t afFrameCount;
+ uint32_t afSampleRate;
audio_stream_type_t streamType = mSoundPool->streamType();
if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
afFrameCount = kDefaultFrameCount;
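Editor's sketch (not part of the patch): SoundPool (and MediaPlayerService::AudioOutput further down) now query the hardware output parameters into correctly typed variables, size_t for a frame count and uint32_t for a sample rate, instead of plain int. A minimal sketch of the query pattern, assuming the AudioSystem getters keep their existing names and header location:

    #include <media/AudioSystem.h>   // assumed header for AudioSystem
    #include <system/audio.h>

    using namespace android;

    static void queryOutputParams(audio_stream_type_t streamType) {
        size_t   afFrameCount = 0;   // was 'int' before this change
        uint32_t afSampleRate = 0;   // was 'int' before this change
        if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
            afFrameCount = 1024;     // illustrative fallback default
        }
        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
            afSampleRate = 44100;    // illustrative fallback default
        }
        // afFrameCount / afSampleRate can now feed AudioTrack buffer sizing.
    }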
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 253602d..42584fe 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1036,7 +1036,7 @@ bool ToneGenerator::initAudioTrack() {
goto initAudioTrack_exit;
}
- mpAudioTrack->setVolume(mVolume, mVolume);
+ mpAudioTrack->setVolume(mVolume);
mState = TONE_INIT;
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 8196e10..5b4071b 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -88,7 +88,8 @@ status_t Visualizer::setEnabled(bool enabled)
return status;
}
-status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate)
+status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
+ uint32_t rate)
{
if (rate > CAPTURE_RATE_MAX) {
return BAD_VALUE;
@@ -334,7 +335,8 @@ void Visualizer::controlStatusChanged(bool controlGranted) {
//-------------------------------------------------------------------------
-Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava)
+Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate,
+ bool bCanCallJava)
: Thread(bCanCallJava), mReceiver(receiver)
{
mSleepTimeUs = 1000000000 / captureRate;
diff --git a/media/libmedia_native/Android.mk b/media/libmedia_native/Android.mk
deleted file mode 100644
index 065a90f..0000000
--- a/media/libmedia_native/Android.mk
+++ /dev/null
@@ -1,11 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES :=
-
-LOCAL_MODULE:= libmedia_native
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 5b5ed71..48f48e4 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -28,7 +28,6 @@ LOCAL_SHARED_LIBRARIES := \
libdl \
libgui \
libmedia \
- libmedia_native \
libsonivox \
libstagefright \
libstagefright_foundation \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9bedff1..c3e5c40 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1387,8 +1387,8 @@ status_t MediaPlayerService::AudioOutput::open(
}
ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask,
format, bufferCount, mSessionId);
- int afSampleRate;
- int afFrameCount;
+ uint32_t afSampleRate;
+ size_t afFrameCount;
uint32_t frameCount;
if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index ff27873..d3ec122 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -550,8 +550,6 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
}
}
} else if (what == Renderer::kWhatFlushComplete) {
- CHECK_EQ(what, (int32_t)Renderer::kWhatFlushComplete);
-
int32_t audio;
CHECK(msg->findInt32("audio", &audio));
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index cf455bd..afaa5db 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -57,9 +57,7 @@ NuPlayer::RTSPSource::RTSPSource(
}
NuPlayer::RTSPSource::~RTSPSource() {
- if (mLooper != NULL) {
- mLooper->stop();
- }
+ mLooper->stop();
}
void NuPlayer::RTSPSource::start() {
@@ -86,6 +84,9 @@ void NuPlayer::RTSPSource::start() {
}
void NuPlayer::RTSPSource::stop() {
+ if (mLooper == NULL) {
+ return;
+ }
sp<AMessage> msg = new AMessage(kWhatDisconnect, mReflector->id());
sp<AMessage> dummy;
diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp
index 00d2017..e0d2c21 100644
--- a/media/libnbaio/NBAIO.cpp
+++ b/media/libnbaio/NBAIO.cpp
@@ -24,44 +24,55 @@ namespace android {
size_t Format_frameSize(NBAIO_Format format)
{
- switch (format) {
- case Format_SR44_1_C2_I16:
- case Format_SR48_C2_I16:
- return 2 * sizeof(short);
- case Format_SR44_1_C1_I16:
- case Format_SR48_C1_I16:
- return 1 * sizeof(short);
- case Format_Invalid:
- default:
- return 0;
- }
+ return Format_channelCount(format) * sizeof(short);
}
size_t Format_frameBitShift(NBAIO_Format format)
{
- switch (format) {
- case Format_SR44_1_C2_I16:
- case Format_SR48_C2_I16:
- return 2; // 1 << 2 == 2 * sizeof(short)
- case Format_SR44_1_C1_I16:
- case Format_SR48_C1_I16:
- return 1; // 1 << 1 == 1 * sizeof(short)
- case Format_Invalid:
- default:
- return 0;
- }
+ // sizeof(short) == 2, so frame size == 1 << channels
+ return Format_channelCount(format);
}
+enum {
+ Format_SR_8000,
+ Format_SR_11025,
+ Format_SR_16000,
+ Format_SR_22050,
+ Format_SR_24000,
+ Format_SR_32000,
+ Format_SR_44100,
+ Format_SR_48000,
+ Format_SR_Mask = 7
+};
+
+enum {
+ Format_C_1 = 0x08,
+ Format_C_2 = 0x10,
+ Format_C_Mask = 0x18
+};
+
unsigned Format_sampleRate(NBAIO_Format format)
{
- switch (format) {
- case Format_SR44_1_C1_I16:
- case Format_SR44_1_C2_I16:
+ if (format == Format_Invalid) {
+ return 0;
+ }
+ switch (format & Format_SR_Mask) {
+ case Format_SR_8000:
+ return 8000;
+ case Format_SR_11025:
+ return 11025;
+ case Format_SR_16000:
+ return 16000;
+ case Format_SR_22050:
+ return 22050;
+ case Format_SR_24000:
+ return 24000;
+ case Format_SR_32000:
+ return 32000;
+ case Format_SR_44100:
return 44100;
- case Format_SR48_C1_I16:
- case Format_SR48_C2_I16:
+ case Format_SR_48000:
return 48000;
- case Format_Invalid:
default:
return 0;
}
@@ -69,14 +80,14 @@ unsigned Format_sampleRate(NBAIO_Format format)
unsigned Format_channelCount(NBAIO_Format format)
{
- switch (format) {
- case Format_SR44_1_C1_I16:
- case Format_SR48_C1_I16:
+ if (format == Format_Invalid) {
+ return 0;
+ }
+ switch (format & Format_C_Mask) {
+ case Format_C_1:
return 1;
- case Format_SR44_1_C2_I16:
- case Format_SR48_C2_I16:
+ case Format_C_2:
return 2;
- case Format_Invalid:
default:
return 0;
}
@@ -84,11 +95,46 @@ unsigned Format_channelCount(NBAIO_Format format)
NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount)
{
- if (sampleRate == 44100 && channelCount == 2) return Format_SR44_1_C2_I16;
- if (sampleRate == 48000 && channelCount == 2) return Format_SR48_C2_I16;
- if (sampleRate == 44100 && channelCount == 1) return Format_SR44_1_C1_I16;
- if (sampleRate == 48000 && channelCount == 1) return Format_SR48_C1_I16;
- return Format_Invalid;
+ NBAIO_Format format;
+ switch (sampleRate) {
+ case 8000:
+ format = Format_SR_8000;
+ break;
+ case 11025:
+ format = Format_SR_11025;
+ break;
+ case 16000:
+ format = Format_SR_16000;
+ break;
+ case 22050:
+ format = Format_SR_22050;
+ break;
+ case 24000:
+ format = Format_SR_24000;
+ break;
+ case 32000:
+ format = Format_SR_32000;
+ break;
+ case 44100:
+ format = Format_SR_44100;
+ break;
+ case 48000:
+ format = Format_SR_48000;
+ break;
+ default:
+ return Format_Invalid;
+ }
+ switch (channelCount) {
+ case 1:
+ format |= Format_C_1;
+ break;
+ case 2:
+ format |= Format_C_2;
+ break;
+ default:
+ return Format_Invalid;
+ }
+ return format;
}
// This is a default implementation; it is expected that subclasses will optimize this.
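Editor's sketch (not part of the patch): the NBAIO.cpp rewrite above replaces the four hard-coded formats with a bit-packed NBAIO_Format, a sample-rate code in the low three bits and a channel-count code in bits 3..4, so Format_sampleRate(), Format_channelCount() and Format_from_SR_C() simply pack and unpack those fields. A minimal round-trip check built only on the functions shown in this hunk (the header path is assumed):

    #include <media/nbaio/NBAIO.h>   // assumed location of the NBAIO_Format helpers
    #include <assert.h>

    static void checkFormatRoundTrip() {
        // Pack 44.1 kHz stereo into the new bit-field representation ...
        NBAIO_Format fmt = Format_from_SR_C(44100, 2);
        assert(fmt != Format_Invalid);
        // ... and read the fields back out.
        assert(Format_sampleRate(fmt) == 44100);
        assert(Format_channelCount(fmt) == 2);
        // Frames are 16-bit PCM: 2 channels * sizeof(short) == 4 bytes.
        assert(Format_frameSize(fmt) == 4);
        // Rates outside the table collapse to Format_Invalid.
        assert(Format_from_SR_C(12345, 2) == Format_Invalid);
    }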
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a01d03f..2a7b2ae 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -803,6 +803,8 @@ status_t ACodec::setComponentRole(
"audio_decoder.raw", "audio_encoder.raw" },
{ MEDIA_MIMETYPE_AUDIO_FLAC,
"audio_decoder.flac", "audio_encoder.flac" },
+ { MEDIA_MIMETYPE_AUDIO_MSGSM,
+ "audio_decoder.gsm", "audio_encoder.gsm" },
};
static const size_t kNumMimeToRole =
@@ -3152,11 +3154,6 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
mCodec->mOMX = omx;
mCodec->mNode = node;
- mCodec->mPortEOS[kPortIndexInput] =
- mCodec->mPortEOS[kPortIndexOutput] = false;
-
- mCodec->mInputEOSResult = OK;
-
{
sp<AMessage> notify = mCodec->mNotify->dup();
notify->setInt32("what", ACodec::kWhatComponentAllocated);
@@ -3178,6 +3175,11 @@ ACodec::LoadedState::LoadedState(ACodec *codec)
void ACodec::LoadedState::stateEntered() {
ALOGV("[%s] Now Loaded", mCodec->mComponentName.c_str());
+ mCodec->mPortEOS[kPortIndexInput] =
+ mCodec->mPortEOS[kPortIndexOutput] = false;
+
+ mCodec->mInputEOSResult = OK;
+
if (mCodec->mShutdownInProgress) {
bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index faa0f31..a056706 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -78,7 +78,6 @@ LOCAL_SHARED_LIBRARIES := \
libicuuc \
liblog \
libmedia \
- libmedia_native \
libsonivox \
libssl \
libstagefright_omx \
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 861aebe..3cf4d5c 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -58,7 +58,7 @@ AudioSource::AudioSource(
ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
CHECK(channelCount == 1 || channelCount == 2);
- int minFrameCount;
+ size_t minFrameCount;
status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
sampleRate,
AUDIO_FORMAT_PCM_16_BIT,
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index efd7af7..efd7af7 100755..100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 8b52e15..8b52e15 100755..100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index e7b5903..5d8029c 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -40,6 +40,7 @@ const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW = "audio/g711-mlaw";
const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw";
const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac";
const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
+const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 70de174..22aefcc 100755..100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1390,6 +1390,8 @@ void OMXCodec::setComponentRole(
"audio_decoder.raw", "audio_encoder.raw" },
{ MEDIA_MIMETYPE_AUDIO_FLAC,
"audio_decoder.flac", "audio_encoder.flac" },
+ { MEDIA_MIMETYPE_AUDIO_MSGSM,
+ "audio_decoder.gsm", "audio_encoder.gsm" },
};
static const size_t kNumMimeToRole =
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 773854f..773854f 100755..100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index bccffd8..af8186c 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -42,7 +42,7 @@ static bool FileHasAcceptableExtension(const char *extension) {
".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
- ".avi", ".mpeg", ".mpg", ".mpga"
+ ".avi", ".mpeg", ".mpg", ".awb", ".mpga"
};
static const size_t kNumValidExtensions =
sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
diff --git a/media/libstagefright/ThrottledSource.cpp b/media/libstagefright/ThrottledSource.cpp
index 348a9d3..7496752 100644
--- a/media/libstagefright/ThrottledSource.cpp
+++ b/media/libstagefright/ThrottledSource.cpp
@@ -31,10 +31,6 @@ ThrottledSource::ThrottledSource(
CHECK(mBandwidthLimitBytesPerSecond > 0);
}
-status_t ThrottledSource::initCheck() const {
- return mSource->initCheck();
-}
-
ssize_t ThrottledSource::readAt(off64_t offset, void *data, size_t size) {
Mutex::Autolock autoLock(mLock);
@@ -62,17 +58,9 @@ ssize_t ThrottledSource::readAt(off64_t offset, void *data, size_t size) {
if (whenUs > nowUs) {
usleep(whenUs - nowUs);
}
-
return n;
}
-status_t ThrottledSource::getSize(off64_t *size) {
- return mSource->getSize(size);
-}
-
-uint32_t ThrottledSource::flags() {
- return mSource->flags();
-}
} // namespace android
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index a38400b..d32f4fb 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -38,6 +38,7 @@ enum {
WAVE_FORMAT_PCM = 0x0001,
WAVE_FORMAT_ALAW = 0x0006,
WAVE_FORMAT_MULAW = 0x0007,
+ WAVE_FORMAT_MSGSM = 0x0031,
WAVE_FORMAT_EXTENSIBLE = 0xFFFE
};
@@ -178,6 +179,7 @@ status_t WAVExtractor::init() {
if (mWaveFormat != WAVE_FORMAT_PCM
&& mWaveFormat != WAVE_FORMAT_ALAW
&& mWaveFormat != WAVE_FORMAT_MULAW
+ && mWaveFormat != WAVE_FORMAT_MSGSM
&& mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
return ERROR_UNSUPPORTED;
}
@@ -216,6 +218,10 @@ status_t WAVExtractor::init() {
&& mBitsPerSample != 24) {
return ERROR_UNSUPPORTED;
}
+ } else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ if (mBitsPerSample != 0) {
+ return ERROR_UNSUPPORTED;
+ }
} else {
CHECK(mWaveFormat == WAVE_FORMAT_MULAW
|| mWaveFormat == WAVE_FORMAT_ALAW);
@@ -283,6 +289,10 @@ status_t WAVExtractor::init() {
mTrackMeta->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_ALAW);
break;
+ case WAVE_FORMAT_MSGSM:
+ mTrackMeta->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MSGSM);
+ break;
default:
CHECK_EQ(mWaveFormat, (uint16_t)WAVE_FORMAT_MULAW);
mTrackMeta->setCString(
@@ -294,11 +304,17 @@ status_t WAVExtractor::init() {
mTrackMeta->setInt32(kKeyChannelMask, mChannelMask);
mTrackMeta->setInt32(kKeySampleRate, mSampleRate);
- size_t bytesPerSample = mBitsPerSample >> 3;
-
- int64_t durationUs =
- 1000000LL * (mDataSize / (mNumChannels * bytesPerSample))
- / mSampleRate;
+ int64_t durationUs = 0;
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ // 65 bytes decode to 320 8kHz samples
+ durationUs =
+ 1000000LL * (mDataSize / 65 * 320) / 8000;
+ } else {
+ size_t bytesPerSample = mBitsPerSample >> 3;
+ durationUs =
+ 1000000LL * (mDataSize / (mNumChannels * bytesPerSample))
+ / mSampleRate;
+ }
mTrackMeta->setInt64(kKeyDuration, durationUs);
@@ -388,7 +404,16 @@ status_t WAVSource::read(
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
- int64_t pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
+ int64_t pos = 0;
+
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ // 65 bytes decode to 320 8kHz samples
+ int64_t samplenumber = (seekTimeUs * mSampleRate) / 1000000;
+ int64_t framenumber = samplenumber / 320;
+ pos = framenumber * 65;
+ } else {
+ pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
+ }
if (pos > mSize) {
pos = mSize;
}
@@ -412,6 +437,15 @@ status_t WAVSource::read(
maxBytesToRead = maxBytesAvailable;
}
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ // Microsoft packs 2 frames into 65 bytes, rather than using separate 33-byte frames,
+ // so read multiples of 65, and use smaller buffers to account for ~10:1 expansion ratio
+ if (maxBytesToRead > 1024) {
+ maxBytesToRead = 1024;
+ }
+ maxBytesToRead = (maxBytesToRead / 65) * 65;
+ }
+
ssize_t n = mDataSource->readAt(
mCurrentPos, buffer->data(),
maxBytesToRead);
@@ -468,12 +502,17 @@ status_t WAVSource::read(
}
}
- size_t bytesPerSample = mBitsPerSample >> 3;
+ int64_t timeStampUs = 0;
+
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ timeStampUs = 1000000LL * (mCurrentPos - mOffset) * 320 / 65 / mSampleRate;
+ } else {
+ size_t bytesPerSample = mBitsPerSample >> 3;
+ timeStampUs = 1000000LL * (mCurrentPos - mOffset)
+ / (mNumChannels * bytesPerSample) / mSampleRate;
+ }
- buffer->meta_data()->setInt64(
- kKeyTime,
- 1000000LL * (mCurrentPos - mOffset)
- / (mNumChannels * bytesPerSample) / mSampleRate);
+ buffer->meta_data()->setInt64(kKeyTime, timeStampUs);
buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
mCurrentPos += n;
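Editor's sketch (not part of the patch): every MS-GSM path added to WAVExtractor above relies on one ratio, namely that a WAV49 block of 65 bytes holds two GSM frames, i.e. 320 samples at 8 kHz. The helpers below (hypothetical names) restate the duration, seek and timestamp arithmetic used in the hunks:

    #include <stdint.h>

    static const int64_t kMsGsmBlockBytes      = 65;   // one WAV49 block
    static const int64_t kMsGsmSamplesPerBlock = 320;  // two 160-sample GSM frames

    // Duration of the data chunk, as computed in WAVExtractor::init().
    static int64_t msGsmDurationUs(int64_t dataSize) {
        return 1000000LL * (dataSize / kMsGsmBlockBytes * kMsGsmSamplesPerBlock) / 8000;
    }

    // Seek target, as computed in WAVSource::read(): round the requested sample
    // down to a whole block so decoding stays block-aligned.
    static int64_t msGsmSeekPos(int64_t seekTimeUs, int32_t sampleRate) {
        int64_t sampleNumber = (seekTimeUs * sampleRate) / 1000000;
        int64_t blockNumber  = sampleNumber / kMsGsmSamplesPerBlock;
        return blockNumber * kMsGsmBlockBytes;
    }

    // Timestamp of a buffer starting 'bytesIntoData' bytes past the data chunk.
    static int64_t msGsmTimestampUs(int64_t bytesIntoData, int32_t sampleRate) {
        return 1000000LL * bytesIntoData * kMsGsmSamplesPerBlock
                / kMsGsmBlockBytes / sampleRate;
    }

The 1024-byte cap on maxBytesToRead, rounded down to a multiple of 65, keeps each MediaBuffer small enough to absorb the roughly 10:1 expansion from packed GSM bytes to 16-bit PCM.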
diff --git a/media/libstagefright/codecs/gsm/Android.mk b/media/libstagefright/codecs/gsm/Android.mk
new file mode 100644
index 0000000..2e43120
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/Android.mk
@@ -0,0 +1,4 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
new file mode 100644
index 0000000..9c0c6ae
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -0,0 +1,21 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ SoftGSM.cpp
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/media/libstagefright/include \
+ frameworks/native/include/media/openmax \
+ external/libgsm/inc
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright libstagefright_omx libstagefright_foundation libutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libgsm
+
+LOCAL_MODULE := libstagefright_soft_gsmdec
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/gsm/dec/MODULE_LICENSE_APACHE2 b/media/libstagefright/codecs/gsm/dec/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/dec/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/codecs/gsm/dec/NOTICE b/media/libstagefright/codecs/gsm/dec/NOTICE
new file mode 100644
index 0000000..c5b1efa
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/dec/NOTICE
@@ -0,0 +1,190 @@
+
+ Copyright (c) 2005-2008, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
new file mode 100644
index 0000000..00e0c85
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftGSM"
+#include <utils/Log.h>
+
+#include "SoftGSM.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+SoftGSM::SoftGSM(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mSignalledError(false) {
+
+ CHECK(!strcmp(name, "OMX.google.gsm.decoder"));
+
+ mGsm = gsm_create();
+ CHECK(mGsm);
+ int msopt = 1;
+ gsm_option(mGsm, GSM_OPT_WAV49, &msopt);
+
+ initPorts();
+}
+
+SoftGSM::~SoftGSM() {
+ gsm_destroy(mGsm);
+}
+
+void SoftGSM::initPorts() {
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+
+ def.nPortIndex = 0;
+ def.eDir = OMX_DirInput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = sizeof(gsm_frame);
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 1;
+
+ def.format.audio.cMIMEType =
+ const_cast<char *>(MEDIA_MIMETYPE_AUDIO_MSGSM);
+
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingGSMFR;
+
+ addPort(def);
+
+ def.nPortIndex = 1;
+ def.eDir = OMX_DirOutput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = kMaxNumSamplesPerFrame * sizeof(int16_t);
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 2;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+}
+
+OMX_ERRORTYPE SoftGSM::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eEndian = OMX_EndianBig;
+ pcmParams->bInterleaved = OMX_TRUE;
+ pcmParams->nBitPerSample = 16;
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+ pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+
+ pcmParams->nChannels = 1;
+ pcmParams->nSamplingRate = 8000;
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalGetParameter(index, params);
+ }
+}
+
+OMX_ERRORTYPE SoftGSM::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (pcmParams->nChannels != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (pcmParams->nSamplingRate != 8000) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamStandardComponentRole:
+ {
+ const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+ (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+ if (strncmp((const char *)roleParams->cRole,
+ "audio_decoder.gsm",
+ OMX_MAX_STRINGNAME_SIZE - 1)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+}
+
+void SoftGSM::onQueueFilled(OMX_U32 portIndex) {
+ if (mSignalledError) {
+ return;
+ }
+
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+
+ while (!inQueue.empty() && !outQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+
+ outHeader->nFilledLen = 0;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+
+ outQueue.erase(outQueue.begin());
+ outInfo->mOwnedByUs = false;
+ notifyFillBufferDone(outHeader);
+ return;
+ }
+
+ if (inHeader->nFilledLen > kMaxNumSamplesPerFrame) {
+ ALOGE("input buffer too large (%ld).", inHeader->nFilledLen);
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ }
+
+ if(((inHeader->nFilledLen / 65) * 65) != inHeader->nFilledLen) {
+ ALOGE("input buffer not multiple of 65 (%ld).", inHeader->nFilledLen);
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ }
+
+ uint8_t *inputptr = inHeader->pBuffer + inHeader->nOffset;
+
+ int n = mSignalledError ? 0 : DecodeGSM(mGsm,
+ reinterpret_cast<int16_t *>(outHeader->pBuffer), inputptr, inHeader->nFilledLen);
+
+ outHeader->nTimeStamp = inHeader->nTimeStamp;
+ outHeader->nOffset = 0;
+ outHeader->nFilledLen = n * sizeof(int16_t);
+ outHeader->nFlags = 0;
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ }
+}
+
+
+// static
+int SoftGSM::DecodeGSM(gsm handle,
+ int16_t *out, uint8_t *in, size_t inSize) {
+
+ int ret = 0;
+ while (inSize > 0) {
+ gsm_decode(handle, in, out);
+ in += 33;
+ inSize -= 33;
+ out += 160;
+ ret += 160;
+ gsm_decode(handle, in, out);
+ in += 32;
+ inSize -= 32;
+ out += 160;
+ ret += 160;
+ }
+ return ret;
+}
+
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+ const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+ return new android::SoftGSM(name, callbacks, appData, component);
+}
+
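For reference, a minimal standalone sketch of the MS-GSM (WAV49) framing that SoftGSM::DecodeGSM above relies on: every 65-byte block holds two sub-frames of 33 and 32 bytes, and each sub-frame decodes to 160 samples at 8 kHz, so one block yields 320 samples (40 ms). The helper below is illustrative only and not part of this change; it assumes the libgsm header "gsm.h" and library are available, and that the input size has already been validated as a multiple of 65, as the component does.

    #include <stddef.h>
    #include <stdint.h>
    #include <vector>
    extern "C" {
    #include "gsm.h"
    }

    // Decode whole 65-byte MS-GSM blocks into 16-bit PCM at 8 kHz.
    static std::vector<int16_t> decodeMsGsmBlocks(uint8_t *in, size_t inSize) {
        std::vector<int16_t> pcm;
        gsm handle = gsm_create();
        int one = 1;
        gsm_option(handle, GSM_OPT_WAV49, &one);   // enable 65-byte MS-GSM framing
        while (inSize >= 65) {
            int16_t frame[160];
            gsm_decode(handle, in, frame);         // first sub-frame (33 bytes)
            pcm.insert(pcm.end(), frame, frame + 160);
            gsm_decode(handle, in + 33, frame);    // second sub-frame (32 bytes)
            pcm.insert(pcm.end(), frame, frame + 160);
            in += 65;
            inSize -= 65;
        }
        gsm_destroy(handle);
        return pcm;
    }
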
diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.h b/media/libstagefright/codecs/gsm/dec/SoftGSM.h
new file mode 100644
index 0000000..8ab6116
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_GSM_H_
+
+#define SOFT_GSM_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+extern "C" {
+#include "gsm.h"
+}
+
+namespace android {
+
+struct SoftGSM : public SimpleSoftOMXComponent {
+ SoftGSM(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftGSM();
+
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params);
+
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+ enum {
+ kNumBuffers = 4,
+ kMaxNumSamplesPerFrame = 16384,
+ };
+
+ bool mSignalledError;
+ gsm mGsm;
+
+ void initPorts();
+
+ static int DecodeGSM(gsm handle, int16_t *out, uint8_t *in, size_t inSize);
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftGSM);
+};
+
+} // namespace android
+
+#endif // SOFT_GSM_H_
+
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index bf9ab3a..a400b4c 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -66,7 +66,7 @@ void SoftVPX::initPorts() {
def.eDir = OMX_DirInput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = 256 * 1024;
+ def.nBufferSize = 768 * 1024;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainVideo;
diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
index 0edafb9..dbe02b8 100644
--- a/media/libstagefright/include/FragmentedMP4Parser.h
+++ b/media/libstagefright/include/FragmentedMP4Parser.h
@@ -263,7 +263,7 @@ private:
void copyBuffer(
sp<ABuffer> *dst,
- size_t offset, uint64_t size, size_t extra = 0) const;
+ size_t offset, uint64_t size) const;
DISALLOW_EVIL_CONSTRUCTORS(FragmentedMP4Parser);
};
diff --git a/media/libstagefright/include/ThrottledSource.h b/media/libstagefright/include/ThrottledSource.h
index 7fe7c06..673268b 100644
--- a/media/libstagefright/include/ThrottledSource.h
+++ b/media/libstagefright/include/ThrottledSource.h
@@ -28,18 +28,44 @@ struct ThrottledSource : public DataSource {
const sp<DataSource> &source,
int32_t bandwidthLimitBytesPerSecond);
- virtual status_t initCheck() const;
-
+ // implementation of readAt() that sleeps to achieve the desired max throughput
virtual ssize_t readAt(off64_t offset, void *data, size_t size);
- virtual status_t getSize(off64_t *size);
- virtual uint32_t flags();
+ // returns an empty string to prevent callers from using the Uri to construct a new datasource
+ virtual String8 getUri() {
+ return String8();
+ }
+
+ // following methods all call through to the wrapped DataSource's methods
+
+ status_t initCheck() const {
+ return mSource->initCheck();
+ }
+
+ virtual status_t getSize(off64_t *size) {
+ return mSource->getSize(size);
+ }
+
+ virtual uint32_t flags() {
+ return mSource->flags();
+ }
+
+ virtual status_t reconnectAtOffset(off64_t offset) {
+ return mSource->reconnectAtOffset(offset);
+ }
+
+ virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL) {
+ return mSource->DrmInitialization(mime);
+ }
+
+ virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {
+ mSource->getDrmInfo(handle, client);
+ };
virtual String8 getMIMEType() const {
return mSource->getMIMEType();
}
-
private:
Mutex mLock;
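The wrapper above forwards most calls to the wrapped DataSource and only intercepts readAt(), which sleeps as needed so the effective throughput stays at or below the configured bytes-per-second limit. A minimal sketch of that rate-limiting idea follows; the class and member names are illustrative assumptions, not the actual ThrottledSource.cpp implementation.

    #include <chrono>
    #include <thread>
    #include <stddef.h>
    #include <stdint.h>

    // Tracks bytes delivered since construction and sleeps whenever delivery
    // runs ahead of the allowed rate.
    class RateLimiter {
    public:
        explicit RateLimiter(int32_t bytesPerSecond)
            : mBytesPerSecond(bytesPerSecond),
              mStart(std::chrono::steady_clock::now()),
              mTotalBytes(0) {}

        // Call after each successful read of n bytes.
        void onBytesTransferred(size_t n) {
            using namespace std::chrono;
            mTotalBytes += n;
            // earliest time by which mTotalBytes bytes are allowed to have flowed
            auto allowedAt = mStart +
                    microseconds((mTotalBytes * 1000000) / mBytesPerSecond);
            auto now = steady_clock::now();
            if (allowedAt > now) {
                std::this_thread::sleep_for(allowedAt - now);
            }
        }

    private:
        const int32_t mBytesPerSecond;
        const std::chrono::steady_clock::time_point mStart;
        uint64_t mTotalBytes;
    };

A readAt() built on this would perform the real read, then call onBytesTransferred() with the number of bytes actually read before handing the data back to the caller.
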
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 8f7d12b..7fc7037 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -758,31 +758,69 @@ static void addESDSFromCodecPrivate(
esds = NULL;
}
-void addVorbisCodecInfo(
+status_t addVorbisCodecInfo(
const sp<MetaData> &meta,
const void *_codecPrivate, size_t codecPrivateSize) {
- // printf("vorbis private data follows:\n");
// hexdump(_codecPrivate, codecPrivateSize);
- CHECK(codecPrivateSize >= 3);
+ if (codecPrivateSize < 1) {
+ return ERROR_MALFORMED;
+ }
const uint8_t *codecPrivate = (const uint8_t *)_codecPrivate;
- CHECK(codecPrivate[0] == 0x02);
- size_t len1 = codecPrivate[1];
- size_t len2 = codecPrivate[2];
+ if (codecPrivate[0] != 0x02) {
+ return ERROR_MALFORMED;
+ }
- CHECK(codecPrivateSize > 3 + len1 + len2);
+ // codecInfo starts with two lengths, len1 and len2, that are
+ // "Xiph-style-lacing encoded"...
- CHECK(codecPrivate[3] == 0x01);
- meta->setData(kKeyVorbisInfo, 0, &codecPrivate[3], len1);
+ size_t offset = 1;
+ size_t len1 = 0;
+ while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) {
+ len1 += 0xff;
+ ++offset;
+ }
+ if (offset >= codecPrivateSize) {
+ return ERROR_MALFORMED;
+ }
+ len1 += codecPrivate[offset++];
- CHECK(codecPrivate[len1 + 3] == 0x03);
+ size_t len2 = 0;
+ while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) {
+ len2 += 0xff;
+ ++offset;
+ }
+ if (offset >= codecPrivateSize) {
+ return ERROR_MALFORMED;
+ }
+ len2 += codecPrivate[offset++];
+
+ if (codecPrivateSize < offset + len1 + len2) {
+ return ERROR_MALFORMED;
+ }
+
+ if (codecPrivate[offset] != 0x01) {
+ return ERROR_MALFORMED;
+ }
+ meta->setData(kKeyVorbisInfo, 0, &codecPrivate[offset], len1);
+
+ offset += len1;
+ if (codecPrivate[offset] != 0x03) {
+ return ERROR_MALFORMED;
+ }
+
+ offset += len2;
+ if (codecPrivate[offset] != 0x05) {
+ return ERROR_MALFORMED;
+ }
- CHECK(codecPrivate[len1 + len2 + 3] == 0x05);
meta->setData(
- kKeyVorbisBooks, 0, &codecPrivate[len1 + len2 + 3],
- codecPrivateSize - len1 - len2 - 3);
+ kKeyVorbisBooks, 0, &codecPrivate[offset],
+ codecPrivateSize - offset);
+
+ return OK;
}
void MatroskaExtractor::addTracks() {
@@ -809,6 +847,8 @@ void MatroskaExtractor::addTracks() {
sp<MetaData> meta = new MetaData;
+ status_t err = OK;
+
switch (track->GetType()) {
case VIDEO_TRACK:
{
@@ -855,7 +895,8 @@ void MatroskaExtractor::addTracks() {
} else if (!strcmp("A_VORBIS", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
- addVorbisCodecInfo(meta, codecPrivate, codecPrivateSize);
+ err = addVorbisCodecInfo(
+ meta, codecPrivate, codecPrivateSize);
} else if (!strcmp("A_MPEG/L3", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
} else {
@@ -872,6 +913,11 @@ void MatroskaExtractor::addTracks() {
continue;
}
+ if (err != OK) {
+ ALOGE("skipping track, codec specific data was malformed.");
+ continue;
+ }
+
long long durationNs = mSegment->GetDuration();
meta->setInt64(kKeyDuration, (durationNs + 500) / 1000);
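The two lengths in the Vorbis CodecPrivate block are "Xiph-style lacing" values, which the new parsing above decodes inline: a length is stored as a run of 0xFF bytes followed by one final byte, and its value is the sum of all those bytes. A small self-contained helper showing the same decoding, for illustration only (the name is not from this patch):

    #include <stddef.h>
    #include <stdint.h>

    // Reads one Xiph-laced length starting at *offset, advancing *offset past it.
    // Returns false if the buffer ends before the length terminates.
    static bool readXiphLacedSize(const uint8_t *data, size_t size,
                                  size_t *offset, size_t *outLen) {
        size_t len = 0;
        while (*offset < size && data[*offset] == 0xff) {
            len += 0xff;
            ++(*offset);
        }
        if (*offset >= size) {
            return false;   // malformed: ran off the end of the buffer
        }
        len += data[(*offset)++];
        *outLen = len;
        return true;
    }

    // Example: the byte sequence { 0xff, 0xff, 0x1e } decodes to 255 + 255 + 30 = 540.
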
diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
index 7fe4e63..54c3d63 100644
--- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp
+++ b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
@@ -1971,8 +1971,8 @@ status_t FragmentedMP4Parser::parseTrackFragmentRun(
}
void FragmentedMP4Parser::copyBuffer(
- sp<ABuffer> *dst, size_t offset, uint64_t size, size_t extra) const {
- sp<ABuffer> buf = new ABuffer(size + extra);
+ sp<ABuffer> *dst, size_t offset, uint64_t size) const {
+ sp<ABuffer> buf = new ABuffer(size);
memcpy(buf->data(), mBuffer->data() + offset, size);
*dst = buf;
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 3747b3b..6e1c04d 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -53,6 +53,7 @@ static const struct {
{ "OMX.google.vpx.decoder", "vpxdec", "video_decoder.vpx" },
{ "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
{ "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
+ { "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
};
static const size_t kNumComponents =
diff --git a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
index bc35aef..b913124 100644
--- a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
+++ b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
@@ -271,6 +271,7 @@ sp<ABuffer> TunnelRenderer::dequeueBuffer() {
if (mFirstFailedAttemptUs + 50000ll > ALooper::GetNowUs()) {
// We're willing to wait a little while to get the right packet.
+#if 0
if (!mRequestedRetransmission) {
ALOGI("requesting retransmission of seqNo %d",
(mLastDequeuedExtSeqNo + 1) & 0xffff);
@@ -280,7 +281,9 @@ sp<ABuffer> TunnelRenderer::dequeueBuffer() {
notify->post();
mRequestedRetransmission = true;
- } else {
+ } else
+#endif
+ {
ALOGI("still waiting for the correct packet to arrive.");
}
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 2899953..c4050b8 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -15,6 +15,9 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
AudioFlinger.cpp \
+ Threads.cpp \
+ Tracks.cpp \
+ Effects.cpp \
AudioMixer.cpp.arm \
AudioResampler.cpp.arm \
AudioPolicyService.cpp \
@@ -31,7 +34,6 @@ LOCAL_C_INCLUDES := \
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
-# FIXME keep libmedia_native but remove libmedia after split
LOCAL_SHARED_LIBRARIES := \
libaudioutils \
libcommon_time_client \
@@ -39,7 +41,6 @@ LOCAL_SHARED_LIBRARIES := \
libutils \
libbinder \
libmedia \
- libmedia_native \
libnbaio \
libhardware \
libhardware_legacy \
@@ -72,6 +73,10 @@ LOCAL_CFLAGS += -UFAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
# 47.5 seconds at 44.1 kHz, 8 megabytes
# LOCAL_CFLAGS += -DTEE_SINK_FRAMES=0x200000
+# uncomment for dumpsys to write most recent audio input to .wav file
+# 47.5 seconds at 44.1 kHz, 8 megabytes
+# LOCAL_CFLAGS += -DTEE_SINK_INPUT_FRAMES=0x200000
+
# uncomment to enable the audio watchdog
# LOCAL_SRC_FILES += AudioWatchdog.cpp
# LOCAL_CFLAGS += -DAUDIO_WATCHDOG
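As a back-of-the-envelope check on the sizing quoted in the TEE_SINK comments above (assuming 16-bit stereo frames, i.e. 4 bytes per frame):

    0x200000 frames = 2,097,152 frames
    2,097,152 frames / 44,100 frames/s ≈ 47.5 s
    2,097,152 frames x 4 bytes/frame = 8,388,608 bytes = 8 MiB
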
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1913b6f..514fcb1 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -29,7 +29,6 @@
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
#include <utils/String16.h>
#include <utils/threads.h>
#include <utils/Atomic.h>
@@ -38,15 +37,8 @@
#include <cutils/properties.h>
#include <cutils/compiler.h>
-#undef ADD_BATTERY_DATA
-
-#ifdef ADD_BATTERY_DATA
-#include <media/IMediaPlayerService.h>
-#include <media/IMediaDeathNotifier.h>
-#endif
-
-#include <private/media/AudioTrackShared.h>
-#include <private/media/AudioEffectShared.h>
+//#include <private/media/AudioTrackShared.h>
+//#include <private/media/AudioEffectShared.h>
#include <system/audio.h>
#include <hardware/audio.h>
@@ -64,26 +56,8 @@
#include <powermanager/PowerManager.h>
-// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds
-#ifdef DEBUG_CPU_USAGE
-#include <cpustats/CentralTendencyStatistics.h>
-#include <cpustats/ThreadCpuUsage.h>
-#endif
-
#include <common_time/cc_helper.h>
-#include <common_time/local_clock.h>
-
-#include "FastMixer.h"
-
-// NBAIO implementations
-#include <media/nbaio/AudioStreamOutSink.h>
-#include <media/nbaio/MonoPipe.h>
-#include <media/nbaio/MonoPipeReader.h>
-#include <media/nbaio/Pipe.h>
-#include <media/nbaio/PipeReader.h>
-#include <media/nbaio/SourceAudioBufferProvider.h>
-
-#include "SchedulingPolicyService.h"
+//#include <common_time/local_clock.h>
// ----------------------------------------------------------------------------
@@ -105,90 +79,13 @@ namespace android {
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
static const char kHardwareLockedString[] = "Hardware lock is taken\n";
-static const float MAX_GAIN = 4096.0f;
-static const uint32_t MAX_GAIN_INT = 0x1000;
-
-// retry counts for buffer fill timeout
-// 50 * ~20msecs = 1 second
-static const int8_t kMaxTrackRetries = 50;
-static const int8_t kMaxTrackStartupRetries = 50;
-// allow less retry attempts on direct output thread.
-// direct outputs can be a scarce resource in audio hardware and should
-// be released as quickly as possible.
-static const int8_t kMaxTrackRetriesDirect = 2;
-
-static const int kDumpLockRetries = 50;
-static const int kDumpLockSleepUs = 20000;
-
-// don't warn about blocked writes or record buffer overflows more often than this
-static const nsecs_t kWarningThrottleNs = seconds(5);
-
-// RecordThread loop sleep time upon application overrun or audio HAL read error
-static const int kRecordThreadSleepUs = 5000;
-
-// maximum time to wait for setParameters to complete
-static const nsecs_t kSetParametersTimeoutNs = seconds(2);
-
-// minimum sleep time for the mixer thread loop when tracks are active but in underrun
-static const uint32_t kMinThreadSleepTimeUs = 5000;
-// maximum divider applied to the active sleep time in the mixer thread loop
-static const uint32_t kMaxThreadSleepTimeShift = 2;
-
-// minimum normal mix buffer size, expressed in milliseconds rather than frames
-static const uint32_t kMinNormalMixBufferSizeMs = 20;
-// maximum normal mix buffer size
-static const uint32_t kMaxNormalMixBufferSizeMs = 24;
nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
-// Whether to use fast mixer
-static const enum {
- FastMixer_Never, // never initialize or use: for debugging only
- FastMixer_Always, // always initialize and use, even if not needed: for debugging only
- // normal mixer multiplier is 1
- FastMixer_Static, // initialize if needed, then use all the time if initialized,
- // multiplier is calculated based on min & max normal mixer buffer size
- FastMixer_Dynamic, // initialize if needed, then use dynamically depending on track load,
- // multiplier is calculated based on min & max normal mixer buffer size
- // FIXME for FastMixer_Dynamic:
- // Supporting this option will require fixing HALs that can't handle large writes.
- // For example, one HAL implementation returns an error from a large write,
- // and another HAL implementation corrupts memory, possibly in the sample rate converter.
- // We could either fix the HAL implementations, or provide a wrapper that breaks
- // up large writes into smaller ones, and the wrapper would need to deal with scheduler.
-} kUseFastMixer = FastMixer_Static;
-
-static uint32_t gScreenState; // incremented by 2 when screen state changes, bit 0 == 1 means "off"
- // AudioFlinger::setParameters() updates, other threads read w/o lock
-
-// Priorities for requestPriority
-static const int kPriorityAudioApp = 2;
-static const int kPriorityFastMixer = 3;
-
-// IAudioFlinger::createTrack() reports back to client the total size of shared memory area
-// for the track. The client then sub-divides this into smaller buffers for its use.
-// Currently the client uses double-buffering by default, but doesn't tell us about that.
-// So for now we just assume that client is double-buffered.
-// FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
-// N-buffering, so AudioFlinger could allocate the right amount of memory.
-// See the client's minBufCount and mNotificationFramesAct calculations for details.
-static const int kFastTrackMultiplier = 2;
+uint32_t AudioFlinger::mScreenState;
// ----------------------------------------------------------------------------
-#ifdef ADD_BATTERY_DATA
-// To collect the amplifier usage
-static void addBatteryData(uint32_t params) {
- sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
- if (service == NULL) {
- // it already logged
- return;
- }
-
- service->addBatteryData(params);
-}
-#endif
-
static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
const hw_module_t *mod;
@@ -364,7 +261,7 @@ void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args)
write(fd, result.string(), result.size());
}
-static bool tryLock(Mutex& mutex)
+bool AudioFlinger::dumpTryLock(Mutex& mutex)
{
bool locked = false;
for (int i = 0; i < kDumpLockRetries; ++i) {
@@ -383,7 +280,7 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
dumpPermissionDenial(fd, args);
} else {
// get state of hardware lock
- bool hardwareLocked = tryLock(mHardwareLock);
+ bool hardwareLocked = dumpTryLock(mHardwareLock);
if (!hardwareLocked) {
String8 result(kHardwareLockedString);
write(fd, result.string(), result.size());
@@ -391,7 +288,7 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
mHardwareLock.unlock();
}
- bool locked = tryLock(mLock);
+ bool locked = dumpTryLock(mLock);
// failed to lock - AudioFlinger is probably deadlocked
if (!locked) {
@@ -417,7 +314,15 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
dev->dump(dev, fd);
}
- if (locked) mLock.unlock();
+
+ // dump the serially shared record tee sink
+ if (mRecordTeeSource != 0) {
+ dumpTee(fd, mRecordTeeSource);
+ }
+
+ if (locked) {
+ mLock.unlock();
+ }
}
return NO_ERROR;
}
@@ -444,8 +349,8 @@ sp<IAudioTrack> AudioFlinger::createTrack(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
- IAudioFlinger::track_flags_t flags,
+ size_t frameCount,
+ IAudioFlinger::track_flags_t *flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid,
@@ -466,6 +371,14 @@ sp<IAudioTrack> AudioFlinger::createTrack(
goto Exit;
}
+ // client is responsible for conversion of 8-bit PCM to 16-bit PCM,
+ // and we don't yet support 8.24 or 32-bit PCM
+ if (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT) {
+ ALOGE("createTrack() invalid format %d", format);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
{
Mutex::Autolock _l(mLock);
PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -856,8 +769,9 @@ bool AudioFlinger::streamMute(audio_stream_type_t stream) const
status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
- ALOGV("setParameters(): io %d, keyvalue %s, tid %d, calling pid %d",
- ioHandle, keyValuePairs.string(), gettid(), IPCThreadState::self()->getCallingPid());
+ ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
+ ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());
+
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
@@ -906,8 +820,8 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
String8 screenState;
if (param.get(String8(AudioParameter::keyScreenState), screenState) == NO_ERROR) {
bool isOff = screenState == "off";
- if (isOff != (gScreenState & 1)) {
- gScreenState = ((gScreenState & ~1) + 2) | isOff;
+ if (isOff != (AudioFlinger::mScreenState & 1)) {
+ AudioFlinger::mScreenState = ((AudioFlinger::mScreenState & ~1) + 2) | isOff;
}
}
return final_result;
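For context on the encoding kept here (carried over from the file-scope gScreenState this change removes): bit 0 of mScreenState holds whether the screen is off, and the value is bumped by 2 on every state change so readers can detect that a transition happened. Starting from 0 (screen on), a worked sequence looks like:

    screen off: ((0 & ~1) + 2) | 1 = 3
    screen on:  ((3 & ~1) + 2) | 0 = 4
    screen off: ((4 & ~1) + 2) | 1 = 7
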
@@ -941,8 +855,8 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& keys) const
{
-// ALOGV("getParameters() io %d, keys %s, tid %d, calling pid %d",
-// ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid());
+ ALOGVV("getParameters() io %d, keys %s, calling pid %d",
+ ioHandle, keys.string(), IPCThreadState::self()->getCallingPid());
Mutex::Autolock _l(mLock);
@@ -1028,7 +942,7 @@ status_t AudioFlinger::setVoiceVolume(float value)
return ret;
}
-status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+status_t AudioFlinger::getRenderPosition(size_t *halFrames, size_t *dspFrames,
audio_io_handle_t output) const
{
status_t status;
@@ -1112,7 +1026,8 @@ void AudioFlinger::audioConfigChanged_l(int event, audio_io_handle_t ioHandle, c
// removeClient_l() must be called with AudioFlinger::mLock held
void AudioFlinger::removeClient_l(pid_t pid)
{
- ALOGV("removeClient_l() pid %d, tid %d, calling tid %d", pid, gettid(), IPCThreadState::self()->getCallingPid());
+ ALOGV("removeClient_l() pid %d, calling pid %d", pid,
+ IPCThreadState::self()->getCallingPid());
mClients.removeItem(pid);
}
@@ -1131,4596 +1046,7 @@ sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(int sessionId,
return thread;
}
-// ----------------------------------------------------------------------------
-
-AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
- audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
- : Thread(false /*canCallJava*/),
- mType(type),
- mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0),
- // mChannelMask
- mChannelCount(0),
- mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
- mParamStatus(NO_ERROR),
- mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
- mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
- // mName will be set by concrete (non-virtual) subclass
- mDeathRecipient(new PMDeathRecipient(this))
-{
-}
-
-AudioFlinger::ThreadBase::~ThreadBase()
-{
- mParamCond.broadcast();
- // do not lock the mutex in destructor
- releaseWakeLock_l();
- if (mPowerManager != 0) {
- sp<IBinder> binder = mPowerManager->asBinder();
- binder->unlinkToDeath(mDeathRecipient);
- }
-}
-
-void AudioFlinger::ThreadBase::exit()
-{
- ALOGV("ThreadBase::exit");
- // do any cleanup required for exit to succeed
- preExit();
- {
- // This lock prevents the following race in thread (uniprocessor for illustration):
- // if (!exitPending()) {
- // // context switch from here to exit()
- // // exit() calls requestExit(), what exitPending() observes
- // // exit() calls signal(), which is dropped since no waiters
- // // context switch back from exit() to here
- // mWaitWorkCV.wait(...);
- // // now thread is hung
- // }
- AutoMutex lock(mLock);
- requestExit();
- mWaitWorkCV.broadcast();
- }
- // When Thread::requestExitAndWait is made virtual and this method is renamed to
- // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
- requestExitAndWait();
-}
-
-status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
-{
- status_t status;
-
- ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
- Mutex::Autolock _l(mLock);
-
- mNewParameters.add(keyValuePairs);
- mWaitWorkCV.signal();
- // wait condition with timeout in case the thread loop has exited
- // before the request could be processed
- if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) {
- status = mParamStatus;
- mWaitWorkCV.signal();
- } else {
- status = TIMED_OUT;
- }
- return status;
-}
-
-void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
-{
- Mutex::Autolock _l(mLock);
- sendIoConfigEvent_l(event, param);
-}
-
-// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param)
-{
- IoConfigEvent *ioEvent = new IoConfigEvent(event, param);
- mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent));
- ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, param);
- mWaitWorkCV.signal();
-}
-
-// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
-{
- PrioConfigEvent *prioEvent = new PrioConfigEvent(pid, tid, prio);
- mConfigEvents.add(static_cast<ConfigEvent *>(prioEvent));
- ALOGV("sendPrioConfigEvent_l() num events %d pid %d, tid %d prio %d",
- mConfigEvents.size(), pid, tid, prio);
- mWaitWorkCV.signal();
-}
-
-void AudioFlinger::ThreadBase::processConfigEvents()
-{
- mLock.lock();
- while (!mConfigEvents.isEmpty()) {
- ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
- ConfigEvent *event = mConfigEvents[0];
- mConfigEvents.removeAt(0);
- // release mLock before locking AudioFlinger mLock: lock order is always
- // AudioFlinger then ThreadBase to avoid cross deadlock
- mLock.unlock();
- switch(event->type()) {
- case CFG_EVENT_PRIO: {
- PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
- int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio());
- if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
- prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
- }
- } break;
- case CFG_EVENT_IO: {
- IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
- mAudioFlinger->mLock.lock();
- audioConfigChanged_l(ioEvent->event(), ioEvent->param());
- mAudioFlinger->mLock.unlock();
- } break;
- default:
- ALOGE("processConfigEvents() unknown event type %d", event->type());
- break;
- }
- delete event;
- mLock.lock();
- }
- mLock.unlock();
-}
-
-void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- bool locked = tryLock(mLock);
- if (!locked) {
- snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this);
- write(fd, buffer, strlen(buffer));
- }
-
- snprintf(buffer, SIZE, "io handle: %d\n", mId);
- result.append(buffer);
- snprintf(buffer, SIZE, "TID: %d\n", getTid());
- result.append(buffer);
- snprintf(buffer, SIZE, "standby: %d\n", mStandby);
- result.append(buffer);
- snprintf(buffer, SIZE, "Sample rate: %d\n", mSampleRate);
- result.append(buffer);
- snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
- result.append(buffer);
- snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
- result.append(buffer);
- snprintf(buffer, SIZE, "Channel Count: %d\n", mChannelCount);
- result.append(buffer);
- snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
- result.append(buffer);
- snprintf(buffer, SIZE, "Format: %d\n", mFormat);
- result.append(buffer);
- snprintf(buffer, SIZE, "Frame size: %u\n", mFrameSize);
- result.append(buffer);
-
- snprintf(buffer, SIZE, "\nPending setParameters commands: \n");
- result.append(buffer);
- result.append(" Index Command");
- for (size_t i = 0; i < mNewParameters.size(); ++i) {
- snprintf(buffer, SIZE, "\n %02d ", i);
- result.append(buffer);
- result.append(mNewParameters[i]);
- }
-
- snprintf(buffer, SIZE, "\n\nPending config events: \n");
- result.append(buffer);
- for (size_t i = 0; i < mConfigEvents.size(); i++) {
- mConfigEvents[i]->dump(buffer, SIZE);
- result.append(buffer);
- }
- result.append("\n");
-
- write(fd, result.string(), result.size());
-
- if (locked) {
- mLock.unlock();
- }
-}
-
-void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size());
- write(fd, buffer, strlen(buffer));
-
- for (size_t i = 0; i < mEffectChains.size(); ++i) {
- sp<EffectChain> chain = mEffectChains[i];
- if (chain != 0) {
- chain->dump(fd, args);
- }
- }
-}
-
-void AudioFlinger::ThreadBase::acquireWakeLock()
-{
- Mutex::Autolock _l(mLock);
- acquireWakeLock_l();
-}
-
-void AudioFlinger::ThreadBase::acquireWakeLock_l()
-{
- if (mPowerManager == 0) {
- // use checkService() to avoid blocking if power service is not up yet
- sp<IBinder> binder =
- defaultServiceManager()->checkService(String16("power"));
- if (binder == 0) {
- ALOGW("Thread %s cannot connect to the power manager service", mName);
- } else {
- mPowerManager = interface_cast<IPowerManager>(binder);
- binder->linkToDeath(mDeathRecipient);
- }
- }
- if (mPowerManager != 0) {
- sp<IBinder> binder = new BBinder();
- status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder,
- String16(mName));
- if (status == NO_ERROR) {
- mWakeLockToken = binder;
- }
- ALOGV("acquireWakeLock_l() %s status %d", mName, status);
- }
-}
-
-void AudioFlinger::ThreadBase::releaseWakeLock()
-{
- Mutex::Autolock _l(mLock);
- releaseWakeLock_l();
-}
-
-void AudioFlinger::ThreadBase::releaseWakeLock_l()
-{
- if (mWakeLockToken != 0) {
- ALOGV("releaseWakeLock_l() %s", mName);
- if (mPowerManager != 0) {
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
- }
- mWakeLockToken.clear();
- }
-}
-
-void AudioFlinger::ThreadBase::clearPowerManager()
-{
- Mutex::Autolock _l(mLock);
- releaseWakeLock_l();
- mPowerManager.clear();
-}
-
-void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who)
-{
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- thread->clearPowerManager();
- }
- ALOGW("power manager service died !!!");
-}
-
-void AudioFlinger::ThreadBase::setEffectSuspended(
- const effect_uuid_t *type, bool suspend, int sessionId)
-{
- Mutex::Autolock _l(mLock);
- setEffectSuspended_l(type, suspend, sessionId);
-}
-
-void AudioFlinger::ThreadBase::setEffectSuspended_l(
- const effect_uuid_t *type, bool suspend, int sessionId)
-{
- sp<EffectChain> chain = getEffectChain_l(sessionId);
- if (chain != 0) {
- if (type != NULL) {
- chain->setEffectSuspended_l(type, suspend);
- } else {
- chain->setEffectSuspendedAll_l(suspend);
- }
- }
-
- updateSuspendedSessions_l(type, suspend, sessionId);
-}
-
-void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
-{
- ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
- if (index < 0) {
- return;
- }
-
- const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
- mSuspendedSessions.valueAt(index);
-
- for (size_t i = 0; i < sessionEffects.size(); i++) {
- sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
- for (int j = 0; j < desc->mRefCount; j++) {
- if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
- chain->setEffectSuspendedAll_l(true);
- } else {
- ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
- desc->mType.timeLow);
- chain->setEffectSuspended_l(&desc->mType, true);
- }
- }
- }
-}
-
-void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
- bool suspend,
- int sessionId)
-{
- ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
-
- KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
-
- if (suspend) {
- if (index >= 0) {
- sessionEffects = mSuspendedSessions.valueAt(index);
- } else {
- mSuspendedSessions.add(sessionId, sessionEffects);
- }
- } else {
- if (index < 0) {
- return;
- }
- sessionEffects = mSuspendedSessions.valueAt(index);
- }
-
-
- int key = EffectChain::kKeyForSuspendAll;
- if (type != NULL) {
- key = type->timeLow;
- }
- index = sessionEffects.indexOfKey(key);
-
- sp<SuspendedSessionDesc> desc;
- if (suspend) {
- if (index >= 0) {
- desc = sessionEffects.valueAt(index);
- } else {
- desc = new SuspendedSessionDesc();
- if (type != NULL) {
- desc->mType = *type;
- }
- sessionEffects.add(key, desc);
- ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
- }
- desc->mRefCount++;
- } else {
- if (index < 0) {
- return;
- }
- desc = sessionEffects.valueAt(index);
- if (--desc->mRefCount == 0) {
- ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
- sessionEffects.removeItemsAt(index);
- if (sessionEffects.isEmpty()) {
- ALOGV("updateSuspendedSessions_l() restore removing session %d",
- sessionId);
- mSuspendedSessions.removeItem(sessionId);
- }
- }
- }
- if (!sessionEffects.isEmpty()) {
- mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
- }
-}
-
-void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
- bool enabled,
- int sessionId)
-{
- Mutex::Autolock _l(mLock);
- checkSuspendOnEffectEnabled_l(effect, enabled, sessionId);
-}
-
-void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
- bool enabled,
- int sessionId)
-{
- if (mType != RECORD) {
- // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
- // another session. This gives the priority to well behaved effect control panels
- // and applications not using global effects.
- // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect
- // global effects
- if ((sessionId != AUDIO_SESSION_OUTPUT_MIX) && (sessionId != AUDIO_SESSION_OUTPUT_STAGE)) {
- setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
- }
- }
-
- sp<EffectChain> chain = getEffectChain_l(sessionId);
- if (chain != 0) {
- chain->checkSuspendOnEffectEnabled(effect, enabled);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output,
- audio_io_handle_t id,
- audio_devices_t device,
- type_t type)
- : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
- mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
- // mStreamTypes[] initialized in constructor body
- mOutput(output),
- mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
- mMixerStatus(MIXER_IDLE),
- mMixerStatusIgnoringFastTracks(MIXER_IDLE),
- standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
- mScreenState(gScreenState),
- // index 0 is reserved for normal mixer's submix
- mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
-{
- snprintf(mName, kNameLength, "AudioOut_%X", id);
-
- // Assumes constructor is called by AudioFlinger with it's mLock held, but
- // it would be safer to explicitly pass initial masterVolume/masterMute as
- // parameter.
- //
- // If the HAL we are using has support for master volume or master mute,
- // then do not attenuate or mute during mixing (just leave the volume at 1.0
- // and the mute set to false).
- mMasterVolume = audioFlinger->masterVolume_l();
- mMasterMute = audioFlinger->masterMute_l();
- if (mOutput && mOutput->audioHwDev) {
- if (mOutput->audioHwDev->canSetMasterVolume()) {
- mMasterVolume = 1.0;
- }
-
- if (mOutput->audioHwDev->canSetMasterMute()) {
- mMasterMute = false;
- }
- }
-
- readOutputParameters();
-
- // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
- // There is no AUDIO_STREAM_MIN, and ++ operator does not compile
- for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
- stream = (audio_stream_type_t) (stream + 1)) {
- mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
- mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
- }
- // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
- // because mAudioFlinger doesn't have one to copy from
-}
-
-AudioFlinger::PlaybackThread::~PlaybackThread()
-{
- delete [] mMixBuffer;
-}
-
-void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
-{
- dumpInternals(fd, args);
- dumpTracks(fd, args);
- dumpEffectChains(fd, args);
-}
-
-void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- result.appendFormat("Output thread %p stream volumes in dB:\n ", this);
- for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
- const stream_type_t *st = &mStreamTypes[i];
- if (i > 0) {
- result.appendFormat(", ");
- }
- result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
- if (st->mute) {
- result.append("M");
- }
- }
- result.append("\n");
- write(fd, result.string(), result.length());
- result.clear();
-
- snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
- result.append(buffer);
- Track::appendDumpHeader(result);
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> track = mTracks[i];
- if (track != 0) {
- track->dump(buffer, SIZE);
- result.append(buffer);
- }
- }
-
- snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
- result.append(buffer);
- Track::appendDumpHeader(result);
- for (size_t i = 0; i < mActiveTracks.size(); ++i) {
- sp<Track> track = mActiveTracks[i].promote();
- if (track != 0) {
- track->dump(buffer, SIZE);
- result.append(buffer);
- }
- }
- write(fd, result.string(), result.size());
-
- // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way.
- FastTrackUnderruns underruns = getFastTrackUnderruns(0);
- fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n",
- underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
-}
-
-void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
- result.append(buffer);
- snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
- result.append(buffer);
- snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites);
- result.append(buffer);
- snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites);
- result.append(buffer);
- snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite);
- result.append(buffer);
- snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended);
- result.append(buffer);
- snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer);
- result.append(buffer);
- write(fd, result.string(), result.size());
- fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask);
-
- dumpBase(fd, args);
-}
-
-// Thread virtuals
-status_t AudioFlinger::PlaybackThread::readyToRun()
-{
- status_t status = initCheck();
- if (status == NO_ERROR) {
- ALOGI("AudioFlinger's thread %p ready to run", this);
- } else {
- ALOGE("No working audio driver found.");
- }
- return status;
-}
-
-void AudioFlinger::PlaybackThread::onFirstRef()
-{
- run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
-}
-
-// ThreadBase virtuals
-void AudioFlinger::PlaybackThread::preExit()
-{
- ALOGV(" preExit()");
- // FIXME this is using hard-coded strings but in the future, this functionality will be
- // converted to use audio HAL extensions required to support tunneling
- mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
-}
-
-// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
-sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
- const sp<AudioFlinger::Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- IAudioFlinger::track_flags_t flags,
- pid_t tid,
- status_t *status)
-{
- sp<Track> track;
- status_t lStatus;
-
- bool isTimed = (flags & IAudioFlinger::TRACK_TIMED) != 0;
-
- // client expresses a preference for FAST, but we get the final say
- if (flags & IAudioFlinger::TRACK_FAST) {
- if (
- // not timed
- (!isTimed) &&
- // either of these use cases:
- (
- // use case 1: shared buffer with any frame count
- (
- (sharedBuffer != 0)
- ) ||
- // use case 2: callback handler and frame count is default or at least as large as HAL
- (
- (tid != -1) &&
- ((frameCount == 0) ||
- (frameCount >= (int) (mFrameCount * kFastTrackMultiplier)))
- )
- ) &&
- // PCM data
- audio_is_linear_pcm(format) &&
- // mono or stereo
- ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
- (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
-#ifndef FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
- // hardware sample rate
- (sampleRate == mSampleRate) &&
-#endif
- // normal mixer has an associated fast mixer
- hasFastMixer() &&
- // there are sufficient fast track slots available
- (mFastTrackAvailMask != 0)
- // FIXME test that MixerThread for this fast track has a capable output HAL
- // FIXME add a permission test also?
- ) {
- // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
- if (frameCount == 0) {
- frameCount = mFrameCount * kFastTrackMultiplier;
- }
- ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
- frameCount, mFrameCount);
- } else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
- "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%d mSampleRate=%d "
- "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
- isTimed, sharedBuffer.get(), frameCount, mFrameCount, format,
- audio_is_linear_pcm(format),
- channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
- flags &= ~IAudioFlinger::TRACK_FAST;
- // For compatibility with AudioTrack calculation, buffer depth is forced
- // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
- // This is probably too conservative, but legacy application code may depend on it.
- // If you change this calculation, also review the start threshold which is related.
- uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
- uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
- if (minBufCount < 2) {
- minBufCount = 2;
- }
- int minFrameCount = mNormalFrameCount * minBufCount;
- if (frameCount < minFrameCount) {
- frameCount = minFrameCount;
- }
- }
- }
-
- if (mType == DIRECT) {
- if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
- if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
- ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
- "for output %p with format %d",
- sampleRate, format, channelMask, mOutput, mFormat);
- lStatus = BAD_VALUE;
- goto Exit;
- }
- }
- } else {
- // Resampler implementation limits input sampling rate to 2 x output sampling rate.
- if (sampleRate > mSampleRate*2) {
- ALOGE("Sample rate out of range: %d mSampleRate %d", sampleRate, mSampleRate);
- lStatus = BAD_VALUE;
- goto Exit;
- }
- }
-
- lStatus = initCheck();
- if (lStatus != NO_ERROR) {
- ALOGE("Audio driver not initialized.");
- goto Exit;
- }
-
- { // scope for mLock
- Mutex::Autolock _l(mLock);
-
- // all tracks in same audio session must share the same routing strategy otherwise
- // conflicts will happen when tracks are moved from one output to another by audio policy
- // manager
- uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> t = mTracks[i];
- if (t != 0 && !t->isOutputTrack()) {
- uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
- if (sessionId == t->sessionId() && strategy != actual) {
- ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
- strategy, actual);
- lStatus = BAD_VALUE;
- goto Exit;
- }
- }
- }
-
- if (!isTimed) {
- track = new Track(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId, flags);
- } else {
- track = TimedTrack::create(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId);
- }
- if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
- lStatus = NO_MEMORY;
- goto Exit;
- }
- mTracks.add(track);
-
- sp<EffectChain> chain = getEffectChain_l(sessionId);
- if (chain != 0) {
- ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
- track->setMainBuffer(chain->inBuffer());
- chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
- chain->incTrackCnt();
- }
-
- if ((flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
- pid_t callingPid = IPCThreadState::self()->getCallingPid();
- // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
- // so ask activity manager to do this on our behalf
- sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
- }
- }
-
- lStatus = NO_ERROR;
-
-Exit:
- if (status) {
- *status = lStatus;
- }
- return track;
-}
-
-uint32_t AudioFlinger::MixerThread::correctLatency(uint32_t latency) const
-{
- if (mFastMixer != NULL) {
- MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
- latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
- }
- return latency;
-}
-
-uint32_t AudioFlinger::PlaybackThread::correctLatency(uint32_t latency) const
-{
- return latency;
-}
-
-uint32_t AudioFlinger::PlaybackThread::latency() const
-{
- Mutex::Autolock _l(mLock);
- return latency_l();
-}
-uint32_t AudioFlinger::PlaybackThread::latency_l() const
-{
- if (initCheck() == NO_ERROR) {
- return correctLatency(mOutput->stream->get_latency(mOutput->stream));
- } else {
- return 0;
- }
-}
-
-void AudioFlinger::PlaybackThread::setMasterVolume(float value)
-{
- Mutex::Autolock _l(mLock);
- // Don't apply master volume in SW if our HAL can do it for us.
- if (mOutput && mOutput->audioHwDev &&
- mOutput->audioHwDev->canSetMasterVolume()) {
- mMasterVolume = 1.0;
- } else {
- mMasterVolume = value;
- }
-}
-
-void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
-{
- Mutex::Autolock _l(mLock);
- // Don't apply master mute in SW if our HAL can do it for us.
- if (mOutput && mOutput->audioHwDev &&
- mOutput->audioHwDev->canSetMasterMute()) {
- mMasterMute = false;
- } else {
- mMasterMute = muted;
- }
-}
-
-void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
-{
- Mutex::Autolock _l(mLock);
- mStreamTypes[stream].volume = value;
-}
-
-void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
-{
- Mutex::Autolock _l(mLock);
- mStreamTypes[stream].mute = muted;
-}
-
-float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
-{
- Mutex::Autolock _l(mLock);
- return mStreamTypes[stream].volume;
-}
-
-// addTrack_l() must be called with ThreadBase::mLock held
-status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
-{
- status_t status = ALREADY_EXISTS;
-
- // set retry count for buffer fill
- track->mRetryCount = kMaxTrackStartupRetries;
- if (mActiveTracks.indexOf(track) < 0) {
- // the track is newly added, make sure it fills up all its
- // buffers before playing. This is to ensure the client will
- // effectively get the latency it requested.
- track->mFillingUpStatus = Track::FS_FILLING;
- track->mResetDone = false;
- track->mPresentationCompleteFrames = 0;
- mActiveTracks.add(track);
- if (track->mainBuffer() != mMixBuffer) {
- sp<EffectChain> chain = getEffectChain_l(track->sessionId());
- if (chain != 0) {
- ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(), track->sessionId());
- chain->incActiveTrackCnt();
- }
- }
-
- status = NO_ERROR;
- }
-
- ALOGV("mWaitWorkCV.broadcast");
- mWaitWorkCV.broadcast();
-
- return status;
-}
-
-// destroyTrack_l() must be called with ThreadBase::mLock held
-void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
-{
- track->mState = TrackBase::TERMINATED;
- // active tracks are removed by threadLoop()
- if (mActiveTracks.indexOf(track) < 0) {
- removeTrack_l(track);
- }
-}
-
-void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
-{
- track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
- mTracks.remove(track);
- deleteTrackName_l(track->name());
- // redundant as track is about to be destroyed, for dumpsys only
- track->mName = -1;
- if (track->isFastTrack()) {
- int index = track->mFastIndex;
- ALOG_ASSERT(0 < index && index < (int)FastMixerState::kMaxFastTracks);
- ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
- mFastTrackAvailMask |= 1 << index;
- // redundant as track is about to be destroyed, for dumpsys only
- track->mFastIndex = -1;
- }
- sp<EffectChain> chain = getEffectChain_l(track->sessionId());
- if (chain != 0) {
- chain->decTrackCnt();
- }
-}
-
-String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
-{
- String8 out_s8 = String8("");
- char *s;
-
- Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return out_s8;
- }
-
- s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
- out_s8 = String8(s);
- free(s);
- return out_s8;
-}
-
-// audioConfigChanged_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) {
- AudioSystem::OutputDescriptor desc;
- void *param2 = NULL;
-
- ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, param);
-
- switch (event) {
- case AudioSystem::OUTPUT_OPENED:
- case AudioSystem::OUTPUT_CONFIG_CHANGED:
- desc.channels = mChannelMask;
- desc.samplingRate = mSampleRate;
- desc.format = mFormat;
- desc.frameCount = mNormalFrameCount; // FIXME see AudioFlinger::frameCount(audio_io_handle_t)
- desc.latency = latency();
- param2 = &desc;
- break;
-
- case AudioSystem::STREAM_CONFIG_CHANGED:
- param2 = &param;
- case AudioSystem::OUTPUT_CLOSED:
- default:
- break;
- }
- mAudioFlinger->audioConfigChanged_l(event, mId, param2);
-}
-
-void AudioFlinger::PlaybackThread::readOutputParameters()
-{
- mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
- mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
- mChannelCount = (uint16_t)popcount(mChannelMask);
- mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
- mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
- mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
- if (mFrameCount & 15) {
- ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
- mFrameCount);
- }
-
- // Calculate size of normal mix buffer relative to the HAL output buffer size
- double multiplier = 1.0;
- if (mType == MIXER && (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) {
- size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000;
- size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000;
- // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
- minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
- maxNormalFrameCount = maxNormalFrameCount & ~15;
- if (maxNormalFrameCount < minNormalFrameCount) {
- maxNormalFrameCount = minNormalFrameCount;
- }
- multiplier = (double) minNormalFrameCount / (double) mFrameCount;
- if (multiplier <= 1.0) {
- multiplier = 1.0;
- } else if (multiplier <= 2.0) {
- if (2 * mFrameCount <= maxNormalFrameCount) {
- multiplier = 2.0;
- } else {
- multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
- }
- } else {
- // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL SRC
- // (it would be unusual for the normal mix buffer size to not be a multiple of fast
- // track, but we sometimes have to do this to satisfy the maximum frame count constraint)
- // FIXME this rounding up should not be done if no HAL SRC
- uint32_t truncMult = (uint32_t) multiplier;
- if ((truncMult & 1)) {
- if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) {
- ++truncMult;
- }
- }
- multiplier = (double) truncMult;
- }
- }
- mNormalFrameCount = multiplier * mFrameCount;
- // round up to nearest 16 frames to satisfy AudioMixer
- mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
- ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, mNormalFrameCount);
-
- delete[] mMixBuffer;
- mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount];
- memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
-
- // force reconfiguration of effect chains and engines to take new buffer size and audio
- // parameters into account
- // Note that mLock is not held when readOutputParameters() is called from the
- // constructor, but in that case nothing is done below because no audio sessions
- // have effects yet, so it doesn't matter.
- // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
- Vector< sp<EffectChain> > effectChains = mEffectChains;
- for (size_t i = 0; i < effectChains.size(); i ++) {
- mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this, false);
- }
-}
-
-
-status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
-{
- if (halFrames == NULL || dspFrames == NULL) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return INVALID_OPERATION;
- }
- *halFrames = mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
-
- if (isSuspended()) {
- // return an estimation of rendered frames when the output is suspended
- int32_t frames = mBytesWritten - latency_l();
- if (frames < 0) {
- frames = 0;
- }
- *dspFrames = (uint32_t)frames;
- return NO_ERROR;
- } else {
- return mOutput->stream->get_render_position(mOutput->stream, dspFrames);
- }
-}
-
-uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const
-{
- Mutex::Autolock _l(mLock);
- uint32_t result = 0;
- if (getEffectChain_l(sessionId) != 0) {
- result = EFFECT_SESSION;
- }
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> track = mTracks[i];
- if (sessionId == track->sessionId() &&
- !(track->mCblk->flags & CBLK_INVALID_MSK)) {
- result |= TRACK_SESSION;
- break;
- }
- }
-
- return result;
-}
-
-uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
-{
- // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
- // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
- if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
- }
- for (size_t i = 0; i < mTracks.size(); i++) {
- sp<Track> track = mTracks[i];
- if (sessionId == track->sessionId() &&
- !(track->mCblk->flags & CBLK_INVALID_MSK)) {
- return AudioSystem::getStrategyForStream(track->streamType());
- }
- }
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
-}
-
-
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
-{
- Mutex::Autolock _l(mLock);
- return mOutput;
-}
-
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
-{
- Mutex::Autolock _l(mLock);
- AudioStreamOut *output = mOutput;
- mOutput = NULL;
- // FIXME FastMixer might also have a raw ptr to mOutputSink;
- // must push a NULL and wait for ack
- mOutputSink.clear();
- mPipeSink.clear();
- mNormalSink.clear();
- return output;
-}
-
-// this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::PlaybackThread::stream() const
-{
- if (mOutput == NULL) {
- return NULL;
- }
- return &mOutput->stream->common;
-}
-
-uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
-{
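- // This is the duration of one normal mix buffer expressed in microseconds;
- // for example, 1024 frames at 48000 Hz gives (1024 * 1000 / 48000) * 1000 = 21000 us
- // (the integer division drops the fractional millisecond).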
- return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000);
-}
-
-status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
-{
- if (!isValidSyncEvent(event)) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock _l(mLock);
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> track = mTracks[i];
- if (event->triggerSession() == track->sessionId()) {
- (void) track->setSyncEvent(event);
- return NO_ERROR;
- }
- }
-
- return NAME_NOT_FOUND;
-}
-
-bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const
-{
- return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
-}
-
-void AudioFlinger::PlaybackThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove)
-{
- size_t count = tracksToRemove.size();
- if (CC_UNLIKELY(count)) {
- for (size_t i = 0 ; i < count ; i++) {
- const sp<Track>& track = tracksToRemove.itemAt(i);
- if ((track->sharedBuffer() != 0) &&
- (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) {
- AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
- }
- }
- }
-
-}
-
-// ----------------------------------------------------------------------------
-
-AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, type_t type)
- : PlaybackThread(audioFlinger, output, id, device, type),
- // mAudioMixer below
- // mFastMixer below
- mFastMixerFutex(0)
- // mOutputSink below
- // mPipeSink below
- // mNormalSink below
-{
- ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
- ALOGV("mSampleRate=%d, mChannelMask=%#x, mChannelCount=%d, mFormat=%d, mFrameSize=%d, "
- "mFrameCount=%d, mNormalFrameCount=%d",
- mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
- mNormalFrameCount);
- mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
-
- // FIXME - Current mixer implementation only supports stereo output
- if (mChannelCount != FCC_2) {
- ALOGE("Invalid audio hardware channel count %d", mChannelCount);
- }
-
- // create an NBAIO sink for the HAL output stream, and negotiate
- mOutputSink = new AudioStreamOutSink(output->stream);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)};
- ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
-
- // initialize fast mixer depending on configuration
- bool initFastMixer;
- switch (kUseFastMixer) {
- case FastMixer_Never:
- initFastMixer = false;
- break;
- case FastMixer_Always:
- initFastMixer = true;
- break;
- case FastMixer_Static:
- case FastMixer_Dynamic:
- initFastMixer = mFrameCount < mNormalFrameCount;
- break;
- }
- if (initFastMixer) {
-
- // create a MonoPipe to connect our submix to FastMixer
- NBAIO_Format format = mOutputSink->format();
- // This pipe depth compensates for scheduling latency of the normal mixer thread.
- // When it wakes up after a maximum latency, it runs a few cycles quickly before
- // finally blocking. Note the pipe implementation rounds up the request to a power of 2.
- MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
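- // (e.g. with mNormalFrameCount = 1024 at 48 kHz this requests 4096 frames, ~85 ms,
- // which happens to already be a power of 2)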
- const NBAIO_Format offers[1] = {format};
- size_t numCounterOffers = 0;
- ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- monoPipe->setAvgFrames((mScreenState & 1) ?
- (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
- mPipeSink = monoPipe;
-
-#ifdef TEE_SINK_FRAMES
- // create a Pipe to archive a copy of FastMixer's output for dumpsys
- Pipe *teeSink = new Pipe(TEE_SINK_FRAMES, format);
- numCounterOffers = 0;
- index = teeSink->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSink = teeSink;
- PipeReader *teeSource = new PipeReader(*teeSink);
- numCounterOffers = 0;
- index = teeSource->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSource = teeSource;
-#endif
-
- // create fast mixer and configure it initially with just one fast track for our submix
- mFastMixer = new FastMixer();
- FastMixerStateQueue *sq = mFastMixer->sq();
-#ifdef STATE_QUEUE_DUMP
- sq->setObserverDump(&mStateQueueObserverDump);
- sq->setMutatorDump(&mStateQueueMutatorDump);
-#endif
- FastMixerState *state = sq->begin();
- FastTrack *fastTrack = &state->mFastTracks[0];
- // wrap the source side of the MonoPipe to make it an AudioBufferProvider
- fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
- fastTrack->mVolumeProvider = NULL;
- fastTrack->mGeneration++;
- state->mFastTracksGen++;
- state->mTrackMask = 1;
- // fast mixer will use the HAL output sink
- state->mOutputSink = mOutputSink.get();
- state->mOutputSinkGen++;
- state->mFrameCount = mFrameCount;
- state->mCommand = FastMixerState::COLD_IDLE;
- // already done in constructor initialization list
- //mFastMixerFutex = 0;
- state->mColdFutexAddr = &mFastMixerFutex;
- state->mColdGen++;
- state->mDumpState = &mFastMixerDumpState;
- state->mTeeSink = mTeeSink.get();
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
-
- // start the fast mixer
- mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
- pid_t tid = mFastMixer->getTid();
- int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
- if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
- kPriorityFastMixer, getpid_cached, tid, err);
- }
-
-#ifdef AUDIO_WATCHDOG
- // create and start the watchdog
- mAudioWatchdog = new AudioWatchdog();
- mAudioWatchdog->setDump(&mAudioWatchdogDump);
- mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
- tid = mAudioWatchdog->getTid();
- err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
- if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
- kPriorityFastMixer, getpid_cached, tid, err);
- }
-#endif
-
- } else {
- mFastMixer = NULL;
- }
-
- switch (kUseFastMixer) {
- case FastMixer_Never:
- case FastMixer_Dynamic:
- mNormalSink = mOutputSink;
- break;
- case FastMixer_Always:
- mNormalSink = mPipeSink;
- break;
- case FastMixer_Static:
- mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
- break;
- }
-}
-
-AudioFlinger::MixerThread::~MixerThread()
-{
- if (mFastMixer != NULL) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (state->mCommand == FastMixerState::COLD_IDLE) {
- int32_t old = android_atomic_inc(&mFastMixerFutex);
- if (old == -1) {
- __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
- }
- }
- state->mCommand = FastMixerState::EXIT;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- mFastMixer->join();
- // Though the fast mixer thread has exited, its state queue is still valid.
- // We'll use that to extract the final state, which contains one remaining fast track
- // corresponding to our sub-mix.
- state = sq->begin();
- ALOG_ASSERT(state->mTrackMask == 1);
- FastTrack *fastTrack = &state->mFastTracks[0];
- ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
- delete fastTrack->mBufferProvider;
- sq->end(false /*didModify*/);
- delete mFastMixer;
-#ifdef AUDIO_WATCHDOG
- if (mAudioWatchdog != 0) {
- mAudioWatchdog->requestExit();
- mAudioWatchdog->requestExitAndWait();
- mAudioWatchdog.clear();
- }
-#endif
- }
- delete mAudioMixer;
-}
-
-class CpuStats {
-public:
- CpuStats();
- void sample(const String8 &title);
-#ifdef DEBUG_CPU_USAGE
-private:
- ThreadCpuUsage mCpuUsage; // instantaneous thread CPU usage in wall clock ns
- CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
-
- CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
-
- int mCpuNum; // thread's current CPU number
- int mCpukHz; // frequency of thread's current CPU in kHz
-#endif
-};
-
-CpuStats::CpuStats()
-#ifdef DEBUG_CPU_USAGE
- : mCpuNum(-1), mCpukHz(-1)
-#endif
-{
-}
-
-void CpuStats::sample(const String8 &title) {
-#ifdef DEBUG_CPU_USAGE
- // get current thread's delta CPU time in wall clock ns
- double wcNs;
- bool valid = mCpuUsage.sampleAndEnable(wcNs);
-
- // record sample for wall clock statistics
- if (valid) {
- mWcStats.sample(wcNs);
- }
-
- // get the current CPU number
- int cpuNum = sched_getcpu();
-
- // get the current CPU frequency in kHz
- int cpukHz = mCpuUsage.getCpukHz(cpuNum);
-
- // check if either CPU number or frequency changed
- if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
- mCpuNum = cpuNum;
- mCpukHz = cpukHz;
- // ignore sample for purposes of cycles
- valid = false;
- }
-
- // if no change in CPU number or frequency, then record sample for cycle statistics
- if (valid && mCpukHz > 0) {
- double cycles = wcNs * cpukHz * 0.000001;
- mHzStats.sample(cycles);
- }
-
- unsigned n = mWcStats.n();
- // mCpuUsage.elapsed() is expensive, so don't call it every loop
- if ((n & 127) == 1) {
- long long elapsed = mCpuUsage.elapsed();
- if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
- double perLoop = elapsed / (double) n;
- double perLoop100 = perLoop * 0.01;
- double perLoop1k = perLoop * 0.001;
- double mean = mWcStats.mean();
- double stddev = mWcStats.stddev();
- double minimum = mWcStats.minimum();
- double maximum = mWcStats.maximum();
- double meanCycles = mHzStats.mean();
- double stddevCycles = mHzStats.stddev();
- double minCycles = mHzStats.minimum();
- double maxCycles = mHzStats.maximum();
- mCpuUsage.resetElapsed();
- mWcStats.reset();
- mHzStats.reset();
- ALOGD("CPU usage for %s over past %.1f secs\n"
- " (%u mixer loops at %.1f mean ms per loop):\n"
- " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
- " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
- " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
- title.string(),
- elapsed * .000000001, n, perLoop * .000001,
- mean * .001,
- stddev * .001,
- minimum * .001,
- maximum * .001,
- mean / perLoop100,
- stddev / perLoop100,
- minimum / perLoop100,
- maximum / perLoop100,
- meanCycles / perLoop1k,
- stddevCycles / perLoop1k,
- minCycles / perLoop1k,
- maxCycles / perLoop1k);
-
- }
- }
-#endif
-}
-
-void AudioFlinger::PlaybackThread::checkSilentMode_l()
-{
- if (!mMasterMute) {
- char value[PROPERTY_VALUE_MAX];
- if (property_get("ro.audio.silent", value, "0") > 0) {
- char *endptr;
- unsigned long ul = strtoul(value, &endptr, 0);
- if (*endptr == '\0' && ul != 0) {
- ALOGD("Silence is golden");
- // The setprop command will not allow a property to be changed after
- // the first time it is set, so we don't have to worry about un-muting.
- setMasterMute_l(true);
- }
- }
- }
-}
-
-bool AudioFlinger::PlaybackThread::threadLoop()
-{
- Vector< sp<Track> > tracksToRemove;
-
- standbyTime = systemTime();
-
- // MIXER
- nsecs_t lastWarning = 0;
-
- // DUPLICATING
- // FIXME could this be made local to while loop?
- writeFrames = 0;
-
- cacheParameters_l();
- sleepTime = idleSleepTime;
-
- if (mType == MIXER) {
- sleepTimeShift = 0;
- }
-
- CpuStats cpuStats;
- const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
-
- acquireWakeLock();
-
- while (!exitPending())
- {
- cpuStats.sample(myName);
-
- Vector< sp<EffectChain> > effectChains;
-
- processConfigEvents();
-
- { // scope for mLock
-
- Mutex::Autolock _l(mLock);
-
- if (checkForNewParameters_l()) {
- cacheParameters_l();
- }
-
- saveOutputTracks();
-
- // put audio hardware into standby after short delay
- if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
- isSuspended())) {
- if (!mStandby) {
-
- threadLoop_standby();
-
- mStandby = true;
- }
-
- if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
- // we're about to wait, flush the binder command buffer
- IPCThreadState::self()->flushCommands();
-
- clearOutputTracks();
-
- if (exitPending()) break;
-
- releaseWakeLock_l();
- // wait until we have something to do...
- ALOGV("%s going to sleep", myName.string());
- mWaitWorkCV.wait(mLock);
- ALOGV("%s waking up", myName.string());
- acquireWakeLock_l();
-
- mMixerStatus = MIXER_IDLE;
- mMixerStatusIgnoringFastTracks = MIXER_IDLE;
- mBytesWritten = 0;
-
- checkSilentMode_l();
-
- standbyTime = systemTime() + standbyDelay;
- sleepTime = idleSleepTime;
- if (mType == MIXER) {
- sleepTimeShift = 0;
- }
-
- continue;
- }
- }
-
- // mMixerStatusIgnoringFastTracks is also updated internally
- mMixerStatus = prepareTracks_l(&tracksToRemove);
-
- // prevent any changes in effect chain list and in each effect chain
- // during mixing and effect process as the audio buffers could be deleted
- // or modified if an effect is created or deleted
- lockEffectChains_l(effectChains);
- }
-
- if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) {
- threadLoop_mix();
- } else {
- threadLoop_sleepTime();
- }
-
- if (isSuspended()) {
- sleepTime = suspendSleepTimeUs();
- mBytesWritten += mixBufferSize;
- }
-
- // only process effects if we're going to write
- if (sleepTime == 0) {
- for (size_t i = 0; i < effectChains.size(); i ++) {
- effectChains[i]->process_l();
- }
- }
-
- // enable changes in effect chain
- unlockEffectChains(effectChains);
-
- // sleepTime == 0 means we must write to audio hardware
- if (sleepTime == 0) {
-
- threadLoop_write();
-
-if (mType == MIXER) {
- // write blocked detection
- nsecs_t now = systemTime();
- nsecs_t delta = now - mLastWriteTime;
- if (!mStandby && delta > maxPeriod) {
- mNumDelayedWrites++;
- if ((now - lastWarning) > kWarningThrottleNs) {
-#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
- ScopedTrace st(ATRACE_TAG, "underrun");
-#endif
- ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
- ns2ms(delta), mNumDelayedWrites, this);
- lastWarning = now;
- }
- }
-}
-
- mStandby = false;
- } else {
- usleep(sleepTime);
- }
-
- // Finally let go of removed track(s), without the lock held
- // since we can't guarantee the destructors won't acquire that
- // same lock. This will also mutate and push a new fast mixer state.
- threadLoop_removeTracks(tracksToRemove);
- tracksToRemove.clear();
-
- // FIXME I don't understand the need for this here;
- // it was in the original code but maybe the
- // assignment in saveOutputTracks() makes this unnecessary?
- clearOutputTracks();
-
- // Effect chains will be actually deleted here if they were removed from
- // mEffectChains list during mixing or effects processing
- effectChains.clear();
-
- // FIXME Note that the above .clear() is no longer necessary since effectChains
- // is now local to this block, but will keep it for now (at least until merge done).
- }
-
- // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
- if (mType == MIXER || mType == DIRECT) {
- // put output stream into standby mode
- if (!mStandby) {
- mOutput->stream->common.standby(&mOutput->stream->common);
- }
- }
-
- releaseWakeLock();
-
- ALOGV("Thread %p type %d exiting", this, mType);
- return false;
-}
-
-void AudioFlinger::MixerThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove)
-{
- PlaybackThread::threadLoop_removeTracks(tracksToRemove);
-}
-
-void AudioFlinger::MixerThread::threadLoop_write()
-{
- // FIXME we should only do one push per cycle; confirm this is true
- // Start the fast mixer if it's not already running
- if (mFastMixer != NULL) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (state->mCommand != FastMixerState::MIX_WRITE &&
- (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
- if (state->mCommand == FastMixerState::COLD_IDLE) {
- int32_t old = android_atomic_inc(&mFastMixerFutex);
- if (old == -1) {
- __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
- }
-#ifdef AUDIO_WATCHDOG
- if (mAudioWatchdog != 0) {
- mAudioWatchdog->resume();
- }
-#endif
- }
- state->mCommand = FastMixerState::MIX_WRITE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- if (kUseFastMixer == FastMixer_Dynamic) {
- mNormalSink = mPipeSink;
- }
- } else {
- sq->end(false /*didModify*/);
- }
- }
- PlaybackThread::threadLoop_write();
-}
-
-// shared by MIXER and DIRECT, overridden by DUPLICATING
-void AudioFlinger::PlaybackThread::threadLoop_write()
-{
- // FIXME rewrite to reduce number of system calls
- mLastWriteTime = systemTime();
- mInWrite = true;
- int bytesWritten;
-
- // If an NBAIO sink is present, use it to write the normal mixer's submix
- if (mNormalSink != 0) {
-#define mBitShift 2 // FIXME
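- // (a shift of 2 corresponds to 4 bytes per frame, i.e. the 16-bit stereo format
- // produced by the mixer; hard-coded here pending the FIXME above)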
- size_t count = mixBufferSize >> mBitShift;
-#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
- Tracer::traceBegin(ATRACE_TAG, "write");
-#endif
- // update the setpoint when gScreenState changes
- uint32_t screenState = gScreenState;
- if (screenState != mScreenState) {
- mScreenState = screenState;
- MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
- if (pipe != NULL) {
- pipe->setAvgFrames((mScreenState & 1) ?
- (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
- }
- }
- ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
-#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
- Tracer::traceEnd(ATRACE_TAG);
-#endif
- if (framesWritten > 0) {
- bytesWritten = framesWritten << mBitShift;
- } else {
- bytesWritten = framesWritten;
- }
- // otherwise use the HAL / AudioStreamOut directly
- } else {
- // Direct output thread.
- bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
- }
-
- if (bytesWritten > 0) mBytesWritten += mixBufferSize;
- mNumWrites++;
- mInWrite = false;
-}
-
-void AudioFlinger::MixerThread::threadLoop_standby()
-{
- // Idle the fast mixer if it's currently running
- if (mFastMixer != NULL) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- state->mCommand = FastMixerState::COLD_IDLE;
- state->mColdFutexAddr = &mFastMixerFutex;
- state->mColdGen++;
- mFastMixerFutex = 0;
- sq->end();
- // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- if (kUseFastMixer == FastMixer_Dynamic) {
- mNormalSink = mOutputSink;
- }
-#ifdef AUDIO_WATCHDOG
- if (mAudioWatchdog != 0) {
- mAudioWatchdog->pause();
- }
-#endif
- } else {
- sq->end(false /*didModify*/);
- }
- }
- PlaybackThread::threadLoop_standby();
-}
-
-// shared by MIXER and DIRECT, overridden by DUPLICATING
-void AudioFlinger::PlaybackThread::threadLoop_standby()
-{
- ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
- mOutput->stream->common.standby(&mOutput->stream->common);
-}
-
-void AudioFlinger::MixerThread::threadLoop_mix()
-{
- // obtain the presentation timestamp of the next output buffer
- int64_t pts;
- status_t status = INVALID_OPERATION;
-
- if (mNormalSink != 0) {
- status = mNormalSink->getNextWriteTimestamp(&pts);
- } else {
- status = mOutputSink->getNextWriteTimestamp(&pts);
- }
-
- if (status != NO_ERROR) {
- pts = AudioBufferProvider::kInvalidPTS;
- }
-
- // mix buffers...
- mAudioMixer->process(pts);
- // increase the sleep time progressively when the application underrun condition clears.
- // Only increase the sleep time after the mixer has been ready for two consecutive
- // rounds, so that a steady state of alternating ready/not-ready conditions does not
- // keep the sleep time at a level where we would underrun the audio HAL.
- if ((sleepTime == 0) && (sleepTimeShift > 0)) {
- sleepTimeShift--;
- }
- sleepTime = 0;
- standbyTime = systemTime() + standbyDelay;
- //TODO: delay standby when effects have a tail
-}
-
-void AudioFlinger::MixerThread::threadLoop_sleepTime()
-{
- // If no tracks are ready, sleep once for the duration of an output
- // buffer size, then write 0s to the output
- if (sleepTime == 0) {
- if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime >> sleepTimeShift;
- if (sleepTime < kMinThreadSleepTimeUs) {
- sleepTime = kMinThreadSleepTimeUs;
- }
- // reduce sleep time in case of consecutive application underruns to avoid
- // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
- // duration we would end up writing less data than needed by the audio HAL if
- // the condition persists.
- if (sleepTimeShift < kMaxThreadSleepTimeShift) {
- sleepTimeShift++;
- }
- } else {
- sleepTime = idleSleepTime;
- }
- } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
- memset (mMixBuffer, 0, mixBufferSize);
- sleepTime = 0;
- ALOGV_IF((mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED)), "anticipated start");
- }
- // TODO add standby time extension fct of effect tail
-}
-
-// prepareTracks_l() must be called with ThreadBase::mLock held
-AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
- Vector< sp<Track> > *tracksToRemove)
-{
-
- mixer_state mixerStatus = MIXER_IDLE;
- // find out which tracks need to be processed
- size_t count = mActiveTracks.size();
- size_t mixedTracks = 0;
- size_t tracksWithEffect = 0;
- // counts only _active_ fast tracks
- size_t fastTracks = 0;
- uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
-
- float masterVolume = mMasterVolume;
- bool masterMute = mMasterMute;
-
- if (masterMute) {
- masterVolume = 0;
- }
- // Delegate master volume control to effect in output mix effect chain if needed
- sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
- if (chain != 0) {
- uint32_t v = (uint32_t)(masterVolume * (1 << 24));
- chain->setVolume_l(&v, &v);
- masterVolume = (float)((v + (1 << 23)) >> 24);
- chain.clear();
- }
-
- // prepare a new state to push
- FastMixerStateQueue *sq = NULL;
- FastMixerState *state = NULL;
- bool didModify = false;
- FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
- if (mFastMixer != NULL) {
- sq = mFastMixer->sq();
- state = sq->begin();
- }
-
- for (size_t i=0 ; i<count ; i++) {
- sp<Track> t = mActiveTracks[i].promote();
- if (t == 0) continue;
-
- // this const just means the local variable doesn't change
- Track* const track = t.get();
-
- // process fast tracks
- if (track->isFastTrack()) {
-
- // It's theoretically possible (though unlikely) for a fast track to be created
- // and then removed within the same normal mix cycle. This is not a problem, as
- // the track never becomes active so its fast mixer slot is never touched.
- // The converse, of removing an (active) track and then creating a new track
- // at the identical fast mixer slot within the same normal mix cycle,
- // is impossible because the slot isn't marked available until the end of each cycle.
- int j = track->mFastIndex;
- ALOG_ASSERT(0 < j && j < (int)FastMixerState::kMaxFastTracks);
- ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j)));
- FastTrack *fastTrack = &state->mFastTracks[j];
-
- // Determine whether the track is currently in underrun condition,
- // and whether it had a recent underrun.
- FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j];
- FastTrackUnderruns underruns = ftDump->mUnderruns;
- uint32_t recentFull = (underruns.mBitFields.mFull -
- track->mObservedUnderruns.mBitFields.mFull) & UNDERRUN_MASK;
- uint32_t recentPartial = (underruns.mBitFields.mPartial -
- track->mObservedUnderruns.mBitFields.mPartial) & UNDERRUN_MASK;
- uint32_t recentEmpty = (underruns.mBitFields.mEmpty -
- track->mObservedUnderruns.mBitFields.mEmpty) & UNDERRUN_MASK;
- uint32_t recentUnderruns = recentPartial + recentEmpty;
- track->mObservedUnderruns = underruns;
- // don't count underruns that occur while stopping or pausing
- // or stopped which can occur when flush() is called while active
- if (!(track->isStopping() || track->isPausing() || track->isStopped())) {
- track->mUnderrunCount += recentUnderruns;
- }
-
- // This is similar to the state machine for normal tracks,
- // with a few modifications for fast tracks.
- bool isActive = true;
- switch (track->mState) {
- case TrackBase::STOPPING_1:
- // track stays active in STOPPING_1 state until first underrun
- if (recentUnderruns > 0) {
- track->mState = TrackBase::STOPPING_2;
- }
- break;
- case TrackBase::PAUSING:
- // ramp down is not yet implemented
- track->setPaused();
- break;
- case TrackBase::RESUMING:
- // ramp up is not yet implemented
- track->mState = TrackBase::ACTIVE;
- break;
- case TrackBase::ACTIVE:
- if (recentFull > 0 || recentPartial > 0) {
- // track has provided at least some frames recently: reset retry count
- track->mRetryCount = kMaxTrackRetries;
- }
- if (recentUnderruns == 0) {
- // no recent underruns: stay active
- break;
- }
- // there has recently been an underrun of some kind
- if (track->sharedBuffer() == 0) {
- // were any of the recent underruns "empty" (no frames available)?
- if (recentEmpty == 0) {
- // no, then ignore the partial underruns as they are allowed indefinitely
- break;
- }
- // there has recently been an "empty" underrun: decrement the retry counter
- if (--(track->mRetryCount) > 0) {
- break;
- }
- // indicate to client process that the track was disabled because of underrun;
- // it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED_ON, &track->mCblk->flags);
- // remove from active list, but state remains ACTIVE [confusing but true]
- isActive = false;
- break;
- }
- // fall through
- case TrackBase::STOPPING_2:
- case TrackBase::PAUSED:
- case TrackBase::TERMINATED:
- case TrackBase::STOPPED:
- case TrackBase::FLUSHED: // flush() while active
- // Check for presentation complete if track is inactive
- // We have consumed all the buffers of this track.
- // This would be incomplete if we auto-paused on underrun
- {
- size_t audioHALFrames =
- (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
- size_t framesWritten =
- mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
- if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
- // track stays in active list until presentation is complete
- break;
- }
- }
- if (track->isStopping_2()) {
- track->mState = TrackBase::STOPPED;
- }
- if (track->isStopped()) {
- // Can't reset directly, as fast mixer is still polling this track
- // track->reset();
- // So instead mark this track as needing to be reset after push with ack
- resetMask |= 1 << i;
- }
- isActive = false;
- break;
- case TrackBase::IDLE:
- default:
- LOG_FATAL("unexpected track state %d", track->mState);
- }
-
- if (isActive) {
- // was it previously inactive?
- if (!(state->mTrackMask & (1 << j))) {
- ExtendedAudioBufferProvider *eabp = track;
- VolumeProvider *vp = track;
- fastTrack->mBufferProvider = eabp;
- fastTrack->mVolumeProvider = vp;
- fastTrack->mSampleRate = track->mSampleRate;
- fastTrack->mChannelMask = track->mChannelMask;
- fastTrack->mGeneration++;
- state->mTrackMask |= 1 << j;
- didModify = true;
- // no acknowledgement required for newly active tracks
- }
- // cache the combined master volume and stream type volume for fast mixer; this
- // lacks any synchronization or barrier so VolumeProvider may read a stale value
- track->mCachedVolume = track->isMuted() ?
- 0 : masterVolume * mStreamTypes[track->streamType()].volume;
- ++fastTracks;
- } else {
- // was it previously active?
- if (state->mTrackMask & (1 << j)) {
- fastTrack->mBufferProvider = NULL;
- fastTrack->mGeneration++;
- state->mTrackMask &= ~(1 << j);
- didModify = true;
- // If any fast tracks were removed, we must wait for acknowledgement
- // because we're about to decrement the last sp<> on those tracks.
- block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
- } else {
- LOG_FATAL("fast track %d should have been active", j);
- }
- tracksToRemove->add(track);
- // Avoids a misleading display in dumpsys
- track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL;
- }
- continue;
- }
-
- { // local variable scope to avoid goto warning
-
- audio_track_cblk_t* cblk = track->cblk();
-
- // The first time a track is added we wait
- // for all its buffers to be filled before processing it
- int name = track->name();
- // make sure that we have enough frames to mix one full buffer.
- // enforce this condition only once, to enable draining the buffer in case the client
- // app does not call stop() and relies on underrun to stop:
- // hence the test on (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY), meaning
- // the track was mixed during the last round
- uint32_t minFrames = 1;
- if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
- (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
- if (t->sampleRate() == (int)mSampleRate) {
- minFrames = mNormalFrameCount;
- } else {
- // +1 for rounding and +1 for additional sample needed for interpolation
- minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
- // add frames already consumed but not yet released by the resampler
- // because cblk->framesReady() will include these frames
- minFrames += mAudioMixer->getUnreleasedFrames(track->name());
- // the minimum track buffer size is normally twice the number of frames necessary
- // to fill one buffer and the resampler should not leave more than one buffer worth
- // of unreleased frames after each pass, but just in case...
- ALOG_ASSERT(minFrames <= cblk->frameCount);
- }
- }
- if ((track->framesReady() >= minFrames) && track->isReady() &&
- !track->isPaused() && !track->isTerminated())
- {
- //ALOGV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, this);
-
- mixedTracks++;
-
- // track->mainBuffer() != mMixBuffer means there is an effect chain
- // connected to the track
- chain.clear();
- if (track->mainBuffer() != mMixBuffer) {
- chain = getEffectChain_l(track->sessionId());
- // Delegate volume control to effect in track effect chain if needed
- if (chain != 0) {
- tracksWithEffect++;
- } else {
- ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on session %d",
- name, track->sessionId());
- }
- }
-
-
- int param = AudioMixer::VOLUME;
- if (track->mFillingUpStatus == Track::FS_FILLED) {
- // no ramp for the first volume setting
- track->mFillingUpStatus = Track::FS_ACTIVE;
- if (track->mState == TrackBase::RESUMING) {
- track->mState = TrackBase::ACTIVE;
- param = AudioMixer::RAMP_VOLUME;
- }
- mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
- } else if (cblk->server != 0) {
- // If the track is stopped before the first frame was mixed,
- // do not apply ramp
- param = AudioMixer::RAMP_VOLUME;
- }
-
- // compute volume for this track
- uint32_t vl, vr, va;
- if (track->isMuted() || track->isPausing() ||
- mStreamTypes[track->streamType()].mute) {
- vl = vr = va = 0;
- if (track->isPausing()) {
- track->setPaused();
- }
- } else {
-
- // read original volumes with volume control
- float typeVolume = mStreamTypes[track->streamType()].volume;
- float v = masterVolume * typeVolume;
- uint32_t vlr = cblk->getVolumeLR();
- vl = vlr & 0xFFFF;
- vr = vlr >> 16;
- // track volumes come from shared memory, so can't be trusted and must be clamped
- if (vl > MAX_GAIN_INT) {
- ALOGV("Track left volume out of range: %04X", vl);
- vl = MAX_GAIN_INT;
- }
- if (vr > MAX_GAIN_INT) {
- ALOGV("Track right volume out of range: %04X", vr);
- vr = MAX_GAIN_INT;
- }
- // now apply the master volume and stream type volume
- vl = (uint32_t)(v * vl) << 12;
- vr = (uint32_t)(v * vr) << 12;
- // assuming master volume and stream type volume each go up to 1.0,
- // vl and vr are now in 8.24 format
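- // Illustrative numbers (assuming MAX_GAIN_INT is unity gain in 4.12 format, i.e.
- // 0x1000, defined outside this excerpt): with master and stream volumes at 1.0 and
- // a shared-memory track volume of 0x1000, vl = (1.0 * 0x1000) << 12 = 0x1000000,
- // i.e. unity in 8.24; the conversion further below, (vl + (1 << 11)) >> 12,
- // returns it to 0x1000 in 4.12 for the AudioMixer.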
-
- uint16_t sendLevel = cblk->getSendLevel_U4_12();
- // send level comes from shared memory and so may be corrupt
- if (sendLevel > MAX_GAIN_INT) {
- ALOGV("Track send level out of range: %04X", sendLevel);
- sendLevel = MAX_GAIN_INT;
- }
- va = (uint32_t)(v * sendLevel);
- }
- // Delegate volume control to effect in track effect chain if needed
- if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
- // Do not ramp volume if volume is controlled by effect
- param = AudioMixer::VOLUME;
- track->mHasVolumeController = true;
- } else {
- // force no volume ramp when volume controller was just disabled or removed
- // from effect chain to avoid volume spike
- if (track->mHasVolumeController) {
- param = AudioMixer::VOLUME;
- }
- track->mHasVolumeController = false;
- }
-
- // Convert volumes from 8.24 to 4.12 format
- // This additional clamping is needed in case chain->setVolume_l() overshot
- vl = (vl + (1 << 11)) >> 12;
- if (vl > MAX_GAIN_INT) vl = MAX_GAIN_INT;
- vr = (vr + (1 << 11)) >> 12;
- if (vr > MAX_GAIN_INT) vr = MAX_GAIN_INT;
-
- if (va > MAX_GAIN_INT) va = MAX_GAIN_INT; // va is uint32_t, so no need to check for -
-
- // XXX: these things DON'T need to be done each time
- mAudioMixer->setBufferProvider(name, track);
- mAudioMixer->enable(name);
-
- mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)vl);
- mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)vr);
- mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)va);
- mAudioMixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::FORMAT, (void *)track->format());
- mAudioMixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::CHANNEL_MASK, (void *)track->channelMask());
- mAudioMixer->setParameter(
- name,
- AudioMixer::RESAMPLE,
- AudioMixer::SAMPLE_RATE,
- (void *)(cblk->sampleRate));
- mAudioMixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
- mAudioMixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
-
- // reset retry count
- track->mRetryCount = kMaxTrackRetries;
-
- // If one track is ready, set the mixer ready if:
- // - the mixer was not ready during previous round OR
- // - no other track is not ready
- if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
- mixerStatus != MIXER_TRACKS_ENABLED) {
- mixerStatus = MIXER_TRACKS_READY;
- }
- } else {
- // clear effect chain input buffer if an active track underruns to avoid sending
- // previous audio buffer again to effects
- chain = getEffectChain_l(track->sessionId());
- if (chain != 0) {
- chain->clearInputBuffer();
- }
-
- //ALOGV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user, cblk->server, this);
- if ((track->sharedBuffer() != 0) || track->isTerminated() ||
- track->isStopped() || track->isPaused()) {
- // We have consumed all the buffers of this track.
- // Remove it from the list of active tracks.
- // TODO: use actual buffer filling status instead of latency when available from
- // audio HAL
- size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
- size_t framesWritten =
- mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
- if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
- if (track->isStopped()) {
- track->reset();
- }
- tracksToRemove->add(track);
- }
- } else {
- track->mUnderrunCount++;
- // No buffers for this track. Give it a few chances to
- // fill a buffer, then remove it from active list.
- if (--(track->mRetryCount) <= 0) {
- ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this);
- tracksToRemove->add(track);
- // indicate to client process that the track was disabled because of underrun;
- // it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED_ON, &cblk->flags);
- // If one track is not ready, mark the mixer also not ready if:
- // - the mixer was ready during previous round OR
- // - no other track is ready
- } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
- mixerStatus != MIXER_TRACKS_READY) {
- mixerStatus = MIXER_TRACKS_ENABLED;
- }
- }
- mAudioMixer->disable(name);
- }
-
- } // local variable scope to avoid goto warning
-track_is_ready: ;
-
- }
-
- // Push the new FastMixer state if necessary
- bool pauseAudioWatchdog = false;
- if (didModify) {
- state->mFastTracksGen++;
- // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
- if (kUseFastMixer == FastMixer_Dynamic &&
- state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
- state->mCommand = FastMixerState::COLD_IDLE;
- state->mColdFutexAddr = &mFastMixerFutex;
- state->mColdGen++;
- mFastMixerFutex = 0;
- if (kUseFastMixer == FastMixer_Dynamic) {
- mNormalSink = mOutputSink;
- }
- // If we go into cold idle, need to wait for acknowledgement
- // so that fast mixer stops doing I/O.
- block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
- pauseAudioWatchdog = true;
- }
- sq->end();
- }
- if (sq != NULL) {
- sq->end(didModify);
- sq->push(block);
- }
-#ifdef AUDIO_WATCHDOG
- if (pauseAudioWatchdog && mAudioWatchdog != 0) {
- mAudioWatchdog->pause();
- }
-#endif
-
- // Now perform the deferred reset on fast tracks that have stopped
- while (resetMask != 0) {
- size_t i = __builtin_ctz(resetMask);
- ALOG_ASSERT(i < count);
- resetMask &= ~(1 << i);
- sp<Track> t = mActiveTracks[i].promote();
- if (t == 0) continue;
- Track* track = t.get();
- ALOG_ASSERT(track->isFastTrack() && track->isStopped());
- track->reset();
- }
-
- // remove all the tracks that need to be...
- count = tracksToRemove->size();
- if (CC_UNLIKELY(count)) {
- for (size_t i=0 ; i<count ; i++) {
- const sp<Track>& track = tracksToRemove->itemAt(i);
- mActiveTracks.remove(track);
- if (track->mainBuffer() != mMixBuffer) {
- chain = getEffectChain_l(track->sessionId());
- if (chain != 0) {
- ALOGV("stopping track on chain %p for session Id: %d", chain.get(), track->sessionId());
- chain->decActiveTrackCnt();
- }
- }
- if (track->isTerminated()) {
- removeTrack_l(track);
- }
- }
- }
-
- // mix buffer must be cleared if all tracks are connected to an
- // effect chain as in this case the mixer will not write to
- // mix buffer and track effects will accumulate into it
- if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || (mixedTracks == 0 && fastTracks > 0)) {
- // FIXME as a performance optimization, should remember previous zero status
- memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
- }
-
- // if any fast tracks, then status is ready
- mMixerStatusIgnoringFastTracks = mixerStatus;
- if (fastTracks > 0) {
- mixerStatus = MIXER_TRACKS_READY;
- }
- return mixerStatus;
-}
-
-/*
-The derived values that are cached:
- - mixBufferSize from frame count * frame size
- - activeSleepTime from activeSleepTimeUs()
- - idleSleepTime from idleSleepTimeUs()
- - standbyDelay from mActiveSleepTimeUs (DIRECT only)
- - maxPeriod from frame count and sample rate (MIXER only)
-
-The parameters that affect these derived values are:
- - frame count
- - frame size
- - sample rate
- - device type: A2DP or not
- - device latency
- - format: PCM or not
- - active sleep time
- - idle sleep time
-*/
-
-void AudioFlinger::PlaybackThread::cacheParameters_l()
-{
- mixBufferSize = mNormalFrameCount * mFrameSize;
- activeSleepTime = activeSleepTimeUs();
- idleSleepTime = idleSleepTimeUs();
-}
-
-void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
-{
- ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
- this, streamType, mTracks.size());
- Mutex::Autolock _l(mLock);
-
- size_t size = mTracks.size();
- for (size_t i = 0; i < size; i++) {
- sp<Track> t = mTracks[i];
- if (t->streamType() == streamType) {
- android_atomic_or(CBLK_INVALID_ON, &t->mCblk->flags);
- t->mCblk->cv.signal();
- }
- }
-}
-
-// getTrackName_l() must be called with ThreadBase::mLock held
-int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask, int sessionId)
-{
- return mAudioMixer->getTrackName(channelMask, sessionId);
-}
-
-// deleteTrackName_l() must be called with ThreadBase::mLock held
-void AudioFlinger::MixerThread::deleteTrackName_l(int name)
-{
- ALOGV("remove track (%d) and delete from mixer", name);
- mAudioMixer->deleteTrackName(name);
-}
-
-// checkForNewParameters_l() must be called with ThreadBase::mLock held
-bool AudioFlinger::MixerThread::checkForNewParameters_l()
-{
- // if the previous command was not an IDLE state, this holds the FastMixer state to
- // restore after the new parameters have been processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- bool reconfig = false;
-
- while (!mNewParameters.isEmpty()) {
-
- if (mFastMixer != NULL) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
-
- status_t status = NO_ERROR;
- String8 keyValuePair = mNewParameters[0];
- AudioParameter param = AudioParameter(keyValuePair);
- int value;
-
- if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
- reconfig = true;
- }
- if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
- if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
- status = BAD_VALUE;
- } else {
- reconfig = true;
- }
- }
- if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
- if (value != AUDIO_CHANNEL_OUT_STEREO) {
- status = BAD_VALUE;
- } else {
- reconfig = true;
- }
- }
- if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
- // do not accept frame count changes if tracks are open as the track buffer
- // size depends on frame count and correct behavior would not be guaranteed
- // if frame count is changed after track creation
- if (!mTracks.isEmpty()) {
- status = INVALID_OPERATION;
- } else {
- reconfig = true;
- }
- }
- if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
-#ifdef ADD_BATTERY_DATA
- // when changing the audio output device, call addBatteryData to notify
- // the change
- if (mOutDevice != value) {
- uint32_t params = 0;
- // check whether speaker is on
- if (value & AUDIO_DEVICE_OUT_SPEAKER) {
- params |= IMediaPlayerService::kBatteryDataSpeakerOn;
- }
-
- audio_devices_t deviceWithoutSpeaker
- = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
- // check if any other device (except speaker) is on
- if (value & deviceWithoutSpeaker ) {
- params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
- }
-
- if (params != 0) {
- addBatteryData(params);
- }
- }
-#endif
-
- // forward device change to effects that have requested to be
- // aware of attached audio device.
- mOutDevice = value;
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->setDevice_l(mOutDevice);
- }
- }
-
- if (status == NO_ERROR) {
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
- if (!mStandby && status == INVALID_OPERATION) {
- mOutput->stream->common.standby(&mOutput->stream->common);
- mStandby = true;
- mBytesWritten = 0;
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
- }
- if (status == NO_ERROR && reconfig) {
- delete mAudioMixer;
- // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't)
- mAudioMixer = NULL;
- readOutputParameters();
- mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
- for (size_t i = 0; i < mTracks.size() ; i++) {
- int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId);
- if (name < 0) break;
- mTracks[i]->mName = name;
- // limit track sample rate to 2 x new output sample rate
- if (mTracks[i]->mCblk->sampleRate > 2 * sampleRate()) {
- mTracks[i]->mCblk->sampleRate = 2 * sampleRate();
- }
- }
- sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
- }
- }
-
- mNewParameters.removeAt(0);
-
- mParamStatus = status;
- mParamCond.signal();
- // wait for condition with time out in case the thread calling ThreadBase::setParameters()
- // already timed out waiting for the status and will never signal the condition.
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
- }
-
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != NULL);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
-
- return reconfig;
-}
-
-void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- PlaybackThread::dumpInternals(fd, args);
-
- snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
- result.append(buffer);
- write(fd, result.string(), result.size());
-
- // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
- FastMixerDumpState copy = mFastMixerDumpState;
- copy.dump(fd);
-
-#ifdef STATE_QUEUE_DUMP
- // Similar for state queue
- StateQueueObserverDump observerCopy = mStateQueueObserverDump;
- observerCopy.dump(fd);
- StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
- mutatorCopy.dump(fd);
-#endif
-
- // Write the tee output to a .wav file
- NBAIO_Source *teeSource = mTeeSource.get();
- if (teeSource != NULL) {
- char teePath[64];
- struct timeval tv;
- gettimeofday(&tv, NULL);
- struct tm tm;
- localtime_r(&tv.tv_sec, &tm);
- strftime(teePath, sizeof(teePath), "/data/misc/media/%T.wav", &tm);
- int teeFd = open(teePath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
- if (teeFd >= 0) {
- char wavHeader[44];
- memcpy(wavHeader,
- "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
- sizeof(wavHeader));
- NBAIO_Format format = teeSource->format();
- unsigned channelCount = Format_channelCount(format);
- ALOG_ASSERT(channelCount <= FCC_2);
- unsigned sampleRate = Format_sampleRate(format);
- wavHeader[22] = channelCount; // number of channels
- wavHeader[24] = sampleRate; // sample rate
- wavHeader[25] = sampleRate >> 8;
- wavHeader[32] = channelCount * 2; // block alignment
- write(teeFd, wavHeader, sizeof(wavHeader));
- size_t total = 0;
- bool firstRead = true;
- for (;;) {
-#define TEE_SINK_READ 1024
- short buffer[TEE_SINK_READ * FCC_2];
- size_t count = TEE_SINK_READ;
- ssize_t actual = teeSource->read(buffer, count,
- AudioBufferProvider::kInvalidPTS);
- bool wasFirstRead = firstRead;
- firstRead = false;
- if (actual <= 0) {
- if (actual == (ssize_t) OVERRUN && wasFirstRead) {
- continue;
- }
- break;
- }
- ALOG_ASSERT(actual <= (ssize_t)count);
- write(teeFd, buffer, actual * channelCount * sizeof(short));
- total += actual;
- }
- lseek(teeFd, (off_t) 4, SEEK_SET);
- uint32_t temp = 44 + total * channelCount * sizeof(short) - 8;
- write(teeFd, &temp, sizeof(temp));
- lseek(teeFd, (off_t) 40, SEEK_SET);
- temp = total * channelCount * sizeof(short);
- write(teeFd, &temp, sizeof(temp));
- close(teeFd);
- fdprintf(fd, "FastMixer tee copied to %s\n", teePath);
- } else {
- fdprintf(fd, "FastMixer unable to create tee %s: \n", strerror(errno));
- }
- }
-
-#ifdef AUDIO_WATCHDOG
- if (mAudioWatchdog != 0) {
- // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
- AudioWatchdogDump wdCopy = mAudioWatchdogDump;
- wdCopy.dump(fd);
- }
-#endif
-}
-
-uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
-{
- return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
-}
-
-uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs() const
-{
- return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000);
-}
-
-void AudioFlinger::MixerThread::cacheParameters_l()
-{
- PlaybackThread::cacheParameters_l();
-
- // FIXME: Relaxed timing because of a certain device that can't meet latency.
- // Should be reduced to 2x after the vendor fixes the driver issue.
- // Increased the threshold again because of low-power audio mode; the way this warning
- // threshold is calculated, and its usefulness, should be reconsidered anyway.
- maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15;
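- // For example, with mNormalFrameCount = 1024 at 48 kHz the normal mix buffer is
- // ~21.3 ms, so a write is only flagged as blocked after roughly 320 ms
- // (see the "write blocked" detection in threadLoop()).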
-}
-
-// ----------------------------------------------------------------------------
-AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device)
- : PlaybackThread(audioFlinger, output, id, device, DIRECT)
- // mLeftVolFloat, mRightVolFloat
-{
-}
-
-AudioFlinger::DirectOutputThread::~DirectOutputThread()
-{
-}
-
-AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
- Vector< sp<Track> > *tracksToRemove
-)
-{
- sp<Track> trackToRemove;
-
- mixer_state mixerStatus = MIXER_IDLE;
-
- // find out which tracks need to be processed
- if (mActiveTracks.size() != 0) {
- sp<Track> t = mActiveTracks[0].promote();
- // The track died recently
- if (t == 0) return MIXER_IDLE;
-
- Track* const track = t.get();
- audio_track_cblk_t* cblk = track->cblk();
-
- // The first time a track is added we wait
- // for all its buffers to be filled before processing it
- uint32_t minFrames;
- if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) {
- minFrames = mNormalFrameCount;
- } else {
- minFrames = 1;
- }
- if ((track->framesReady() >= minFrames) && track->isReady() &&
- !track->isPaused() && !track->isTerminated())
- {
- //ALOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
-
- if (track->mFillingUpStatus == Track::FS_FILLED) {
- track->mFillingUpStatus = Track::FS_ACTIVE;
- mLeftVolFloat = mRightVolFloat = 0;
- if (track->mState == TrackBase::RESUMING) {
- track->mState = TrackBase::ACTIVE;
- }
- }
-
- // compute volume for this track
- float left, right;
- if (track->isMuted() || mMasterMute || track->isPausing() ||
- mStreamTypes[track->streamType()].mute) {
- left = right = 0;
- if (track->isPausing()) {
- track->setPaused();
- }
- } else {
- float typeVolume = mStreamTypes[track->streamType()].volume;
- float v = mMasterVolume * typeVolume;
- uint32_t vlr = cblk->getVolumeLR();
- float v_clamped = v * (vlr & 0xFFFF);
- if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
- left = v_clamped/MAX_GAIN;
- v_clamped = v * (vlr >> 16);
- if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
- right = v_clamped/MAX_GAIN;
- }
-
- if (left != mLeftVolFloat || right != mRightVolFloat) {
- mLeftVolFloat = left;
- mRightVolFloat = right;
-
- // Convert volumes from float to 8.24
- uint32_t vl = (uint32_t)(left * (1 << 24));
- uint32_t vr = (uint32_t)(right * (1 << 24));
-
- // Delegate volume control to effect in track effect chain if needed
- // only one effect chain can be present on DirectOutputThread, so if
- // there is one, the track is connected to it
- if (!mEffectChains.isEmpty()) {
- // Do not ramp volume if volume is controlled by effect
- mEffectChains[0]->setVolume_l(&vl, &vr);
- left = (float)vl / (1 << 24);
- right = (float)vr / (1 << 24);
- }
- mOutput->stream->set_volume(mOutput->stream, left, right);
- }
-
- // reset retry count
- track->mRetryCount = kMaxTrackRetriesDirect;
- mActiveTrack = t;
- mixerStatus = MIXER_TRACKS_READY;
- } else {
- // clear effect chain input buffer if an active track underruns to avoid sending
- // previous audio buffer again to effects
- if (!mEffectChains.isEmpty()) {
- mEffectChains[0]->clearInputBuffer();
- }
-
- //ALOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
- if ((track->sharedBuffer() != 0) || track->isTerminated() ||
- track->isStopped() || track->isPaused()) {
- // We have consumed all the buffers of this track.
- // Remove it from the list of active tracks.
- // TODO: implement behavior for compressed audio
- size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
- size_t framesWritten =
- mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
- if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
- if (track->isStopped()) {
- track->reset();
- }
- trackToRemove = track;
- }
- } else {
- // No buffers for this track. Give it a few chances to
- // fill a buffer, then remove it from active list.
- if (--(track->mRetryCount) <= 0) {
- ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
- trackToRemove = track;
- } else {
- mixerStatus = MIXER_TRACKS_ENABLED;
- }
- }
- }
- }
-
- // FIXME merge this with similar code for removing multiple tracks
- // remove all the tracks that need to be...
- if (CC_UNLIKELY(trackToRemove != 0)) {
- tracksToRemove->add(trackToRemove);
- mActiveTracks.remove(trackToRemove);
- if (!mEffectChains.isEmpty()) {
- ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(),
- trackToRemove->sessionId());
- mEffectChains[0]->decActiveTrackCnt();
- }
- if (trackToRemove->isTerminated()) {
- removeTrack_l(trackToRemove);
- }
- }
-
- return mixerStatus;
-}
-
-void AudioFlinger::DirectOutputThread::threadLoop_mix()
-{
- AudioBufferProvider::Buffer buffer;
- size_t frameCount = mFrameCount;
- int8_t *curBuf = (int8_t *)mMixBuffer;
- // output audio to hardware
- while (frameCount) {
- buffer.frameCount = frameCount;
- mActiveTrack->getNextBuffer(&buffer);
- if (CC_UNLIKELY(buffer.raw == NULL)) {
- memset(curBuf, 0, frameCount * mFrameSize);
- break;
- }
- memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
- frameCount -= buffer.frameCount;
- curBuf += buffer.frameCount * mFrameSize;
- mActiveTrack->releaseBuffer(&buffer);
- }
- sleepTime = 0;
- standbyTime = systemTime() + standbyDelay;
- mActiveTrack.clear();
-
-}
-
-void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
-{
- if (sleepTime == 0) {
- if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime;
- } else {
- sleepTime = idleSleepTime;
- }
- } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
- memset(mMixBuffer, 0, mFrameCount * mFrameSize);
- sleepTime = 0;
- }
-}
-
-// getTrackName_l() must be called with ThreadBase::mLock held
-int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask,
- int sessionId)
-{
- return 0;
-}
-
-// deleteTrackName_l() must be called with ThreadBase::mLock held
-void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name)
-{
-}
-
-// checkForNewParameters_l() must be called with ThreadBase::mLock held
-bool AudioFlinger::DirectOutputThread::checkForNewParameters_l()
-{
- bool reconfig = false;
-
- while (!mNewParameters.isEmpty()) {
- status_t status = NO_ERROR;
- String8 keyValuePair = mNewParameters[0];
- AudioParameter param = AudioParameter(keyValuePair);
- int value;
-
- if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
- // do not accept frame count changes if tracks are open as the track buffer
- // size depends on frame count and correct behavior would not be guaranteed
- // if frame count is changed after track creation
- if (!mTracks.isEmpty()) {
- status = INVALID_OPERATION;
- } else {
- reconfig = true;
- }
- }
- if (status == NO_ERROR) {
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
- if (!mStandby && status == INVALID_OPERATION) {
- mOutput->stream->common.standby(&mOutput->stream->common);
- mStandby = true;
- mBytesWritten = 0;
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
- }
- if (status == NO_ERROR && reconfig) {
- readOutputParameters();
- sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
- }
- }
-
- mNewParameters.removeAt(0);
-
- mParamStatus = status;
- mParamCond.signal();
- // wait for the condition with a timeout in case the thread calling ThreadBase::setParameters()
- // already timed out waiting for the status and will never signal the condition.
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
- }
- return reconfig;
-}
-
-uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const
-{
- uint32_t time;
- if (audio_is_linear_pcm(mFormat)) {
- time = PlaybackThread::activeSleepTimeUs();
- } else {
- time = 10000;
- }
- return time;
-}
-
-uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const
-{
- uint32_t time;
- if (audio_is_linear_pcm(mFormat)) {
- time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
- } else {
- time = 10000;
- }
- return time;
-}
-
-uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const
-{
- uint32_t time;
- if (audio_is_linear_pcm(mFormat)) {
- time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
- } else {
- time = 10000;
- }
- return time;
-}
-
-void AudioFlinger::DirectOutputThread::cacheParameters_l()
-{
- PlaybackThread::cacheParameters_l();
-
- // use a shorter standby delay than on a normal output to release
- // hardware resources as soon as possible
- standbyDelay = microseconds(activeSleepTime*2);
-}
-
-// ----------------------------------------------------------------------------
-
-AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
- AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
- : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(), DUPLICATING),
- mWaitTimeMs(UINT_MAX)
-{
- addOutputTrack(mainThread);
-}
-
-AudioFlinger::DuplicatingThread::~DuplicatingThread()
-{
- for (size_t i = 0; i < mOutputTracks.size(); i++) {
- mOutputTracks[i]->destroy();
- }
-}
-
-void AudioFlinger::DuplicatingThread::threadLoop_mix()
-{
- // mix buffers...
- if (outputsReady(outputTracks)) {
- mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
- } else {
- memset(mMixBuffer, 0, mixBufferSize);
- }
- sleepTime = 0;
- writeFrames = mNormalFrameCount;
- standbyTime = systemTime() + standbyDelay;
-}
-
-void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
-{
- if (sleepTime == 0) {
- if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime;
- } else {
- sleepTime = idleSleepTime;
- }
- } else if (mBytesWritten != 0) {
- if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- writeFrames = mNormalFrameCount;
- memset(mMixBuffer, 0, mixBufferSize);
- } else {
- // flush remaining overflow buffers in output tracks
- writeFrames = 0;
- }
- sleepTime = 0;
- }
-}
-
-void AudioFlinger::DuplicatingThread::threadLoop_write()
-{
- for (size_t i = 0; i < outputTracks.size(); i++) {
- outputTracks[i]->write(mMixBuffer, writeFrames);
- }
- mBytesWritten += mixBufferSize;
-}
-
-void AudioFlinger::DuplicatingThread::threadLoop_standby()
-{
- // DuplicatingThread implements standby by stopping all tracks
- for (size_t i = 0; i < outputTracks.size(); i++) {
- outputTracks[i]->stop();
- }
-}
-
-void AudioFlinger::DuplicatingThread::saveOutputTracks()
-{
- outputTracks = mOutputTracks;
-}
-
-void AudioFlinger::DuplicatingThread::clearOutputTracks()
-{
- outputTracks.clear();
-}
-
-void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
-{
- Mutex::Autolock _l(mLock);
- // FIXME explain this formula
- int frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
- OutputTrack *outputTrack = new OutputTrack(thread,
- this,
- mSampleRate,
- mFormat,
- mChannelMask,
- frameCount);
- if (outputTrack->cblk() != NULL) {
- thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f);
- mOutputTracks.add(outputTrack);
- ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
- updateWaitTime_l();
- }
-}
-
-void AudioFlinger::DuplicatingThread::removeOutputTrack(MixerThread *thread)
-{
- Mutex::Autolock _l(mLock);
- for (size_t i = 0; i < mOutputTracks.size(); i++) {
- if (mOutputTracks[i]->thread() == thread) {
- mOutputTracks[i]->destroy();
- mOutputTracks.removeAt(i);
- updateWaitTime_l();
- return;
- }
- }
- ALOGV("removeOutputTrack(): unkonwn thread: %p", thread);
-}
-
-// caller must hold mLock
-void AudioFlinger::DuplicatingThread::updateWaitTime_l()
-{
- mWaitTimeMs = UINT_MAX;
- for (size_t i = 0; i < mOutputTracks.size(); i++) {
- sp<ThreadBase> strong = mOutputTracks[i]->thread().promote();
- if (strong != 0) {
- uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate();
- if (waitTimeMs < mWaitTimeMs) {
- mWaitTimeMs = waitTimeMs;
- }
- }
- }
-}
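updateWaitTime_l() above keeps, across all attached output tracks, the smallest value of "two sink buffers expressed in milliseconds" as the write timeout. A hedged, self-contained illustration of that arithmetic, with bufferWaitTimeMs as an assumed helper name and made-up sink parameters:

#include <cstdint>
#include <cstdio>
#include <algorithm>

// Two sink buffers expressed in milliseconds; same formula as above.
static uint32_t bufferWaitTimeMs(uint32_t frameCount, uint32_t sampleRate) {
    return (frameCount * 2 * 1000) / sampleRate;
}

int main() {
    // Hypothetical sinks: 1024 frames @ 48 kHz and 880 frames @ 44.1 kHz.
    uint32_t w1 = bufferWaitTimeMs(1024, 48000);     // ~42 ms
    uint32_t w2 = bufferWaitTimeMs(880, 44100);      // ~39 ms
    printf("wait time = %u ms\n", std::min(w1, w2)); // keep the smallest, as above
    return 0;
}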
-
-
-bool AudioFlinger::DuplicatingThread::outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks)
-{
- for (size_t i = 0; i < outputTracks.size(); i++) {
- sp<ThreadBase> thread = outputTracks[i]->thread().promote();
- if (thread == 0) {
- ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p", outputTracks[i].get());
- return false;
- }
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- // see note at standby() declaration
- if (playbackThread->standby() && !playbackThread->isSuspended()) {
- ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(), thread.get());
- return false;
- }
- }
- return true;
-}
-
-uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
-{
- return (mWaitTimeMs * 1000) / 2;
-}
-
-void AudioFlinger::DuplicatingThread::cacheParameters_l()
-{
- // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first
- updateWaitTime_l();
-
- MixerThread::cacheParameters_l();
-}
-
-// ----------------------------------------------------------------------------
-
-// TrackBase constructor must be called with AudioFlinger::mLock held
-AudioFlinger::ThreadBase::TrackBase::TrackBase(
- ThreadBase *thread,
- const sp<Client>& client,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId)
- : RefBase(),
- mThread(thread),
- mClient(client),
- mCblk(NULL),
- // mBuffer
- // mBufferEnd
- mFrameCount(0),
- mState(IDLE),
- mSampleRate(sampleRate),
- mFormat(format),
- mStepServerFailed(false),
- mSessionId(sessionId)
- // mChannelCount
- // mChannelMask
-{
- ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
-
- // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
- size_t size = sizeof(audio_track_cblk_t);
- uint8_t channelCount = popcount(channelMask);
- size_t bufferSize = frameCount*channelCount*sizeof(int16_t);
- if (sharedBuffer == 0) {
- size += bufferSize;
- }
-
- if (client != NULL) {
- mCblkMemory = client->heap()->allocate(size);
- if (mCblkMemory != 0) {
- mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
- if (mCblk != NULL) { // construct the shared structure in-place.
- new(mCblk) audio_track_cblk_t();
- // clear all buffers
- mCblk->frameCount = frameCount;
- mCblk->sampleRate = sampleRate;
-// uncomment the following lines to quickly test 32-bit wraparound
-// mCblk->user = 0xffff0000;
-// mCblk->server = 0xffff0000;
-// mCblk->userBase = 0xffff0000;
-// mCblk->serverBase = 0xffff0000;
- mChannelCount = channelCount;
- mChannelMask = channelMask;
- if (sharedBuffer == 0) {
- mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
- memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
- // Force underrun condition to avoid false underrun callback until first data is
- // written to buffer (other flags are cleared)
- mCblk->flags = CBLK_UNDERRUN_ON;
- } else {
- mBuffer = sharedBuffer->pointer();
- }
- mBufferEnd = (uint8_t *)mBuffer + bufferSize;
- }
- } else {
- ALOGE("not enough memory for AudioTrack size=%u", size);
- client->heap()->dump("AudioTrack");
- return;
- }
- } else {
- mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
- // construct the shared structure in-place.
- new(mCblk) audio_track_cblk_t();
- // clear all buffers
- mCblk->frameCount = frameCount;
- mCblk->sampleRate = sampleRate;
-// uncomment the following lines to quickly test 32-bit wraparound
-// mCblk->user = 0xffff0000;
-// mCblk->server = 0xffff0000;
-// mCblk->userBase = 0xffff0000;
-// mCblk->serverBase = 0xffff0000;
- mChannelCount = channelCount;
- mChannelMask = channelMask;
- mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
- memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
- // Force underrun condition to avoid false underrun callback until first data is
- // written to buffer (other flags are cleared)
- mCblk->flags = CBLK_UNDERRUN_ON;
- mBufferEnd = (uint8_t *)mBuffer + bufferSize;
- }
-}
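The constructor above carves a single allocation into the control block followed immediately by the PCM buffer, and constructs audio_track_cblk_t in place with placement new. A simplified sketch of that layout logic; FakeCblk is only a stand-in for the real (much richer) control block, and malloc replaces the shared-memory heap:

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <new>

// Stand-in for audio_track_cblk_t.
struct FakeCblk {
    uint32_t frameCount;
    uint32_t sampleRate;
};

int main() {
    const size_t frameCount = 1024, channelCount = 2;
    const size_t bufferSize = frameCount * channelCount * sizeof(int16_t);
    // One allocation: control-block header followed by the audio buffer.
    void* mem = malloc(sizeof(FakeCblk) + bufferSize);
    FakeCblk* cblk = new (mem) FakeCblk();          // construct the shared structure in place
    cblk->frameCount = frameCount;
    cblk->sampleRate = 44100;
    char* buffer = (char*)cblk + sizeof(FakeCblk);  // PCM data starts right after the header
    memset(buffer, 0, bufferSize);                  // clear all buffers
    cblk->~FakeCblk();
    free(mem);
    return 0;
}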
-
-AudioFlinger::ThreadBase::TrackBase::~TrackBase()
-{
- if (mCblk != NULL) {
- if (mClient == 0) {
- delete mCblk;
- } else {
- mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
- }
- }
- mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
- if (mClient != 0) {
- // Client destructor must run with AudioFlinger mutex locked
- Mutex::Autolock _l(mClient->audioFlinger()->mLock);
- // If the client's reference count drops to zero, the associated destructor
- // must run with AudioFlinger lock held. Thus the explicit clear() rather than
- // relying on the automatic clear() at end of scope.
- mClient.clear();
- }
-}
-
-// AudioBufferProvider interface
-// getNextBuffer() = 0;
-// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
-void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
-{
- buffer->raw = NULL;
- mFrameCount = buffer->frameCount;
- // FIXME See note at getNextBuffer()
- (void) step(); // ignore return value of step()
- buffer->frameCount = 0;
-}
-
-bool AudioFlinger::ThreadBase::TrackBase::step() {
- bool result;
- audio_track_cblk_t* cblk = this->cblk();
-
- result = cblk->stepServer(mFrameCount);
- if (!result) {
- ALOGV("stepServer failed acquiring cblk mutex");
- mStepServerFailed = true;
- }
- return result;
-}
-
-void AudioFlinger::ThreadBase::TrackBase::reset() {
- audio_track_cblk_t* cblk = this->cblk();
-
- cblk->user = 0;
- cblk->server = 0;
- cblk->userBase = 0;
- cblk->serverBase = 0;
- mStepServerFailed = false;
- ALOGV("TrackBase::reset");
-}
-
-int AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
- return (int)mCblk->sampleRate;
-}
-
-void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
- audio_track_cblk_t* cblk = this->cblk();
- size_t frameSize = cblk->frameSize;
- int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase)*frameSize;
- int8_t *bufferEnd = bufferStart + frames * frameSize;
-
- // Check validity of the returned pointer in case the track control block has been corrupted.
- ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
- "TrackBase::getBuffer buffer out of range:\n"
- " start: %p, end %p , mBuffer %p mBufferEnd %p\n"
- " server %u, serverBase %u, user %u, userBase %u, frameSize %d",
- bufferStart, bufferEnd, mBuffer, mBufferEnd,
- cblk->server, cblk->serverBase, cblk->user, cblk->userBase, frameSize);
-
- return bufferStart;
-}
-
-status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
-{
- mSyncEvents.add(event);
- return NO_ERROR;
-}
-
-// ----------------------------------------------------------------------------
-
-// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
-AudioFlinger::PlaybackThread::Track::Track(
- PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- IAudioFlinger::track_flags_t flags)
- : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId),
- mMute(false),
- mFillingUpStatus(FS_INVALID),
- // mRetryCount initialized later when needed
- mSharedBuffer(sharedBuffer),
- mStreamType(streamType),
- mName(-1), // see note below
- mMainBuffer(thread->mixBuffer()),
- mAuxBuffer(NULL),
- mAuxEffectId(0), mHasVolumeController(false),
- mPresentationCompleteFrames(0),
- mFlags(flags),
- mFastIndex(-1),
- mUnderrunCount(0),
- mCachedVolume(1.0)
-{
- if (mCblk != NULL) {
- // NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
- // 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
- mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * sizeof(int16_t) : sizeof(uint8_t);
- // to avoid leaking a track name, do not allocate one unless there is an mCblk
- mName = thread->getTrackName_l(channelMask, sessionId);
- mCblk->mName = mName;
- if (mName < 0) {
- ALOGE("no more track names available");
- return;
- }
- // only allocate a fast track index if we were able to allocate a normal track name
- if (flags & IAudioFlinger::TRACK_FAST) {
- mCblk->flags |= CBLK_FAST; // atomic op not needed yet
- ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
- int i = __builtin_ctz(thread->mFastTrackAvailMask);
- ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
- // FIXME This is too eager. We allocate a fast track index before the
- // fast track becomes active. Since fast tracks are a scarce resource,
- // this means we could be preventing other, more important fast tracks from
- // being created. It would be better to allocate the index dynamically.
- mFastIndex = i;
- mCblk->mName = i;
- // Read the initial underruns because this field is never cleared by the fast mixer
- mObservedUnderruns = thread->getFastTrackUnderruns(i);
- thread->mFastTrackAvailMask &= ~(1 << i);
- }
- }
- ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid());
-}
-
-AudioFlinger::PlaybackThread::Track::~Track()
-{
- ALOGV("PlaybackThread::Track destructor");
-}
-
-void AudioFlinger::PlaybackThread::Track::destroy()
-{
- // NOTE: destroyTrack_l() can remove a strong reference to this Track
- // by removing it from the mTracks vector, so there is a risk that this Track's
- // destructor is called. As the destructor needs to lock mLock,
- // we must acquire a strong reference on this Track before locking mLock
- // here so that the destructor is called only when exiting this function.
- // On the other hand, as long as Track::destroy() is only called by
- // TrackHandle destructor, the TrackHandle still holds a strong ref on
- // this Track with its member mTrack.
- sp<Track> keep(this);
- { // scope for mLock
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- if (!isOutputTrack()) {
- if (mState == ACTIVE || mState == RESUMING) {
- AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-
-#ifdef ADD_BATTERY_DATA
- // to track the speaker usage
- addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
- }
- AudioSystem::releaseOutput(thread->id());
- }
- Mutex::Autolock _l(thread->mLock);
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- playbackThread->destroyTrack_l(this);
- }
- }
-}
-
-/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
-{
- result.append(" Name Client Type Fmt Chn mask Session mFrCnt fCount S M F SRate L dB R dB "
- " Server User Main buf Aux Buf Flags Underruns\n");
-}
-
-void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
-{
- uint32_t vlr = mCblk->getVolumeLR();
- if (isFastTrack()) {
- sprintf(buffer, " F %2d", mFastIndex);
- } else {
- sprintf(buffer, " %4d", mName - AudioMixer::TRACK0);
- }
- track_state state = mState;
- char stateChar;
- switch (state) {
- case IDLE:
- stateChar = 'I';
- break;
- case TERMINATED:
- stateChar = 'T';
- break;
- case STOPPING_1:
- stateChar = 's';
- break;
- case STOPPING_2:
- stateChar = '5';
- break;
- case STOPPED:
- stateChar = 'S';
- break;
- case RESUMING:
- stateChar = 'R';
- break;
- case ACTIVE:
- stateChar = 'A';
- break;
- case PAUSING:
- stateChar = 'p';
- break;
- case PAUSED:
- stateChar = 'P';
- break;
- case FLUSHED:
- stateChar = 'F';
- break;
- default:
- stateChar = '?';
- break;
- }
- char nowInUnderrun;
- switch (mObservedUnderruns.mBitFields.mMostRecent) {
- case UNDERRUN_FULL:
- nowInUnderrun = ' ';
- break;
- case UNDERRUN_PARTIAL:
- nowInUnderrun = '<';
- break;
- case UNDERRUN_EMPTY:
- nowInUnderrun = '*';
- break;
- default:
- nowInUnderrun = '?';
- break;
- }
- snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %1d %5u %5.2g %5.2g "
- "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
- (mClient == 0) ? getpid_cached : mClient->pid(),
- mStreamType,
- mFormat,
- mChannelMask,
- mSessionId,
- mFrameCount,
- mCblk->frameCount,
- stateChar,
- mMute,
- mFillingUpStatus,
- mCblk->sampleRate,
- 20.0 * log10((vlr & 0xFFFF) / 4096.0),
- 20.0 * log10((vlr >> 16) / 4096.0),
- mCblk->server,
- mCblk->user,
- (int)mMainBuffer,
- (int)mAuxBuffer,
- mCblk->flags,
- mUnderrunCount,
- nowInUnderrun);
-}
-
-// AudioBufferProvider interface
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts)
-{
- audio_track_cblk_t* cblk = this->cblk();
- uint32_t framesReady;
- uint32_t framesReq = buffer->frameCount;
-
- // Check if last stepServer failed, try to step now
- if (mStepServerFailed) {
- // FIXME When called by fast mixer, this takes a mutex with tryLock().
- // Since the fast mixer is higher priority than the client callback thread,
- // it does not result in priority inversion for the client.
- // But a non-blocking solution would be preferable to avoid
- // the fast mixer being unable to tryLock(), and
- // to avoid the extra context switches if the client wakes up,
- // discovers the mutex is locked, then has to wait for the fast mixer to unlock.
- if (!step()) goto getNextBuffer_exit;
- ALOGV("stepServer recovered");
- mStepServerFailed = false;
- }
-
- // FIXME Same as above
- framesReady = cblk->framesReady();
-
- if (CC_LIKELY(framesReady)) {
- uint32_t s = cblk->server;
- uint32_t bufferEnd = cblk->serverBase + cblk->frameCount;
-
- bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
- if (framesReq > framesReady) {
- framesReq = framesReady;
- }
- if (framesReq > bufferEnd - s) {
- framesReq = bufferEnd - s;
- }
-
- buffer->raw = getBuffer(s, framesReq);
- buffer->frameCount = framesReq;
- return NO_ERROR;
- }
-
-getNextBuffer_exit:
- buffer->raw = NULL;
- buffer->frameCount = 0;
- ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
- return NOT_ENOUGH_DATA;
-}
-
-// Note that framesReady() takes a mutex on the control block using tryLock().
-// This could result in priority inversion if framesReady() is called by the normal mixer,
-// as the normal mixer thread runs at lower
-// priority than the client's callback thread: there is a short window within framesReady()
-// during which the normal mixer could be preempted, and the client callback would block.
-// Another problem can occur if framesReady() is called by the fast mixer:
-// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
-// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
-size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
- return mCblk->framesReady();
-}
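The FIXME above only proposes replacing the tryLock()-based control block with a non-blocking FIFO. One common shape for such a single-producer/single-consumer counter pair is sketched below; SpscFrameFifo is purely hypothetical and is not the actual AudioTrackShared replacement:

#include <atomic>
#include <cstdint>

// Single-producer/single-consumer frame counters: only the writer advances
// mUser and only the reader advances mServer, so no mutex is required.
struct SpscFrameFifo {
    std::atomic<uint32_t> mUser{0};    // frames written by the client
    std::atomic<uint32_t> mServer{0};  // frames consumed by the mixer

    uint32_t framesReady() const {
        // acquire pairs with the writer's release so the written data is visible
        return mUser.load(std::memory_order_acquire) -
               mServer.load(std::memory_order_relaxed);
    }
    void commitWrite(uint32_t frames) {
        mUser.store(mUser.load(std::memory_order_relaxed) + frames,
                    std::memory_order_release);
    }
    void commitRead(uint32_t frames) {
        mServer.store(mServer.load(std::memory_order_relaxed) + frames,
                      std::memory_order_release);
    }
};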
-
-// Don't call for fast tracks; the framesReady() could result in priority inversion
-bool AudioFlinger::PlaybackThread::Track::isReady() const {
- if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
-
- if (framesReady() >= mCblk->frameCount ||
- (mCblk->flags & CBLK_FORCEREADY_MSK)) {
- mFillingUpStatus = FS_FILLED;
- android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags);
- return true;
- }
- return false;
-}
-
-status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
- int triggerSession)
-{
- status_t status = NO_ERROR;
- ALOGV("start(%d), calling pid %d session %d",
- mName, IPCThreadState::self()->getCallingPid(), mSessionId);
-
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
- track_state state = mState;
- // here the track could be either new, or restarted
- // in both cases "unstop" the track
- if (mState == PAUSED) {
- mState = TrackBase::RESUMING;
- ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
- } else {
- mState = TrackBase::ACTIVE;
- ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
- }
-
- if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
- thread->mLock.unlock();
- status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
- thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
- // to track the speaker usage
- if (status == NO_ERROR) {
- addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
- }
-#endif
- }
- if (status == NO_ERROR) {
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- playbackThread->addTrack_l(this);
- } else {
- mState = state;
- triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
- }
- } else {
- status = BAD_VALUE;
- }
- return status;
-}
-
-void AudioFlinger::PlaybackThread::Track::stop()
-{
- ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
- track_state state = mState;
- if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
- // If the track is not active (PAUSED and buffers full), flush buffers
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- if (playbackThread->mActiveTracks.indexOf(this) < 0) {
- reset();
- mState = STOPPED;
- } else if (!isFastTrack()) {
- mState = STOPPED;
- } else {
- // prepareTracks_l() will set state to STOPPING_2 after next underrun,
- // and then to STOPPED and reset() when presentation is complete
- mState = STOPPING_1;
- }
- ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName, playbackThread);
- }
- if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
- thread->mLock.unlock();
- AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
- thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
- // to track the speaker usage
- addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
- }
- }
-}
-
-void AudioFlinger::PlaybackThread::Track::pause()
-{
- ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
- if (mState == ACTIVE || mState == RESUMING) {
- mState = PAUSING;
- ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
- if (!isOutputTrack()) {
- thread->mLock.unlock();
- AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
- thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
- // to track the speaker usage
- addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
- }
- }
- }
-}
-
-void AudioFlinger::PlaybackThread::Track::flush()
-{
- ALOGV("flush(%d)", mName);
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
- if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
- mState != PAUSING && mState != IDLE && mState != FLUSHED) {
- return;
- }
- // No point remaining in PAUSED state after a flush => go to
- // FLUSHED state
- mState = FLUSHED;
- // do not reset the track if it is still in the process of being stopped or paused.
- // this will be done by prepareTracks_l() when the track is stopped.
- // prepareTracks_l() will see mState == FLUSHED, then
- // remove from active track list, reset(), and trigger presentation complete
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- if (playbackThread->mActiveTracks.indexOf(this) < 0) {
- reset();
- }
- }
-}
-
-void AudioFlinger::PlaybackThread::Track::reset()
-{
- // Do not reset twice to avoid discarding data written just after a flush and before
- // the audioflinger thread detects the track is stopped.
- if (!mResetDone) {
- TrackBase::reset();
- // Force underrun condition to avoid false underrun callback until first data is
- // written to buffer
- android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags);
- android_atomic_or(CBLK_UNDERRUN_ON, &mCblk->flags);
- mFillingUpStatus = FS_FILLING;
- mResetDone = true;
- if (mState == FLUSHED) {
- mState = IDLE;
- }
- }
-}
-
-void AudioFlinger::PlaybackThread::Track::mute(bool muted)
-{
- mMute = muted;
-}
-
-status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
-{
- status_t status = DEAD_OBJECT;
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- sp<AudioFlinger> af = mClient->audioFlinger();
-
- Mutex::Autolock _l(af->mLock);
-
- sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
-
- if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
- Mutex::Autolock _dl(playbackThread->mLock);
- Mutex::Autolock _sl(srcThread->mLock);
- sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
- if (chain == 0) {
- return INVALID_OPERATION;
- }
-
- sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
- if (effect == 0) {
- return INVALID_OPERATION;
- }
- srcThread->removeEffect_l(effect);
- playbackThread->addEffect_l(effect);
- // removeEffect_l() has stopped the effect if it was active so it must be restarted
- if (effect->state() == EffectModule::ACTIVE ||
- effect->state() == EffectModule::STOPPING) {
- effect->start();
- }
-
- sp<EffectChain> dstChain = effect->chain().promote();
- if (dstChain == 0) {
- srcThread->addEffect_l(effect);
- return INVALID_OPERATION;
- }
- AudioSystem::unregisterEffect(effect->id());
- AudioSystem::registerEffect(&effect->desc(),
- srcThread->id(),
- dstChain->strategy(),
- AUDIO_SESSION_OUTPUT_MIX,
- effect->id());
- }
- status = playbackThread->attachAuxEffect(this, EffectId);
- }
- return status;
-}
-
-void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
-{
- mAuxEffectId = EffectId;
- mAuxBuffer = buffer;
-}
-
-bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
- size_t audioHalFrames)
-{
- // a track is considered presented when the total number of frames written to the audio HAL
- // reaches the number of frames that had been written when presentationComplete() was called
- // for the first time (mPresentationCompleteFrames == 0) plus the number of frames still
- // buffered in the HAL at that time.
- if (mPresentationCompleteFrames == 0) {
- mPresentationCompleteFrames = framesWritten + audioHalFrames;
- ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
- mPresentationCompleteFrames, audioHalFrames);
- }
- if (framesWritten >= mPresentationCompleteFrames) {
- ALOGV("presentationComplete() session %d complete: framesWritten %d",
- mSessionId, framesWritten);
- triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
- return true;
- }
- return false;
-}
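presentationComplete() above latches a target on its first call (the frames written so far plus the frames still buffered in the HAL) and reports completion once the write counter passes it. The same bookkeeping in isolation, as a hypothetical free function rather than the member above:

#include <cstddef>

// Returns true once framesWritten has advanced past the point latched on the
// first call; 'target' starts at 0, mirroring mPresentationCompleteFrames.
static bool presentationCompleteSketch(size_t framesWritten, size_t audioHalFrames,
                                       size_t& target) {
    if (target == 0) {
        target = framesWritten + audioHalFrames;  // frames still queued in the HAL
    }
    return framesWritten >= target;
}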
-
-void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
-{
- for (int i = 0; i < (int)mSyncEvents.size(); i++) {
- if (mSyncEvents[i]->type() == type) {
- mSyncEvents[i]->trigger();
- mSyncEvents.removeAt(i);
- i--;
- }
- }
-}
-
-// implement VolumeBufferProvider interface
-
-uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
-{
- // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
- ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
- uint32_t vlr = mCblk->getVolumeLR();
- uint32_t vl = vlr & 0xFFFF;
- uint32_t vr = vlr >> 16;
- // track volumes come from shared memory, so can't be trusted and must be clamped
- if (vl > MAX_GAIN_INT) {
- vl = MAX_GAIN_INT;
- }
- if (vr > MAX_GAIN_INT) {
- vr = MAX_GAIN_INT;
- }
- // now apply the cached master volume and stream type volume;
- // this is trusted but lacks any synchronization or barrier so may be stale
- float v = mCachedVolume;
- vl *= v;
- vr *= v;
- // re-combine into U4.16
- vlr = (vr << 16) | (vl & 0xFFFF);
- // FIXME look at mute, pause, and stop flags
- return vlr;
-}
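getVolumeLR() above unpacks two 16-bit gains from one 32-bit word (left in the low half, right in the high half), clamps them because they arrive over shared memory, scales by the cached volume, and re-packs them. The pack/clamp/scale steps in isolation; the 0x1000 unity value is an assumption inferred from the dump code above (log10(v / 4096)), and both helper names are made up:

#include <cstdint>

static const uint32_t kMaxGainInt = 0x1000;  // assumed unity gain, per the dump above

static uint32_t packLR(uint32_t vl, uint32_t vr) {
    return (vr << 16) | (vl & 0xFFFF);       // left in low 16 bits, right in high 16 bits
}

static uint32_t scaleAndClampLR(uint32_t vlr, float cachedVolume) {
    uint32_t vl = vlr & 0xFFFF;
    uint32_t vr = vlr >> 16;
    if (vl > kMaxGainInt) vl = kMaxGainInt;  // untrusted shared-memory values must be clamped
    if (vr > kMaxGainInt) vr = kMaxGainInt;
    vl = (uint32_t)(vl * cachedVolume);      // apply the cached master/stream volume
    vr = (uint32_t)(vr * cachedVolume);
    return packLR(vl, vr);
}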
-
-status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
-{
- if (mState == TERMINATED || mState == PAUSED ||
- ((framesReady() == 0) && ((mSharedBuffer != 0) ||
- (mState == STOPPED)))) {
- ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
- mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
- event->cancel();
- return INVALID_OPERATION;
- }
- (void) TrackBase::setSyncEvent(event);
- return NO_ERROR;
-}
-
-// timed audio tracks
-
-sp<AudioFlinger::PlaybackThread::TimedTrack>
-AudioFlinger::PlaybackThread::TimedTrack::create(
- PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId) {
- if (!client->reserveTimedTrack())
- return 0;
-
- return new TimedTrack(
- thread, client, streamType, sampleRate, format, channelMask, frameCount,
- sharedBuffer, sessionId);
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
- PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId)
- : Track(thread, client, streamType, sampleRate, format, channelMask,
- frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
- mQueueHeadInFlight(false),
- mTrimQueueHeadOnRelease(false),
- mFramesPendingInQueue(0),
- mTimedSilenceBuffer(NULL),
- mTimedSilenceBufferSize(0),
- mTimedAudioOutputOnTime(false),
- mMediaTimeTransformValid(false)
-{
- LocalClock lc;
- mLocalTimeFreq = lc.getLocalFreq();
-
- mLocalTimeToSampleTransform.a_zero = 0;
- mLocalTimeToSampleTransform.b_zero = 0;
- mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
- mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
- LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
- &mLocalTimeToSampleTransform.a_to_b_denom);
-
- mMediaTimeToSampleTransform.a_zero = 0;
- mMediaTimeToSampleTransform.b_zero = 0;
- mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
- mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
- LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
- &mMediaTimeToSampleTransform.a_to_b_denom);
-}
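The constructor above sets up two rational rate transforms (local-time ticks to samples, and microseconds to samples) and reduces each numerator/denominator pair to lowest terms. The reduction presumably amounts to dividing by the gcd; a small sketch under that assumption, with reduceRatio as a hypothetical stand-in for LinearTransform::reduce:

#include <cstdint>
#include <numeric>   // std::gcd (C++17)

// Reduce a rate ratio numer/denom to lowest terms, e.g. 44100/1000000 -> 441/10000.
static void reduceRatio(uint32_t* numer, uint32_t* denom) {
    uint32_t g = std::gcd(*numer, *denom);
    if (g != 0) {
        *numer /= g;
        *denom /= g;
    }
}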
-
-AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
- mClient->releaseTimedTrack();
- delete [] mTimedSilenceBuffer;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
- size_t size, sp<IMemory>* buffer) {
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- trimTimedBufferQueue_l();
-
- // lazily initialize the shared memory heap for timed buffers
- if (mTimedMemoryDealer == NULL) {
- const int kTimedBufferHeapSize = 512 << 10;
-
- mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
- "AudioFlingerTimed");
- if (mTimedMemoryDealer == NULL)
- return NO_MEMORY;
- }
-
- sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
- if (newBuffer == NULL) {
- newBuffer = mTimedMemoryDealer->allocate(size);
- if (newBuffer == NULL)
- return NO_MEMORY;
- }
-
- *buffer = newBuffer;
- return NO_ERROR;
-}
-
-// caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
- int64_t mediaTimeNow;
- {
- Mutex::Autolock mttLock(mMediaTimeTransformLock);
- if (!mMediaTimeTransformValid)
- return;
-
- int64_t targetTimeNow;
- status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
- ? mCCHelper.getCommonTime(&targetTimeNow)
- : mCCHelper.getLocalTime(&targetTimeNow);
-
- if (OK != res)
- return;
-
- if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
- &mediaTimeNow)) {
- return;
- }
- }
-
- size_t trimEnd;
- for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
- int64_t bufEnd;
-
- if ((trimEnd + 1) < mTimedBufferQueue.size()) {
- // We have a next buffer. Just use its PTS as the PTS of the frame
- // following the last frame in this buffer. If the stream is sparse
- // (i.e., there are deliberate gaps left in the stream which should be
- // filled with silence by the TimedAudioTrack), then this can result
- // in one extra buffer being left un-trimmed when it could have
- // been. In general, this is not typical, and we would rather
- // optimize away the TS calculation below for the more common case
- // where PTSes are contiguous.
- bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
- } else {
- // We have no next buffer. Compute the PTS of the frame following
- // the last frame in this buffer by computing the duration of
- // this buffer in media time units and adding it to the PTS of the
- // buffer.
- int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
- / mCblk->frameSize;
-
- if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
- &bufEnd)) {
- ALOGE("Failed to convert frame count of %lld to media time"
- " duration" " (scale factor %d/%u) in %s",
- frameCount,
- mMediaTimeToSampleTransform.a_to_b_numer,
- mMediaTimeToSampleTransform.a_to_b_denom,
- __PRETTY_FUNCTION__);
- break;
- }
- bufEnd += mTimedBufferQueue[trimEnd].pts();
- }
-
- if (bufEnd > mediaTimeNow)
- break;
-
- // Is the buffer we want to use in the middle of a mix operation right
- // now? If so, don't actually trim it. Just wait for the releaseBuffer
- // from the mixer which should be coming back shortly.
- if (!trimEnd && mQueueHeadInFlight) {
- mTrimQueueHeadOnRelease = true;
- }
- }
-
- size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
- if (trimStart < trimEnd) {
- // Update the bookkeeping for framesReady()
- for (size_t i = trimStart; i < trimEnd; ++i) {
- updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
- }
-
- // Now actually remove the buffers from the queue.
- mTimedBufferQueue.removeItemsAt(trimStart, trimEnd - trimStart);
- }
-}
-
-void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
- const char* logTag) {
- ALOG_ASSERT(mTimedBufferQueue.size() > 0,
- "%s called (reason \"%s\"), but timed buffer queue has no"
- " elements to trim.", __FUNCTION__, logTag);
-
- updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
- mTimedBufferQueue.removeAt(0);
-}
-
-void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
- const TimedBuffer& buf,
- const char* logTag) {
- uint32_t bufBytes = buf.buffer()->size();
- uint32_t consumedAlready = buf.position();
-
- ALOG_ASSERT(consumedAlready <= bufBytes,
- "Bad bookkeeping while updating frames pending. Timed buffer is"
- " only %u bytes long, but claims to have consumed %u"
- " bytes. (update reason: \"%s\")",
- bufBytes, consumedAlready, logTag);
-
- uint32_t bufFrames = (bufBytes - consumedAlready) / mCblk->frameSize;
- ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
- "Bad bookkeeping while updating frames pending. Should have at"
- " least %u queued frames, but we think we have only %u. (update"
- " reason: \"%s\")",
- bufFrames, mFramesPendingInQueue, logTag);
-
- mFramesPendingInQueue -= bufFrames;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
- const sp<IMemory>& buffer, int64_t pts) {
-
- {
- Mutex::Autolock mttLock(mMediaTimeTransformLock);
- if (!mMediaTimeTransformValid)
- return INVALID_OPERATION;
- }
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- uint32_t bufFrames = buffer->size() / mCblk->frameSize;
- mFramesPendingInQueue += bufFrames;
- mTimedBufferQueue.add(TimedBuffer(buffer, pts));
-
- return NO_ERROR;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
- const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
-
- ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
- xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
- target);
-
- if (!(target == TimedAudioTrack::LOCAL_TIME ||
- target == TimedAudioTrack::COMMON_TIME)) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock lock(mMediaTimeTransformLock);
- mMediaTimeTransform = xform;
- mMediaTimeTransformTarget = target;
- mMediaTimeTransformValid = true;
-
- return NO_ERROR;
-}
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
-// implementation of getNextBuffer for tracks whose buffers have timestamps
-status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts)
-{
- if (pts == AudioBufferProvider::kInvalidPTS) {
- buffer->raw = NULL;
- buffer->frameCount = 0;
- mTimedAudioOutputOnTime = false;
- return INVALID_OPERATION;
- }
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- ALOG_ASSERT(!mQueueHeadInFlight,
- "getNextBuffer called without releaseBuffer!");
-
- while (true) {
-
- // if we have no timed buffers, then fail
- if (mTimedBufferQueue.isEmpty()) {
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return NOT_ENOUGH_DATA;
- }
-
- TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
-
- // calculate the PTS of the head of the timed buffer queue expressed in
- // local time
- int64_t headLocalPTS;
- {
- Mutex::Autolock mttLock(mMediaTimeTransformLock);
-
- ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
-
- if (mMediaTimeTransform.a_to_b_denom == 0) {
- // the transform represents a pause, so yield silence
- timedYieldSilence_l(buffer->frameCount, buffer);
- return NO_ERROR;
- }
-
- int64_t transformedPTS;
- if (!mMediaTimeTransform.doForwardTransform(head.pts(),
- &transformedPTS)) {
- // the transform failed. this shouldn't happen, but if it does
- // then just drop this buffer
- ALOGW("timedGetNextBuffer transform failed");
- buffer->raw = NULL;
- buffer->frameCount = 0;
- trimTimedBufferQueueHead_l("getNextBuffer; no transform");
- return NO_ERROR;
- }
-
- if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
- if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
- &headLocalPTS)) {
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return INVALID_OPERATION;
- }
- } else {
- headLocalPTS = transformedPTS;
- }
- }
-
- // adjust the head buffer's PTS to reflect the portion of the head buffer
- // that has already been consumed
- int64_t effectivePTS = headLocalPTS +
- ((head.position() / mCblk->frameSize) * mLocalTimeFreq / sampleRate());
-
- // Calculate the delta in samples between the head of the input buffer
- // queue and the start of the next output buffer that will be written.
- // If the transformation fails because of over or underflow, it means
- // that the sample's position in the output stream is so far out of
- // whack that it should just be dropped.
- int64_t sampleDelta;
- if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
- ALOGV("*** head buffer is too far from PTS: dropped buffer");
- trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
- " mix");
- continue;
- }
- if (!mLocalTimeToSampleTransform.doForwardTransform(
- (effectivePTS - pts) << 32, &sampleDelta)) {
- ALOGV("*** too late during sample rate transform: dropped buffer");
- trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
- continue;
- }
-
- ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
- " sampleDelta=[%d.%08x]",
- head.pts(), head.position(), pts,
- static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
- + (sampleDelta >> 32)),
- static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
-
- // if the delta between the ideal placement for the next input sample and
- // the current output position is within this threshold, then we will
- // concatenate the next input samples to the previous output
- const int64_t kSampleContinuityThreshold =
- (static_cast<int64_t>(sampleRate()) << 32) / 250;
-
- // if this is the first buffer of audio that we're emitting from this track
- // then it should be almost exactly on time.
- const int64_t kSampleStartupThreshold = 1LL << 32;
-
- if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
- (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
- // the next input is close enough to being on time, so concatenate it
- // with the last output
- timedYieldSamples_l(buffer);
-
- ALOGVV("*** on time: head.pos=%d frameCount=%u",
- head.position(), buffer->frameCount);
- return NO_ERROR;
- }
-
- // Looks like our output is not on time. Reset our on-time status.
- // Next time we mix samples from our input queue, they should be within
- // the StartupThreshold.
- mTimedAudioOutputOnTime = false;
- if (sampleDelta > 0) {
- // the gap between the current output position and the proper start of
- // the next input sample is too big, so fill it with silence
- uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
-
- timedYieldSilence_l(framesUntilNextInput, buffer);
- ALOGV("*** silence: frameCount=%u", buffer->frameCount);
- return NO_ERROR;
- } else {
- // the next input sample is late
- uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
- size_t onTimeSamplePosition =
- head.position() + lateFrames * mCblk->frameSize;
-
- if (onTimeSamplePosition > head.buffer()->size()) {
- // all the remaining samples in the head are too late, so
- // drop it and move on
- ALOGV("*** too late: dropped buffer");
- trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
- continue;
- } else {
- // skip over the late samples
- head.setPosition(onTimeSamplePosition);
-
- // yield the available samples
- timedYieldSamples_l(buffer);
-
- ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
- return NO_ERROR;
- }
- }
- }
-}
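getNextBuffer() above compares the head buffer's ideal position with the mixer's next output position as a signed 32.32 fixed-point sample delta: within the continuity/startup threshold it splices the data, ahead of it it yields silence, behind it it skips late frames. The threshold comparison on its own, with classifyDelta as a hypothetical helper and the constants taken from the code above:

#include <cstdint>
#include <cstdlib>

// Decide what to do with the queue head given its offset from the next output
// sample, expressed in signed 32.32 fixed point (as in the code above).
enum Action { SPLICE, FILL_SILENCE, SKIP_LATE };

static Action classifyDelta(int64_t sampleDelta, uint32_t sampleRate, bool onTime) {
    const int64_t continuity = (static_cast<int64_t>(sampleRate) << 32) / 250; // ~4 ms of samples
    const int64_t startup    = 1LL << 32;                                      // one sample
    const int64_t limit = onTime ? continuity : startup;
    if (llabs(sampleDelta) <= limit) return SPLICE;   // close enough: concatenate with last output
    return (sampleDelta > 0) ? FILL_SILENCE           // early: pad the gap with silence
                             : SKIP_LATE;             // late: skip or drop the late frames
}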
-
-// Yield samples from the timed buffer queue head up to the given output
-// buffer's capacity.
-//
-// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
- AudioBufferProvider::Buffer* buffer) {
-
- const TimedBuffer& head = mTimedBufferQueue[0];
-
- buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
- head.position());
-
- uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
- mCblk->frameSize);
- size_t framesRequested = buffer->frameCount;
- buffer->frameCount = min(framesLeftInHead, framesRequested);
-
- mQueueHeadInFlight = true;
- mTimedAudioOutputOnTime = true;
-}
-
-// Yield samples of silence up to the given output buffer's capacity
-//
-// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
- uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
-
- // lazily allocate a buffer filled with silence
- if (mTimedSilenceBufferSize < numFrames * mCblk->frameSize) {
- delete [] mTimedSilenceBuffer;
- mTimedSilenceBufferSize = numFrames * mCblk->frameSize;
- mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
- memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
- }
-
- buffer->raw = mTimedSilenceBuffer;
- size_t framesRequested = buffer->frameCount;
- buffer->frameCount = min(numFrames, framesRequested);
-
- mTimedAudioOutputOnTime = false;
-}
-
-// AudioBufferProvider interface
-void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
- AudioBufferProvider::Buffer* buffer) {
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- // If the buffer which was just released is part of the buffer at the head
- // of the queue, be sure to update the amount of the buffer which has been
- // consumed. If the buffer being returned is not part of the head of the
- // queue, it's either because the buffer is part of the silence buffer, or
- // because the head of the timed queue was trimmed after the mixer called
- // getNextBuffer but before the mixer called releaseBuffer.
- if (buffer->raw == mTimedSilenceBuffer) {
- ALOG_ASSERT(!mQueueHeadInFlight,
- "Queue head in flight during release of silence buffer!");
- goto done;
- }
-
- ALOG_ASSERT(mQueueHeadInFlight,
- "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
- " head in flight.");
-
- if (mTimedBufferQueue.size()) {
- TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
-
- void* start = head.buffer()->pointer();
- void* end = reinterpret_cast<void*>(
- reinterpret_cast<uint8_t*>(head.buffer()->pointer())
- + head.buffer()->size());
-
- ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
- "released buffer not within the head of the timed buffer"
- " queue; qHead = [%p, %p], released buffer = %p",
- start, end, buffer->raw);
-
- head.setPosition(head.position() +
- (buffer->frameCount * mCblk->frameSize));
- mQueueHeadInFlight = false;
-
- ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
- "Bad bookkeeping during releaseBuffer! Should have at"
- " least %u queued frames, but we think we have only %u",
- buffer->frameCount, mFramesPendingInQueue);
-
- mFramesPendingInQueue -= buffer->frameCount;
-
- if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
- || mTrimQueueHeadOnRelease) {
- trimTimedBufferQueueHead_l("releaseBuffer");
- mTrimQueueHeadOnRelease = false;
- }
- } else {
- LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
- " buffers in the timed buffer queue");
- }
-
-done:
- buffer->raw = 0;
- buffer->frameCount = 0;
-}
-
-size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
- Mutex::Autolock _l(mTimedBufferQueueLock);
- return mFramesPendingInQueue;
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
- : mPTS(0), mPosition(0) {}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
- const sp<IMemory>& buffer, int64_t pts)
- : mBuffer(buffer), mPTS(pts), mPosition(0) {}
-
-// ----------------------------------------------------------------------------
-
-// RecordTrack constructor must be called with AudioFlinger::mLock held
-AudioFlinger::RecordThread::RecordTrack::RecordTrack(
- RecordThread *thread,
- const sp<Client>& client,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- int sessionId)
- : TrackBase(thread, client, sampleRate, format,
- channelMask, frameCount, 0 /*sharedBuffer*/, sessionId),
- mOverflow(false)
-{
- if (mCblk != NULL) {
- ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
- if (format == AUDIO_FORMAT_PCM_16_BIT) {
- mCblk->frameSize = mChannelCount * sizeof(int16_t);
- } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
- mCblk->frameSize = mChannelCount * sizeof(int8_t);
- } else {
- mCblk->frameSize = sizeof(int8_t);
- }
- }
-}
-
-AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
-{
- ALOGV("%s", __func__);
-}
-
-// AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
-{
- audio_track_cblk_t* cblk = this->cblk();
- uint32_t framesAvail;
- uint32_t framesReq = buffer->frameCount;
-
- // Check if last stepServer failed, try to step now
- if (mStepServerFailed) {
- if (!step()) goto getNextBuffer_exit;
- ALOGV("stepServer recovered");
- mStepServerFailed = false;
- }
-
- framesAvail = cblk->framesAvailable_l();
-
- if (CC_LIKELY(framesAvail)) {
- uint32_t s = cblk->server;
- uint32_t bufferEnd = cblk->serverBase + cblk->frameCount;
-
- if (framesReq > framesAvail) {
- framesReq = framesAvail;
- }
- if (framesReq > bufferEnd - s) {
- framesReq = bufferEnd - s;
- }
-
- buffer->raw = getBuffer(s, framesReq);
- buffer->frameCount = framesReq;
- return NO_ERROR;
- }
-
-getNextBuffer_exit:
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return NOT_ENOUGH_DATA;
-}
-
-status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
- int triggerSession)
-{
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- RecordThread *recordThread = (RecordThread *)thread.get();
- return recordThread->start(this, event, triggerSession);
- } else {
- return BAD_VALUE;
- }
-}
-
-void AudioFlinger::RecordThread::RecordTrack::stop()
-{
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- RecordThread *recordThread = (RecordThread *)thread.get();
- recordThread->mLock.lock();
- bool doStop = recordThread->stop_l(this);
- if (doStop) {
- TrackBase::reset();
- // Force overrun condition to avoid false overrun callback until first data is
- // read from buffer
- android_atomic_or(CBLK_UNDERRUN_ON, &mCblk->flags);
- }
- recordThread->mLock.unlock();
- if (doStop) {
- AudioSystem::stopInput(recordThread->id());
- }
- }
-}
-
-/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
-{
- result.append(" Clien Fmt Chn mask Session Buf S SRate Serv User FrameCount\n");
-}
-
-void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
-{
- snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %05u %08x %08x %05d\n",
- (mClient == 0) ? getpid_cached : mClient->pid(),
- mFormat,
- mChannelMask,
- mSessionId,
- mFrameCount,
- mState,
- mCblk->sampleRate,
- mCblk->server,
- mCblk->user,
- mCblk->frameCount);
-}
-
-
-// ----------------------------------------------------------------------------
-
-AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
- PlaybackThread *playbackThread,
- DuplicatingThread *sourceThread,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount)
- : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
- NULL, 0, IAudioFlinger::TRACK_DEFAULT),
- mActive(false), mSourceThread(sourceThread)
-{
-
- if (mCblk != NULL) {
- mCblk->flags |= CBLK_DIRECTION_OUT;
- mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
- mOutBuffer.frameCount = 0;
- playbackThread->mTracks.add(this);
- ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, " \
- "mCblk->frameCount %d, mCblk->sampleRate %d, mChannelMask 0x%08x mBufferEnd %p",
- mCblk, mBuffer, mCblk->buffers,
- mCblk->frameCount, mCblk->sampleRate, mChannelMask, mBufferEnd);
- } else {
- ALOGW("Error creating output track on thread %p", playbackThread);
- }
-}
-
-AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
-{
- clearBufferQueue();
-}
-
-status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
- int triggerSession)
-{
- status_t status = Track::start(event, triggerSession);
- if (status != NO_ERROR) {
- return status;
- }
-
- mActive = true;
- mRetryCount = 127;
- return status;
-}
-
-void AudioFlinger::PlaybackThread::OutputTrack::stop()
-{
- Track::stop();
- clearBufferQueue();
- mOutBuffer.frameCount = 0;
- mActive = false;
-}
-
-bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
-{
- Buffer *pInBuffer;
- Buffer inBuffer;
- uint32_t channelCount = mChannelCount;
- bool outputBufferFull = false;
- inBuffer.frameCount = frames;
- inBuffer.i16 = data;
-
- uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
-
- if (!mActive && frames != 0) {
- start();
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- MixerThread *mixerThread = (MixerThread *)thread.get();
- if (mCblk->frameCount > frames){
- if (mBufferQueue.size() < kMaxOverFlowBuffers) {
- uint32_t startFrames = (mCblk->frameCount - frames);
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
- pInBuffer->frameCount = startFrames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
- } else {
- ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
- }
- }
- }
- }
-
- while (waitTimeLeftMs) {
- // First write pending buffers, then new data
- if (mBufferQueue.size()) {
- pInBuffer = mBufferQueue.itemAt(0);
- } else {
- pInBuffer = &inBuffer;
- }
-
- if (pInBuffer->frameCount == 0) {
- break;
- }
-
- if (mOutBuffer.frameCount == 0) {
- mOutBuffer.frameCount = pInBuffer->frameCount;
- nsecs_t startTime = systemTime();
- if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
- ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this, mThread.unsafe_get());
- outputBufferFull = true;
- break;
- }
- uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
- if (waitTimeLeftMs >= waitTimeMs) {
- waitTimeLeftMs -= waitTimeMs;
- } else {
- waitTimeLeftMs = 0;
- }
- }
-
- uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount : pInBuffer->frameCount;
- memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
- mCblk->stepUser(outFrames);
- pInBuffer->frameCount -= outFrames;
- pInBuffer->i16 += outFrames * channelCount;
- mOutBuffer.frameCount -= outFrames;
- mOutBuffer.i16 += outFrames * channelCount;
-
- if (pInBuffer->frameCount == 0) {
- if (mBufferQueue.size()) {
- mBufferQueue.removeAt(0);
- delete [] pInBuffer->mBuffer;
- delete pInBuffer;
- ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this, mThread.unsafe_get(), mBufferQueue.size());
- } else {
- break;
- }
- }
- }
-
- // If we could not write all frames, allocate a buffer and queue it for next time.
- if (inBuffer.frameCount) {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0 && !thread->standby()) {
- if (mBufferQueue.size() < kMaxOverFlowBuffers) {
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
- pInBuffer->frameCount = inBuffer.frameCount;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
- ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this, mThread.unsafe_get(), mBufferQueue.size());
- } else {
- ALOGW("OutputTrack::write() %p thread %p no more overflow buffers", mThread.unsafe_get(), this);
- }
- }
- }
-
- // Calling write() with a 0 length buffer means that no more data will be written:
- // if no more buffers are pending, fill the output track buffer to make sure it is started
- // by the output mixer.
- if (frames == 0 && mBufferQueue.size() == 0) {
- if (mCblk->user < mCblk->frameCount) {
- frames = mCblk->frameCount - mCblk->user;
- pInBuffer = new Buffer;
- pInBuffer->mBuffer = new int16_t[frames * channelCount];
- pInBuffer->frameCount = frames;
- pInBuffer->i16 = pInBuffer->mBuffer;
- memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
- mBufferQueue.add(pInBuffer);
- } else if (mActive) {
- stop();
- }
- }
-
- return outputBufferFull;
-}
-
-status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
-{
- int active;
- status_t result;
- audio_track_cblk_t* cblk = mCblk;
- uint32_t framesReq = buffer->frameCount;
-
-// ALOGV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
- buffer->frameCount = 0;
-
- uint32_t framesAvail = cblk->framesAvailable();
-
-
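- // No frames available: block on the control block condition variable until space is
- // freed, or give up after waitTimeMs and return NO_MORE_BUFFERS.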
- if (framesAvail == 0) {
- Mutex::Autolock _l(cblk->lock);
- goto start_loop_here;
- while (framesAvail == 0) {
- active = mActive;
- if (CC_UNLIKELY(!active)) {
- ALOGV("Not active and NO_MORE_BUFFERS");
- return NO_MORE_BUFFERS;
- }
- result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
- if (result != NO_ERROR) {
- return NO_MORE_BUFFERS;
- }
- // read the server count again
- start_loop_here:
- framesAvail = cblk->framesAvailable_l();
- }
- }
-
-// if (framesAvail < framesReq) {
-// return NO_MORE_BUFFERS;
-// }
-
- if (framesReq > framesAvail) {
- framesReq = framesAvail;
- }
-
- uint32_t u = cblk->user;
- uint32_t bufferEnd = cblk->userBase + cblk->frameCount;
-
- if (framesReq > bufferEnd - u) {
- framesReq = bufferEnd - u;
- }
-
- buffer->frameCount = framesReq;
- buffer->raw = (void *)cblk->buffer(u);
- return NO_ERROR;
-}
-
-void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
-{
- size_t size = mBufferQueue.size();
-
- for (size_t i = 0; i < size; i++) {
- Buffer *pBuffer = mBufferQueue.itemAt(i);
- delete [] pBuffer->mBuffer;
- delete pBuffer;
- }
- mBufferQueue.clear();
-}
// ----------------------------------------------------------------------------
@@ -5790,88 +1116,6 @@ void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
mAudioFlinger->removeNotificationClient(mPid);
}
-// ----------------------------------------------------------------------------
-
-AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
- : BnAudioTrack(),
- mTrack(track)
-{
-}
-
-AudioFlinger::TrackHandle::~TrackHandle() {
- // just stop the track on deletion; associated resources will be freed from the main
- // thread once all pending buffers have been played, unless the track is no longer in
- // the active track list, in which case everything is freed immediately...
- mTrack->destroy();
-}
-
-sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
- return mTrack->getCblk();
-}
-
-status_t AudioFlinger::TrackHandle::start() {
- return mTrack->start();
-}
-
-void AudioFlinger::TrackHandle::stop() {
- mTrack->stop();
-}
-
-void AudioFlinger::TrackHandle::flush() {
- mTrack->flush();
-}
-
-void AudioFlinger::TrackHandle::mute(bool e) {
- mTrack->mute(e);
-}
-
-void AudioFlinger::TrackHandle::pause() {
- mTrack->pause();
-}
-
-status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
-{
- return mTrack->attachAuxEffect(EffectId);
-}
-
-status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
- sp<IMemory>* buffer) {
- if (!mTrack->isTimedTrack())
- return INVALID_OPERATION;
-
- PlaybackThread::TimedTrack* tt =
- reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
- return tt->allocateTimedBuffer(size, buffer);
-}
-
-status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts) {
- if (!mTrack->isTimedTrack())
- return INVALID_OPERATION;
-
- PlaybackThread::TimedTrack* tt =
- reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
- return tt->queueTimedBuffer(buffer, pts);
-}
-
-status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
- const LinearTransform& xform, int target) {
-
- if (!mTrack->isTimedTrack())
- return INVALID_OPERATION;
-
- PlaybackThread::TimedTrack* tt =
- reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
- return tt->setMediaTimeTransform(
- xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
-}
-
-status_t AudioFlinger::TrackHandle::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- return BnAudioTrack::onTransact(code, data, reply, flags);
-}
// ----------------------------------------------------------------------------
@@ -5881,7 +1125,7 @@ sp<IAudioRecord> AudioFlinger::openRecord(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
IAudioFlinger::track_flags_t flags,
pid_t tid,
int *sessionId,
@@ -5921,13 +1165,14 @@ sp<IAudioRecord> AudioFlinger::openRecord(
*sessionId = lSessionId;
}
}
- // create new record track. The record track uses one track in mHardwareMixerThread by convention.
+ // create new record track.
+ // The record track uses one track in mHardwareMixerThread by convention.
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
frameCount, lSessionId, flags, tid, &lStatus);
}
if (lStatus != NO_ERROR) {
- // remove local strong reference to Client before deleting the RecordTrack so that the Client
- // destructor is called by the TrackBase destructor with mLock held
+ // remove local strong reference to Client before deleting the RecordTrack so that the
+ // Client destructor is called by the TrackBase destructor with mLock held
client.clear();
recordTrack.clear();
goto Exit;
@@ -5944,891 +1189,6 @@ Exit:
return recordHandle;
}
-// ----------------------------------------------------------------------------
-
-AudioFlinger::RecordHandle::RecordHandle(const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
- : BnAudioRecord(),
- mRecordTrack(recordTrack)
-{
-}
-
-AudioFlinger::RecordHandle::~RecordHandle() {
- stop_nonvirtual();
- mRecordTrack->destroy();
-}
-
-sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
- return mRecordTrack->getCblk();
-}
-
-status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event, int triggerSession) {
- ALOGV("RecordHandle::start()");
- return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
-}
-
-void AudioFlinger::RecordHandle::stop() {
- stop_nonvirtual();
-}
-
-void AudioFlinger::RecordHandle::stop_nonvirtual() {
- ALOGV("RecordHandle::stop()");
- mRecordTrack->stop();
-}
-
-status_t AudioFlinger::RecordHandle::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- return BnAudioRecord::onTransact(code, data, reply, flags);
-}
-
-// ----------------------------------------------------------------------------
-
-AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamIn *input,
- uint32_t sampleRate,
- audio_channel_mask_t channelMask,
- audio_io_handle_t id,
- audio_devices_t device) :
- ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
- mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
- // mRsmpInIndex and mInputBytes set by readInputParameters()
- mReqChannelCount(popcount(channelMask)),
- mReqSampleRate(sampleRate)
- // mBytesRead is only meaningful while active, and so is cleared in start()
- // (but might be better to also clear here for dump?)
-{
- snprintf(mName, kNameLength, "AudioIn_%X", id);
-
- readInputParameters();
-}
-
-
-AudioFlinger::RecordThread::~RecordThread()
-{
- delete[] mRsmpInBuffer;
- delete mResampler;
- delete[] mRsmpOutBuffer;
-}
-
-void AudioFlinger::RecordThread::onFirstRef()
-{
- run(mName, PRIORITY_URGENT_AUDIO);
-}
-
-status_t AudioFlinger::RecordThread::readyToRun()
-{
- status_t status = initCheck();
- ALOGW_IF(status != NO_ERROR, "RecordThread %p could not initialize", this);
- return status;
-}
-
-bool AudioFlinger::RecordThread::threadLoop()
-{
- AudioBufferProvider::Buffer buffer;
- sp<RecordTrack> activeTrack;
- Vector< sp<EffectChain> > effectChains;
-
- nsecs_t lastWarning = 0;
-
- inputStandBy();
- acquireWakeLock();
-
- // used to verify we've read at least once before evaluating how many bytes were read
- bool readOnce = false;
-
- // start recording
- while (!exitPending()) {
-
- processConfigEvents();
-
- { // scope for mLock
- Mutex::Autolock _l(mLock);
- checkForNewParameters_l();
- if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
- standby();
-
- if (exitPending()) break;
-
- releaseWakeLock_l();
- ALOGV("RecordThread: loop stopping");
- // go to sleep
- mWaitWorkCV.wait(mLock);
- ALOGV("RecordThread: loop starting");
- acquireWakeLock_l();
- continue;
- }
- if (mActiveTrack != 0) {
- if (mActiveTrack->mState == TrackBase::PAUSING) {
- standby();
- mActiveTrack.clear();
- mStartStopCond.broadcast();
- } else if (mActiveTrack->mState == TrackBase::RESUMING) {
- if (mReqChannelCount != mActiveTrack->channelCount()) {
- mActiveTrack.clear();
- mStartStopCond.broadcast();
- } else if (readOnce) {
- // record start succeeds only if first read from audio input
- // succeeds
- if (mBytesRead >= 0) {
- mActiveTrack->mState = TrackBase::ACTIVE;
- } else {
- mActiveTrack.clear();
- }
- mStartStopCond.broadcast();
- }
- mStandby = false;
- } else if (mActiveTrack->mState == TrackBase::TERMINATED) {
- removeTrack_l(mActiveTrack);
- mActiveTrack.clear();
- }
- }
- lockEffectChains_l(effectChains);
- }
-
- if (mActiveTrack != 0) {
- if (mActiveTrack->mState != TrackBase::ACTIVE &&
- mActiveTrack->mState != TrackBase::RESUMING) {
- unlockEffectChains(effectChains);
- usleep(kRecordThreadSleepUs);
- continue;
- }
- for (size_t i = 0; i < effectChains.size(); i ++) {
- effectChains[i]->process_l();
- }
-
- buffer.frameCount = mFrameCount;
- if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
- readOnce = true;
- size_t framesOut = buffer.frameCount;
- if (mResampler == NULL) {
- // no resampling
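- // Copy frames left over in mRsmpInBuffer (with mono/stereo conversion if needed),
- // then read from the audio HAL once the local buffer is exhausted.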
- while (framesOut) {
- size_t framesIn = mFrameCount - mRsmpInIndex;
- if (framesIn) {
- int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
- int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * mActiveTrack->mCblk->frameSize;
- if (framesIn > framesOut)
- framesIn = framesOut;
- mRsmpInIndex += framesIn;
- framesOut -= framesIn;
- if ((int)mChannelCount == mReqChannelCount ||
- mFormat != AUDIO_FORMAT_PCM_16_BIT) {
- memcpy(dst, src, framesIn * mFrameSize);
- } else {
- if (mChannelCount == 1) {
- upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
- (int16_t *)src, framesIn);
- } else {
- downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
- (int16_t *)src, framesIn);
- }
- }
- }
- if (framesOut && mFrameCount == mRsmpInIndex) {
- if (framesOut == mFrameCount &&
- ((int)mChannelCount == mReqChannelCount || mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
- mBytesRead = mInput->stream->read(mInput->stream, buffer.raw, mInputBytes);
- framesOut = 0;
- } else {
- mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
- mRsmpInIndex = 0;
- }
- if (mBytesRead <= 0) {
- if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
- {
- ALOGE("Error reading audio input");
- // Force input into standby so that it tries to
- // recover at next read attempt
- inputStandBy();
- usleep(kRecordThreadSleepUs);
- }
- mRsmpInIndex = mFrameCount;
- framesOut = 0;
- buffer.frameCount = 0;
- }
- }
- }
- } else {
- // resampling
-
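- // The resampler accumulates 32-bit stereo samples into mRsmpOutBuffer, which are then
- // dithered/clamped back to 16-bit PCM and downmixed to mono if needed.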
- memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t));
- // alter output frame count as if we were expecting stereo samples
- if (mChannelCount == 1 && mReqChannelCount == 1) {
- framesOut >>= 1;
- }
- mResampler->resample(mRsmpOutBuffer, framesOut, this /* AudioBufferProvider* */);
- // ditherAndClamp() works as long as all buffers returned by mActiveTrack->getNextBuffer()
- // are 32 bit aligned, which should always be true.
- if (mChannelCount == 2 && mReqChannelCount == 1) {
- ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
- // the resampler always outputs stereo samples: do post stereo to mono conversion
- downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
- framesOut);
- } else {
- ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
- }
-
- }
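- // mFramestoDrop > 0: frames still to drop after the sync start event triggered;
- // mFramestoDrop < 0: frames remaining before waiting for the sync event times out.
- // The captured buffer is only released to the track when mFramestoDrop == 0.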
- if (mFramestoDrop == 0) {
- mActiveTrack->releaseBuffer(&buffer);
- } else {
- if (mFramestoDrop > 0) {
- mFramestoDrop -= buffer.frameCount;
- if (mFramestoDrop <= 0) {
- clearSyncStartEvent();
- }
- } else {
- mFramestoDrop += buffer.frameCount;
- if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
- mSyncStartEvent->isCancelled()) {
- ALOGW("Synced record %s, session %d, trigger session %d",
- (mFramestoDrop >= 0) ? "timed out" : "cancelled",
- mActiveTrack->sessionId(),
- (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
- clearSyncStartEvent();
- }
- }
- }
- mActiveTrack->clearOverflow();
- }
- // client isn't retrieving buffers fast enough
- else {
- if (!mActiveTrack->setOverflow()) {
- nsecs_t now = systemTime();
- if ((now - lastWarning) > kWarningThrottleNs) {
- ALOGW("RecordThread: buffer overflow");
- lastWarning = now;
- }
- }
- // Release the processor for a while before asking for a new buffer.
- // This will give the application more chance to read from the buffer and
- // clear the overflow.
- usleep(kRecordThreadSleepUs);
- }
- }
- // enable changes in effect chain
- unlockEffectChains(effectChains);
- effectChains.clear();
- }
-
- standby();
-
- {
- Mutex::Autolock _l(mLock);
- mActiveTrack.clear();
- mStartStopCond.broadcast();
- }
-
- releaseWakeLock();
-
- ALOGV("RecordThread %p exiting", this);
- return false;
-}
-
-void AudioFlinger::RecordThread::standby()
-{
- if (!mStandby) {
- inputStandBy();
- mStandby = true;
- }
-}
-
-void AudioFlinger::RecordThread::inputStandBy()
-{
- mInput->stream->common.standby(&mInput->stream->common);
-}
-
-sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
- const sp<AudioFlinger::Client>& client,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- int sessionId,
- IAudioFlinger::track_flags_t flags,
- pid_t tid,
- status_t *status)
-{
- sp<RecordTrack> track;
- status_t lStatus;
-
- lStatus = initCheck();
- if (lStatus != NO_ERROR) {
- ALOGE("Audio driver not initialized.");
- goto Exit;
- }
-
- // FIXME use flags and tid similar to createTrack_l()
-
- { // scope for mLock
- Mutex::Autolock _l(mLock);
-
- track = new RecordTrack(this, client, sampleRate,
- format, channelMask, frameCount, sessionId);
-
- if (track->getCblk() == 0) {
- lStatus = NO_MEMORY;
- goto Exit;
- }
- mTracks.add(track);
-
- // disable AEC and NS if the device is a BT SCO headset supporting those pre-processing effects
- bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
- mAudioFlinger->btNrecIsOff();
- setEffectSuspended_l(FX_IID_AEC, suspend, sessionId);
- setEffectSuspended_l(FX_IID_NS, suspend, sessionId);
- }
- lStatus = NO_ERROR;
-
-Exit:
- if (status) {
- *status = lStatus;
- }
- return track;
-}
-
-status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
- AudioSystem::sync_event_t event,
- int triggerSession)
-{
- ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
- sp<ThreadBase> strongMe = this;
- status_t status = NO_ERROR;
-
- if (event == AudioSystem::SYNC_EVENT_NONE) {
- clearSyncStartEvent();
- } else if (event != AudioSystem::SYNC_EVENT_SAME) {
- mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
- triggerSession,
- recordTrack->sessionId(),
- syncStartEventCallback,
- this);
- // Sync event can be cancelled by the trigger session if the track is not in a
- // compatible state, in which case we start recording immediately
- if (mSyncStartEvent->isCancelled()) {
- clearSyncStartEvent();
- } else {
- // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs
- mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000);
- }
- }
-
- {
- AutoMutex lock(mLock);
- if (mActiveTrack != 0) {
- if (recordTrack != mActiveTrack.get()) {
- status = -EBUSY;
- } else if (mActiveTrack->mState == TrackBase::PAUSING) {
- mActiveTrack->mState = TrackBase::ACTIVE;
- }
- return status;
- }
-
- recordTrack->mState = TrackBase::IDLE;
- mActiveTrack = recordTrack;
- mLock.unlock();
- status = AudioSystem::startInput(mId); // update the outer status so startError returns it
- mLock.lock();
- if (status != NO_ERROR) {
- mActiveTrack.clear();
- clearSyncStartEvent();
- return status;
- }
- mRsmpInIndex = mFrameCount;
- mBytesRead = 0;
- if (mResampler != NULL) {
- mResampler->reset();
- }
- mActiveTrack->mState = TrackBase::RESUMING;
- // signal thread to start
- ALOGV("Signal record thread");
- mWaitWorkCV.broadcast();
- // do not wait for mStartStopCond if exiting
- if (exitPending()) {
- mActiveTrack.clear();
- status = INVALID_OPERATION;
- goto startError;
- }
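- // wait until threadLoop() has processed the first read and either confirmed or
- // aborted the start (signalled via mStartStopCond)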
- mStartStopCond.wait(mLock);
- if (mActiveTrack == 0) {
- ALOGV("Record failed to start");
- status = BAD_VALUE;
- goto startError;
- }
- ALOGV("Record started OK");
- return status;
- }
-startError:
- AudioSystem::stopInput(mId);
- clearSyncStartEvent();
- return status;
-}
-
-void AudioFlinger::RecordThread::clearSyncStartEvent()
-{
- if (mSyncStartEvent != 0) {
- mSyncStartEvent->cancel();
- }
- mSyncStartEvent.clear();
- mFramestoDrop = 0;
-}
-
-void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
-{
- sp<SyncEvent> strongEvent = event.promote();
-
- if (strongEvent != 0) {
- RecordThread *me = (RecordThread *)strongEvent->cookie();
- me->handleSyncStartEvent(strongEvent);
- }
-}
-
-void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event)
-{
- if (event == mSyncStartEvent) {
- // TODO: use actual buffer filling status instead of 2 buffers when info is available
- // from audio HAL
- mFramestoDrop = mFrameCount * 2;
- }
-}
-
-bool AudioFlinger::RecordThread::stop_l(RecordThread::RecordTrack* recordTrack) {
- ALOGV("RecordThread::stop");
- if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
- return false;
- }
- recordTrack->mState = TrackBase::PAUSING;
- // do not wait for mStartStopCond if exiting
- if (exitPending()) {
- return true;
- }
- mStartStopCond.wait(mLock);
- // if we have been restarted, recordTrack == mActiveTrack.get() here
- if (exitPending() || recordTrack != mActiveTrack.get()) {
- ALOGV("Record stopped OK");
- return true;
- }
- return false;
-}
-
-bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const
-{
- return false;
-}
-
-status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event)
-{
-#if 0 // This branch is currently dead code, but is preserved in case it is needed in the future
- if (!isValidSyncEvent(event)) {
- return BAD_VALUE;
- }
-
- int eventSession = event->triggerSession();
- status_t ret = NAME_NOT_FOUND;
-
- Mutex::Autolock _l(mLock);
-
- for (size_t i = 0; i < mTracks.size(); i++) {
- sp<RecordTrack> track = mTracks[i];
- if (eventSession == track->sessionId()) {
- (void) track->setSyncEvent(event);
- ret = NO_ERROR;
- }
- }
- return ret;
-#else
- return BAD_VALUE;
-#endif
-}
-
-void AudioFlinger::RecordThread::RecordTrack::destroy()
-{
- // see comments at AudioFlinger::PlaybackThread::Track::destroy()
- sp<RecordTrack> keep(this);
- {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- if (mState == ACTIVE || mState == RESUMING) {
- AudioSystem::stopInput(thread->id());
- }
- AudioSystem::releaseInput(thread->id());
- Mutex::Autolock _l(thread->mLock);
- RecordThread *recordThread = (RecordThread *) thread.get();
- recordThread->destroyTrack_l(this);
- }
- }
-}
-
-// destroyTrack_l() must be called with ThreadBase::mLock held
-void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
-{
- track->mState = TrackBase::TERMINATED;
- // active tracks are removed by threadLoop()
- if (mActiveTrack != track) {
- removeTrack_l(track);
- }
-}
-
-void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track)
-{
- mTracks.remove(track);
- // need anything related to effects here?
-}
-
-void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
-{
- dumpInternals(fd, args);
- dumpTracks(fd, args);
- dumpEffectChains(fd, args);
-}
-
-void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "\nInput thread %p internals\n", this);
- result.append(buffer);
-
- if (mActiveTrack != 0) {
- snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex);
- result.append(buffer);
- snprintf(buffer, SIZE, "In size: %d\n", mInputBytes);
- result.append(buffer);
- snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL));
- result.append(buffer);
- snprintf(buffer, SIZE, "Out channel count: %d\n", mReqChannelCount);
- result.append(buffer);
- snprintf(buffer, SIZE, "Out sample rate: %d\n", mReqSampleRate);
- result.append(buffer);
- } else {
- result.append("No active record client\n");
- }
-
- write(fd, result.string(), result.size());
-
- dumpBase(fd, args);
-}
-
-void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "Input thread %p tracks\n", this);
- result.append(buffer);
- RecordTrack::appendDumpHeader(result);
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<RecordTrack> track = mTracks[i];
- if (track != 0) {
- track->dump(buffer, SIZE);
- result.append(buffer);
- }
- }
-
- if (mActiveTrack != 0) {
- snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this);
- result.append(buffer);
- RecordTrack::appendDumpHeader(result);
- mActiveTrack->dump(buffer, SIZE);
- result.append(buffer);
-
- }
- write(fd, result.string(), result.size());
-}
-
-// AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
-{
- size_t framesReq = buffer->frameCount;
- size_t framesReady = mFrameCount - mRsmpInIndex;
- int channelCount;
-
- if (framesReady == 0) {
- mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
- if (mBytesRead <= 0) {
- if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) {
- ALOGE("RecordThread::getNextBuffer() Error reading audio input");
- // Force input into standby so that it tries to
- // recover at next read attempt
- inputStandBy();
- usleep(kRecordThreadSleepUs);
- }
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return NOT_ENOUGH_DATA;
- }
- mRsmpInIndex = 0;
- framesReady = mFrameCount;
- }
-
- if (framesReq > framesReady) {
- framesReq = framesReady;
- }
-
- if (mChannelCount == 1 && mReqChannelCount == 2) {
- channelCount = 1;
- } else {
- channelCount = 2;
- }
- buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount;
- buffer->frameCount = framesReq;
- return NO_ERROR;
-}
-
-// AudioBufferProvider interface
-void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
-{
- mRsmpInIndex += buffer->frameCount;
- buffer->frameCount = 0;
-}
-
-bool AudioFlinger::RecordThread::checkForNewParameters_l()
-{
- bool reconfig = false;
-
- while (!mNewParameters.isEmpty()) {
- status_t status = NO_ERROR;
- String8 keyValuePair = mNewParameters[0];
- AudioParameter param = AudioParameter(keyValuePair);
- int value;
- audio_format_t reqFormat = mFormat;
- int reqSamplingRate = mReqSampleRate;
- int reqChannelCount = mReqChannelCount;
-
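- // Parse the new key/value pairs; any change to sample rate, format, channel count or
- // frame count requires the input stream to be reconfigured (reconfig == true).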
- if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
- reqSamplingRate = value;
- reconfig = true;
- }
- if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
- reqFormat = (audio_format_t) value;
- reconfig = true;
- }
- if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
- reqChannelCount = popcount(value);
- reconfig = true;
- }
- if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
- // do not accept frame count changes if tracks are open as the track buffer
- // size depends on frame count and correct behavior would not be guaranteed
- // if frame count is changed after track creation
- if (mActiveTrack != 0) {
- status = INVALID_OPERATION;
- } else {
- reconfig = true;
- }
- }
- if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
- // forward device change to effects that have requested to be
- // aware of attached audio device.
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->setDevice_l(value);
- }
-
- // store input device and output device but do not forward output device to audio HAL.
- // Note that status is ignored by the caller for output device
- // (see AudioFlinger::setParameters())
- if (audio_is_output_devices(value)) {
- mOutDevice = value;
- status = BAD_VALUE;
- } else {
- mInDevice = value;
- // disable AEC and NS if the device is a BT SCO headset supporting those pre-processing effects
- if (mTracks.size() > 0) {
- bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
- mAudioFlinger->btNrecIsOff();
- for (size_t i = 0; i < mTracks.size(); i++) {
- sp<RecordTrack> track = mTracks[i];
- setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
- setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId());
- }
- }
- }
- }
- if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR &&
- mAudioSource != (audio_source_t)value) {
- // forward device change to effects that have requested to be
- // aware of attached audio device.
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->setAudioSource_l((audio_source_t)value);
- }
- mAudioSource = (audio_source_t)value;
- }
- if (status == NO_ERROR) {
- status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
- if (status == INVALID_OPERATION) {
- inputStandBy();
- status = mInput->stream->common.set_parameters(&mInput->stream->common,
- keyValuePair.string());
- }
- if (reconfig) {
- if (status == BAD_VALUE &&
- reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
- reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
- ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) &&
- popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
- (reqChannelCount <= FCC_2)) {
- status = NO_ERROR;
- }
- if (status == NO_ERROR) {
- readInputParameters();
- sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED);
- }
- }
- }
-
- mNewParameters.removeAt(0);
-
- mParamStatus = status;
- mParamCond.signal();
- // wait for condition with time out in case the thread calling ThreadBase::setParameters()
- // already timed out waiting for the status and will never signal the condition.
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
- }
- return reconfig;
-}
-
-String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
-{
- char *s;
- String8 out_s8 = String8();
-
- Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return out_s8;
- }
-
- s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
- out_s8 = String8(s);
- free(s);
- return out_s8;
-}
-
-void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
- AudioSystem::OutputDescriptor desc;
- void *param2 = NULL;
-
- switch (event) {
- case AudioSystem::INPUT_OPENED:
- case AudioSystem::INPUT_CONFIG_CHANGED:
- desc.channels = mChannelMask;
- desc.samplingRate = mSampleRate;
- desc.format = mFormat;
- desc.frameCount = mFrameCount;
- desc.latency = 0;
- param2 = &desc;
- break;
-
- case AudioSystem::INPUT_CLOSED:
- default:
- break;
- }
- mAudioFlinger->audioConfigChanged_l(event, mId, param2);
-}
-
-void AudioFlinger::RecordThread::readInputParameters()
-{
- delete[] mRsmpInBuffer;
- // mRsmpInBuffer is always assigned a new[] below
- delete[] mRsmpOutBuffer;
- mRsmpOutBuffer = NULL;
- delete mResampler;
- mResampler = NULL;
-
- mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
- mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
- mChannelCount = (uint16_t)popcount(mChannelMask);
- mFormat = mInput->stream->common.get_format(&mInput->stream->common);
- mFrameSize = audio_stream_frame_size(&mInput->stream->common);
- mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
- mFrameCount = mInputBytes / mFrameSize;
- mNormalFrameCount = mFrameCount; // not used by record, but used by input effects
- mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
-
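- // A resampler is only needed when the hardware rate differs from the requested rate;
- // it is limited to mono or stereo on both input and output.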
- if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
- {
- int channelCount;
- // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
- // stereo to mono post process as the resampler always outputs stereo.
- if (mChannelCount == 1 && mReqChannelCount == 2) {
- channelCount = 1;
- } else {
- channelCount = 2;
- }
- mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
- mResampler->setSampleRate(mSampleRate);
- mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
- mRsmpOutBuffer = new int32_t[mFrameCount * 2];
-
- // optimization: if mono to mono, alter input frame count as if we were inputting stereo samples
- if (mChannelCount == 1 && mReqChannelCount == 1) {
- mFrameCount >>= 1;
- }
-
- }
- mRsmpInIndex = mFrameCount;
-}
-
-unsigned int AudioFlinger::RecordThread::getInputFramesLost()
-{
- Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return 0;
- }
-
- return mInput->stream->get_input_frames_lost(mInput->stream);
-}
-
-uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId) const
-{
- Mutex::Autolock _l(mLock);
- uint32_t result = 0;
- if (getEffectChain_l(sessionId) != 0) {
- result = EFFECT_SESSION;
- }
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- if (sessionId == mTracks[i]->sessionId()) {
- result |= TRACK_SESSION;
- break;
- }
- }
-
- return result;
-}
-
-KeyedVector<int, bool> AudioFlinger::RecordThread::sessionIds() const
-{
- KeyedVector<int, bool> ids;
- Mutex::Autolock _l(mLock);
- for (size_t j = 0; j < mTracks.size(); ++j) {
- sp<RecordThread::RecordTrack> track = mTracks[j];
- int sessionId = track->sessionId();
- if (ids.indexOfKey(sessionId) < 0) {
- ids.add(sessionId, true);
- }
- }
- return ids;
-}
-
-AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::clearInput()
-{
- Mutex::Autolock _l(mLock);
- AudioStreamIn *input = mInput;
- mInput = NULL;
- return input;
-}
-
-// this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::RecordThread::stream() const
-{
- if (mInput == NULL) {
- return NULL;
- }
- return &mInput->stream->common;
-}
// ----------------------------------------------------------------------------
@@ -6924,14 +1284,14 @@ audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
// ----------------------------------------------------------------------------
-int32_t AudioFlinger::getPrimaryOutputSamplingRate()
+uint32_t AudioFlinger::getPrimaryOutputSamplingRate()
{
Mutex::Autolock _l(mLock);
PlaybackThread *thread = primaryPlaybackThread_l();
return thread != NULL ? thread->sampleRate() : 0;
}
-int32_t AudioFlinger::getPrimaryOutputFrameCount()
+size_t AudioFlinger::getPrimaryOutputFrameCount()
{
Mutex::Autolock _l(mLock);
PlaybackThread *thread = primaryPlaybackThread_l();
@@ -6989,7 +1349,8 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
&outStream);
mHardwareStatus = AUDIO_HW_IDLE;
- ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
+ ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, "
+ "Channels %x, status %d",
outStream,
config.sample_rate,
config.format,
@@ -7042,7 +1403,8 @@ audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1,
MixerThread *thread2 = checkMixerThread_l(output2);
if (thread1 == NULL || thread2 == NULL) {
- ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, output2);
+ ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1,
+ output2);
return 0;
}
@@ -7077,7 +1439,8 @@ status_t AudioFlinger::closeOutput_nonvirtual(audio_io_handle_t output)
if (thread->type() == ThreadBase::MIXER) {
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
if (mPlaybackThreads.valueAt(i)->type() == ThreadBase::DUPLICATING) {
- DuplicatingThread *dupThread = (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
+ DuplicatingThread *dupThread =
+ (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
dupThread->removeOutputTrack((MixerThread *)thread.get());
}
}
@@ -7164,16 +1527,17 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
&inStream);
- ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, status %d",
+ ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, "
+ "status %d",
inStream,
config.sample_rate,
config.format,
config.channel_mask,
status);
- // If the input could not be opened with the requested parameters and we can handle the conversion internally,
- // try to open again with the proposed parameters. The AudioFlinger can resample the input and do mono to stereo
- // or stereo to mono conversions on 16 bit PCM inputs.
+ // If the input could not be opened with the requested parameters and we can handle the
+ // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
+ // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs.
if (status == BAD_VALUE &&
reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
(config.sample_rate <= 2 * reqSamplingRate) &&
@@ -7184,18 +1548,66 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
}
if (status == NO_ERROR && inStream != NULL) {
+
+ // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
+ // or (re-)create if current Pipe is idle and does not match the new format
+ sp<NBAIO_Sink> teeSink;
+#ifdef TEE_SINK_INPUT_FRAMES
+ enum {
+ TEE_SINK_NO, // don't copy input
+ TEE_SINK_NEW, // copy input using a new pipe
+ TEE_SINK_OLD, // copy input using an existing pipe
+ } kind;
+ NBAIO_Format format = Format_from_SR_C(inStream->common.get_sample_rate(&inStream->common),
+ popcount(inStream->common.get_channels(&inStream->common)));
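+ // Re-use the existing pipe only when we hold the sole reference to it and its format
+ // matches; create a new one when there is none or the format changed, and skip the
+ // tee if the format is invalid or the pipe is still in use.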
+ if (format == Format_Invalid) {
+ kind = TEE_SINK_NO;
+ } else if (mRecordTeeSink == 0) {
+ kind = TEE_SINK_NEW;
+ } else if (mRecordTeeSink->getStrongCount() != 1) {
+ kind = TEE_SINK_NO;
+ } else if (format == mRecordTeeSink->format()) {
+ kind = TEE_SINK_OLD;
+ } else {
+ kind = TEE_SINK_NEW;
+ }
+ switch (kind) {
+ case TEE_SINK_NEW: {
+ Pipe *pipe = new Pipe(TEE_SINK_INPUT_FRAMES, format);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {format};
+ ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ PipeReader *pipeReader = new PipeReader(*pipe);
+ numCounterOffers = 0;
+ index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ mRecordTeeSink = pipe;
+ mRecordTeeSource = pipeReader;
+ teeSink = pipe;
+ }
+ break;
+ case TEE_SINK_OLD:
+ teeSink = mRecordTeeSink;
+ break;
+ case TEE_SINK_NO:
+ default:
+ break;
+ }
+#endif
AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
// Start record thread
// RecordThread requires both input and output device indication to forward to audio
// pre processing modules
audio_devices_t device = (*pDevices) | primaryOutputDevice_l();
+
thread = new RecordThread(this,
input,
reqSamplingRate,
reqChannels,
id,
- device);
+ device, teeSink);
mRecordThreads.add(id, thread);
ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
@@ -7736,2025 +2148,67 @@ status_t AudioFlinger::moveEffectChain_l(int sessionId,
return NO_ERROR;
}
-
-// PlaybackThread::createEffect_l() must be called with AudioFlinger::mLock held
-sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
- const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
- int32_t priority,
- int sessionId,
- effect_descriptor_t *desc,
- int *enabled,
- status_t *status
- )
+void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id)
{
- sp<EffectModule> effect;
- sp<EffectHandle> handle;
- status_t lStatus;
- sp<EffectChain> chain;
- bool chainCreated = false;
- bool effectCreated = false;
- bool effectRegistered = false;
-
- lStatus = initCheck();
- if (lStatus != NO_ERROR) {
- ALOGW("createEffect_l() Audio driver not initialized.");
- goto Exit;
- }
-
- // Do not allow effects with session ID 0 on direct output or duplicating threads
- // TODO: add rule for hw accelerated effects on direct outputs with non PCM format
- if (sessionId == AUDIO_SESSION_OUTPUT_MIX && mType != MIXER) {
- ALOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
- desc->name, sessionId);
- lStatus = BAD_VALUE;
- goto Exit;
- }
- // Pre processor effects are only allowed on record (input) threads, and record threads
- // only accept pre processor effects
- if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
- ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
- desc->name, desc->flags, mType);
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
-
- { // scope for mLock
- Mutex::Autolock _l(mLock);
-
- // check for existing effect chain with the requested audio session
- chain = getEffectChain_l(sessionId);
- if (chain == 0) {
- // create a new chain for this session
- ALOGV("createEffect_l() new effect chain for session %d", sessionId);
- chain = new EffectChain(this, sessionId);
- addEffectChain_l(chain);
- chain->setStrategy(getStrategyForSession_l(sessionId));
- chainCreated = true;
- } else {
- effect = chain->getEffectFromDesc_l(desc);
- }
-
- ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
-
- if (effect == 0) {
- int id = mAudioFlinger->nextUniqueId();
- // Check CPU and memory usage
- lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
- if (lStatus != NO_ERROR) {
- goto Exit;
- }
- effectRegistered = true;
- // create a new effect module if none present in the chain
- effect = new EffectModule(this, chain, desc, id, sessionId);
- lStatus = effect->status();
- if (lStatus != NO_ERROR) {
- goto Exit;
- }
- lStatus = chain->addEffect_l(effect);
- if (lStatus != NO_ERROR) {
- goto Exit;
- }
- effectCreated = true;
-
- effect->setDevice(mOutDevice);
- effect->setDevice(mInDevice);
- effect->setMode(mAudioFlinger->getMode());
- effect->setAudioSource(mAudioSource);
- }
- // create effect handle and connect it to effect module
- handle = new EffectHandle(effect, client, effectClient, priority);
- lStatus = effect->addHandle(handle.get());
- if (enabled != NULL) {
- *enabled = (int)effect->isEnabled();
- }
- }
-
-Exit:
- if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
- Mutex::Autolock _l(mLock);
- if (effectCreated) {
- chain->removeEffect_l(effect);
- }
- if (effectRegistered) {
- AudioSystem::unregisterEffect(effect->id());
- }
- if (chainCreated) {
- removeEffectChain_l(chain);
- }
- handle.clear();
- }
-
- if (status != NULL) {
- *status = lStatus;
- }
- return handle;
-}
-
-sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId)
-{
- Mutex::Autolock _l(mLock);
- return getEffect_l(sessionId, effectId);
-}
-
-sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
-{
- sp<EffectChain> chain = getEffectChain_l(sessionId);
- return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
-}
-
-// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
-// PlaybackThread::mLock held
-status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
-{
- // check for existing effect chain with the requested audio session
- int sessionId = effect->sessionId();
- sp<EffectChain> chain = getEffectChain_l(sessionId);
- bool chainCreated = false;
-
- if (chain == 0) {
- // create a new chain for this session
- ALOGV("addEffect_l() new effect chain for session %d", sessionId);
- chain = new EffectChain(this, sessionId);
- addEffectChain_l(chain);
- chain->setStrategy(getStrategyForSession_l(sessionId));
- chainCreated = true;
- }
- ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());
-
- if (chain->getEffectFromId_l(effect->id()) != 0) {
- ALOGW("addEffect_l() %p effect %s already present in chain %p",
- this, effect->desc().name, chain.get());
- return BAD_VALUE;
- }
-
- status_t status = chain->addEffect_l(effect);
- if (status != NO_ERROR) {
- if (chainCreated) {
- removeEffectChain_l(chain);
- }
- return status;
- }
-
- effect->setDevice(mOutDevice);
- effect->setDevice(mInDevice);
- effect->setMode(mAudioFlinger->getMode());
- effect->setAudioSource(mAudioSource);
- return NO_ERROR;
-}
-
-void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
-
- ALOGV("removeEffect_l() %p effect %p", this, effect.get());
- effect_descriptor_t desc = effect->desc();
- if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- detachAuxEffect_l(effect->id());
- }
-
- sp<EffectChain> chain = effect->chain().promote();
- if (chain != 0) {
- // remove effect chain if removing last effect
- if (chain->removeEffect_l(effect) == 0) {
- removeEffectChain_l(chain);
- }
- } else {
- ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
- }
-}
-
-void AudioFlinger::ThreadBase::lockEffectChains_l(
- Vector< sp<AudioFlinger::EffectChain> >& effectChains)
-{
- effectChains = mEffectChains;
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->lock();
- }
-}
-
-void AudioFlinger::ThreadBase::unlockEffectChains(
- const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
-{
- for (size_t i = 0; i < effectChains.size(); i++) {
- effectChains[i]->unlock();
- }
-}
-
-sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
-{
- Mutex::Autolock _l(mLock);
- return getEffectChain_l(sessionId);
-}
-
-sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const
-{
- size_t size = mEffectChains.size();
- for (size_t i = 0; i < size; i++) {
- if (mEffectChains[i]->sessionId() == sessionId) {
- return mEffectChains[i];
- }
- }
- return 0;
-}
-
-void AudioFlinger::ThreadBase::setMode(audio_mode_t mode)
-{
- Mutex::Autolock _l(mLock);
- size_t size = mEffectChains.size();
- for (size_t i = 0; i < size; i++) {
- mEffectChains[i]->setMode_l(mode);
- }
-}
-
-void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect,
- EffectHandle *handle,
- bool unpinIfLast) {
-
- Mutex::Autolock _l(mLock);
- ALOGV("disconnectEffect() %p effect %p", this, effect.get());
- // delete the effect module if removing last handle on it
- if (effect->removeHandle(handle) == 0) {
- if (!effect->isPinned() || unpinIfLast) {
- removeEffect_l(effect);
- AudioSystem::unregisterEffect(effect->id());
- }
- }
-}
-
-status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
-{
- int session = chain->sessionId();
- int16_t *buffer = mMixBuffer;
- bool ownsBuffer = false;
-
- ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
- if (session > 0) {
- // Only one effect chain can be present in direct output thread and it uses
- // the mix buffer as input
- if (mType != DIRECT) {
- size_t numSamples = mNormalFrameCount * mChannelCount;
- buffer = new int16_t[numSamples];
- memset(buffer, 0, numSamples * sizeof(int16_t));
- ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session);
- ownsBuffer = true;
- }
-
- // Attach all tracks with same session ID to this chain.
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> track = mTracks[i];
- if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(), buffer);
- track->setMainBuffer(buffer);
- chain->incTrackCnt();
- }
- }
-
- // indicate all active tracks in the chain
- for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
- sp<Track> track = mActiveTracks[i].promote();
- if (track == 0) continue;
- if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
- chain->incActiveTrackCnt();
- }
- }
- }
-
- chain->setInBuffer(buffer, ownsBuffer);
- chain->setOutBuffer(mMixBuffer);
- // The effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at the end of the
- // effect chains list in order to be processed last, as it contains output stage effects.
- // The effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted just before the
- // AUDIO_SESSION_OUTPUT_STAGE chain so that it is processed after track specific effects
- // and before the output stage.
- // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
- // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX.
- // Effect chains for other sessions are inserted at the beginning of the effect
- // chains list to be processed before output mix effects. Relative order between
- // other sessions is not important.
- size_t size = mEffectChains.size();
- size_t i = 0;
- for (i = 0; i < size; i++) {
- if (mEffectChains[i]->sessionId() < session) break;
- }
- mEffectChains.insertAt(chain, i);
- checkSuspendOnAddEffectChain_l(chain);
-
- return NO_ERROR;
-}
-
-size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain)
-{
- int session = chain->sessionId();
-
- ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
-
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- if (chain == mEffectChains[i]) {
- mEffectChains.removeAt(i);
- // detach all active tracks from the chain
- for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
- sp<Track> track = mActiveTracks[i].promote();
- if (track == 0) continue;
- if (session == track->sessionId()) {
- ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
- chain.get(), session);
- chain->decActiveTrackCnt();
- }
- }
-
- // detach all tracks with same session ID from this chain
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> track = mTracks[i];
- if (session == track->sessionId()) {
- track->setMainBuffer(mMixBuffer);
- chain->decTrackCnt();
- }
- }
- break;
- }
- }
- return mEffectChains.size();
-}
-
-status_t AudioFlinger::PlaybackThread::attachAuxEffect(
- const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
-{
- Mutex::Autolock _l(mLock);
- return attachAuxEffect_l(track, EffectId);
-}
-
-status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(
- const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
-{
- status_t status = NO_ERROR;
-
- if (EffectId == 0) {
- track->setAuxBuffer(0, NULL);
- } else {
- // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
- sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
- if (effect != 0) {
- if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
- } else {
- status = INVALID_OPERATION;
- }
- } else {
- status = BAD_VALUE;
- }
- }
- return status;
-}
-
-void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId)
-{
- for (size_t i = 0; i < mTracks.size(); ++i) {
- sp<Track> track = mTracks[i];
- if (track->auxEffectId() == effectId) {
- attachAuxEffect_l(track, 0);
- }
- }
-}
-
-status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
-{
- // only one chain per input thread
- if (mEffectChains.size() != 0) {
- return INVALID_OPERATION;
- }
- ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this);
-
- chain->setInBuffer(NULL);
- chain->setOutBuffer(NULL);
-
- checkSuspendOnAddEffectChain_l(chain);
-
- mEffectChains.add(chain);
-
- return NO_ERROR;
-}
-
-size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& chain)
-{
- ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
- ALOGW_IF(mEffectChains.size() != 1,
- "removeEffectChain_l() %p invalid chain size %d on thread %p",
- chain.get(), mEffectChains.size(), this);
- if (mEffectChains.size() == 1) {
- mEffectChains.removeAt(0);
- }
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// EffectModule implementation
-// ----------------------------------------------------------------------------
-
-#undef LOG_TAG
-#define LOG_TAG "AudioFlinger::EffectModule"
-
-AudioFlinger::EffectModule::EffectModule(ThreadBase *thread,
- const wp<AudioFlinger::EffectChain>& chain,
- effect_descriptor_t *desc,
- int id,
- int sessionId)
- : mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX),
- mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
- mDescriptor(*desc),
- // mConfig is set by configure() and not used before then
- mEffectInterface(NULL),
- mStatus(NO_INIT), mState(IDLE),
- // mMaxDisableWaitCnt is set by configure() and not used before then
- // mDisableWaitCnt is set by process() and updateState() and not used before then
- mSuspended(false)
-{
- ALOGV("Constructor %p", this);
- int lStatus;
-
- // create effect engine from effect factory
- mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface);
-
- if (mStatus != NO_ERROR) {
- return;
- }
- lStatus = init();
- if (lStatus < 0) {
- mStatus = lStatus;
- goto Error;
- }
-
- ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface);
- return;
-Error:
- EffectRelease(mEffectInterface);
- mEffectInterface = NULL;
- ALOGV("Constructor Error %d", mStatus);
-}
-
-AudioFlinger::EffectModule::~EffectModule()
-{
- ALOGV("Destructor %p", this);
- if (mEffectInterface != NULL) {
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
- (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- audio_stream_t *stream = thread->stream();
- if (stream != NULL) {
- stream->remove_audio_effect(stream, mEffectInterface);
- }
- }
- }
- // release effect engine
- EffectRelease(mEffectInterface);
- }
-}
-
-status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle)
-{
- status_t status;
-
- Mutex::Autolock _l(mLock);
- int priority = handle->priority();
- size_t size = mHandles.size();
- EffectHandle *controlHandle = NULL;
- size_t i;
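- // Handles are kept sorted by decreasing priority so that the highest priority handle
- // comes first in the list.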
- for (i = 0; i < size; i++) {
- EffectHandle *h = mHandles[i];
- if (h == NULL || h->destroyed_l()) continue;
- // first non destroyed handle is considered in control
- if (controlHandle == NULL)
- controlHandle = h;
- if (h->priority() <= priority) break;
- }
- // if inserted in first place, move effect control from previous owner to this handle
- if (i == 0) {
- bool enabled = false;
- if (controlHandle != NULL) {
- enabled = controlHandle->enabled();
- controlHandle->setControl(false/*hasControl*/, true /*signal*/, enabled /*enabled*/);
- }
- handle->setControl(true /*hasControl*/, false /*signal*/, enabled /*enabled*/);
- status = NO_ERROR;
- } else {
- status = ALREADY_EXISTS;
- }
- ALOGV("addHandle() %p added handle %p in position %d", this, handle, i);
- mHandles.insertAt(handle, i);
- return status;
-}
-
-size_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle)
-{
- Mutex::Autolock _l(mLock);
- size_t size = mHandles.size();
- size_t i;
- for (i = 0; i < size; i++) {
- if (mHandles[i] == handle) break;
- }
- if (i == size) {
- return size;
- }
- ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i);
-
- mHandles.removeAt(i);
- // if removed from first place, move effect control from this handle to next in line
- if (i == 0) {
- EffectHandle *h = controlHandle_l();
- if (h != NULL) {
- h->setControl(true /*hasControl*/, true /*signal*/ , handle->enabled() /*enabled*/);
- }
- }
-
- // Prevent calls to process() and other functions on effect interface from now on.
- // The effect engine will be released by the destructor when the last strong reference on
- // this object is released which can happen after next process is called.
- if (mHandles.size() == 0 && !mPinned) {
- mState = DESTROYED;
- }
-
- return mHandles.size();
-}
-
-// must be called with EffectModule::mLock held
-AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l()
-{
- // the first valid handle in the list has control over the module
- for (size_t i = 0; i < mHandles.size(); i++) {
- EffectHandle *h = mHandles[i];
- if (h != NULL && !h->destroyed_l()) {
- return h;
- }
- }
-
- return NULL;
-}
-
-size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast)
-{
- ALOGV("disconnect() %p handle %p", this, handle);
- // keep a strong reference on this EffectModule to avoid calling the
- // destructor before we exit
- sp<EffectModule> keep(this);
- {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- thread->disconnectEffect(keep, handle, unpinIfLast);
- }
- }
- return mHandles.size();
-}
-
-void AudioFlinger::EffectModule::updateState() {
- Mutex::Autolock _l(mLock);
-
- switch (mState) {
- case RESTART:
- reset_l();
- // FALL THROUGH
-
- case STARTING:
- // clear auxiliary effect input buffer for next accumulation
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- memset(mConfig.inputCfg.buffer.raw,
- 0,
- mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
- }
- start_l();
- mState = ACTIVE;
- break;
- case STOPPING:
- stop_l();
- mDisableWaitCnt = mMaxDisableWaitCnt;
- mState = STOPPED;
- break;
- case STOPPED:
- // mDisableWaitCnt is forced to 1 by process() when the engine indicates the end of the
- // turn off sequence.
- if (--mDisableWaitCnt == 0) {
- reset_l();
- mState = IDLE;
- }
- break;
- default: // IDLE, ACTIVE, DESTROYED
- break;
- }
-}
-
-void AudioFlinger::EffectModule::process()
-{
- Mutex::Autolock _l(mLock);
-
- if (mState == DESTROYED || mEffectInterface == NULL ||
- mConfig.inputCfg.buffer.raw == NULL ||
- mConfig.outputCfg.buffer.raw == NULL) {
- return;
- }
-
- if (isProcessEnabled()) {
- // do 32 bit to 16 bit conversion for auxiliary effect input buffer
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- ditherAndClamp(mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.frameCount/2);
- }
-
- // do the actual processing in the effect engine
- int ret = (*mEffectInterface)->process(mEffectInterface,
- &mConfig.inputCfg.buffer,
- &mConfig.outputCfg.buffer);
-
- // force transition to IDLE state when engine is ready
- if (mState == STOPPED && ret == -ENODATA) {
- mDisableWaitCnt = 1;
- }
-
- // clear auxiliary effect input buffer for next accumulation
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- memset(mConfig.inputCfg.buffer.raw, 0,
- mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
- }
- } else if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_INSERT &&
- mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
- // If an insert effect is idle and input buffer is different from output buffer,
- // accumulate input onto output
- sp<EffectChain> chain = mChain.promote();
- if (chain != 0 && chain->activeTrackCnt() != 0) {
- size_t frameCnt = mConfig.inputCfg.buffer.frameCount * 2; //always stereo here
- int16_t *in = mConfig.inputCfg.buffer.s16;
- int16_t *out = mConfig.outputCfg.buffer.s16;
- for (size_t i = 0; i < frameCnt; i++) {
- out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]);
- }
- }
- }
-}
-
-void AudioFlinger::EffectModule::reset_l()
-{
- if (mEffectInterface == NULL) {
- return;
- }
- (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
-}
-
-status_t AudioFlinger::EffectModule::configure()
-{
- if (mEffectInterface == NULL) {
- return NO_INIT;
- }
-
- sp<ThreadBase> thread = mThread.promote();
- if (thread == 0) {
- return DEAD_OBJECT;
- }
-
- // TODO: handle configuration of effects replacing track process
- audio_channel_mask_t channelMask = thread->channelMask();
-
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
- } else {
- mConfig.inputCfg.channels = channelMask;
- }
- mConfig.outputCfg.channels = channelMask;
- mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
- mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
- mConfig.inputCfg.samplingRate = thread->sampleRate();
- mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
- mConfig.inputCfg.bufferProvider.cookie = NULL;
- mConfig.inputCfg.bufferProvider.getBuffer = NULL;
- mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
- mConfig.outputCfg.bufferProvider.cookie = NULL;
- mConfig.outputCfg.bufferProvider.getBuffer = NULL;
- mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
- mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
- // Insert effect:
- // - in session AUDIO_SESSION_OUTPUT_MIX or AUDIO_SESSION_OUTPUT_STAGE,
- // always overwrites output buffer: input buffer == output buffer
- // - in other sessions:
- // last effect in the chain accumulates in output buffer: input buffer != output buffer
- // other effect: overwrites output buffer: input buffer == output buffer
- // Auxiliary effect:
- // accumulates in output buffer: input buffer != output buffer
- // Therefore: accumulate <=> input buffer != output buffer
- if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
- mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
- } else {
- mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
- }
- mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
- mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
- mConfig.inputCfg.buffer.frameCount = thread->frameCount();
- mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
-
- ALOGV("configure() %p thread %p buffer %p framecount %d",
- this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
-
- status_t cmdStatus;
- uint32_t size = sizeof(int);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_CONFIG,
- sizeof(effect_config_t),
- &mConfig,
- &size,
- &cmdStatus);
- if (status == 0) {
- status = cmdStatus;
- }
-
- if (status == 0 &&
- (memcmp(&mDescriptor.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0)) {
- uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
- effect_param_t *p = (effect_param_t *)buf32;
-
- p->psize = sizeof(uint32_t);
- p->vsize = sizeof(uint32_t);
- size = sizeof(int);
- *(int32_t *)p->data = VISUALIZER_PARAM_LATENCY;
-
- uint32_t latency = 0;
- PlaybackThread *pbt = thread->mAudioFlinger->checkPlaybackThread_l(thread->mId);
- if (pbt != NULL) {
- latency = pbt->latency_l();
- }
-
- *((int32_t *)p->data + 1)= latency;
- (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_PARAM,
- sizeof(effect_param_t) + 8,
- &buf32,
- &size,
- &cmdStatus);
- }
-
- mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
- (1000 * mConfig.outputCfg.buffer.frameCount);
-
- return status;
-}
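(Reading aid, not part of the patch: the mMaxDisableWaitCnt expression above converts the disable timeout from milliseconds into a count of process() buffers. A worked example, assuming a hypothetical MAX_DISABLE_TIME_MS of 10000 ms and a thread running at 48 kHz with 1024-frame buffers; these figures are illustrative only:)

    // buffers = (timeout_ms * sample_rate) / (1000 * frames_per_buffer)
    //         = (10000 * 48000) / (1000 * 1024)
    //         = 468 process() calls at most before a STOPPED effect is
    //           reset and returned to IDLE by updateState()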
-
-status_t AudioFlinger::EffectModule::init()
-{
- Mutex::Autolock _l(mLock);
- if (mEffectInterface == NULL) {
- return NO_INIT;
- }
- status_t cmdStatus;
- uint32_t size = sizeof(status_t);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_INIT,
- 0,
- NULL,
- &size,
- &cmdStatus);
- if (status == 0) {
- status = cmdStatus;
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::start()
-{
- Mutex::Autolock _l(mLock);
- return start_l();
-}
-
-status_t AudioFlinger::EffectModule::start_l()
-{
- if (mEffectInterface == NULL) {
- return NO_INIT;
- }
- status_t cmdStatus;
- uint32_t size = sizeof(status_t);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_ENABLE,
- 0,
- NULL,
- &size,
- &cmdStatus);
- if (status == 0) {
- status = cmdStatus;
- }
- if (status == 0 &&
- ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
- (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- audio_stream_t *stream = thread->stream();
- if (stream != NULL) {
- stream->add_audio_effect(stream, mEffectInterface);
- }
- }
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::stop()
-{
- Mutex::Autolock _l(mLock);
- return stop_l();
-}
-
-status_t AudioFlinger::EffectModule::stop_l()
-{
- if (mEffectInterface == NULL) {
- return NO_INIT;
- }
- status_t cmdStatus;
- uint32_t size = sizeof(status_t);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_DISABLE,
- 0,
- NULL,
- &size,
- &cmdStatus);
- if (status == 0) {
- status = cmdStatus;
- }
- if (status == 0 &&
- ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
- (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- audio_stream_t *stream = thread->stream();
- if (stream != NULL) {
- stream->remove_audio_effect(stream, mEffectInterface);
- }
- }
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData)
-{
- Mutex::Autolock _l(mLock);
-// ALOGV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface);
-
- if (mState == DESTROYED || mEffectInterface == NULL) {
- return NO_INIT;
- }
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- cmdCode,
- cmdSize,
- pCmdData,
- replySize,
- pReplyData);
- if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) {
- uint32_t size = (replySize == NULL) ? 0 : *replySize;
- for (size_t i = 1; i < mHandles.size(); i++) {
- EffectHandle *h = mHandles[i];
- if (h != NULL && !h->destroyed_l()) {
- h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData);
- }
- }
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::setEnabled(bool enabled)
-{
- Mutex::Autolock _l(mLock);
- return setEnabled_l(enabled);
-}
-
-// must be called with EffectModule::mLock held
-status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
-{
-
- ALOGV("setEnabled %p enabled %d", this, enabled);
-
- if (enabled != isEnabled()) {
- status_t status = AudioSystem::setEffectEnabled(mId, enabled);
- if (enabled && status != NO_ERROR) {
- return status;
- }
-
- switch (mState) {
- // going from disabled to enabled
- case IDLE:
- mState = STARTING;
- break;
- case STOPPED:
- mState = RESTART;
- break;
- case STOPPING:
- mState = ACTIVE;
- break;
-
- // going from enabled to disabled
- case RESTART:
- mState = STOPPED;
- break;
- case STARTING:
- mState = IDLE;
- break;
- case ACTIVE:
- mState = STOPPING;
- break;
- case DESTROYED:
- return NO_ERROR; // simply ignore as we are being destroyed
- }
- for (size_t i = 1; i < mHandles.size(); i++) {
- EffectHandle *h = mHandles[i];
- if (h != NULL && !h->destroyed_l()) {
- h->setEnabled(enabled);
- }
- }
- }
- return NO_ERROR;
-}
-
-bool AudioFlinger::EffectModule::isEnabled() const
-{
- switch (mState) {
- case RESTART:
- case STARTING:
- case ACTIVE:
- return true;
- case IDLE:
- case STOPPING:
- case STOPPED:
- case DESTROYED:
- default:
- return false;
- }
-}
-
-bool AudioFlinger::EffectModule::isProcessEnabled() const
-{
- switch (mState) {
- case RESTART:
- case ACTIVE:
- case STOPPING:
- case STOPPED:
- return true;
- case IDLE:
- case STARTING:
- case DESTROYED:
- default:
- return false;
- }
-}
-
-status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
-{
- Mutex::Autolock _l(mLock);
- status_t status = NO_ERROR;
-
- // Send volume indication if EFFECT_FLAG_VOLUME_IND is set and read back altered volume
- // if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set)
- if (isProcessEnabled() &&
- ((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
- (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
- status_t cmdStatus;
- uint32_t volume[2];
- uint32_t *pVolume = NULL;
- uint32_t size = sizeof(volume);
- volume[0] = *left;
- volume[1] = *right;
- if (controller) {
- pVolume = volume;
- }
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_VOLUME,
- size,
- volume,
- &size,
- pVolume);
- if (controller && status == NO_ERROR && size == sizeof(volume)) {
- *left = volume[0];
- *right = volume[1];
- }
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device)
-{
- if (device == AUDIO_DEVICE_NONE) {
- return NO_ERROR;
- }
-
- Mutex::Autolock _l(mLock);
- status_t status = NO_ERROR;
- if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
- status_t cmdStatus;
- uint32_t size = sizeof(status_t);
- uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
- EFFECT_CMD_SET_INPUT_DEVICE;
- status = (*mEffectInterface)->command(mEffectInterface,
- cmd,
- sizeof(uint32_t),
- &device,
- &size,
- &cmdStatus);
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::setMode(audio_mode_t mode)
-{
- Mutex::Autolock _l(mLock);
- status_t status = NO_ERROR;
- if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) {
- status_t cmdStatus;
- uint32_t size = sizeof(status_t);
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_AUDIO_MODE,
- sizeof(audio_mode_t),
- &mode,
- &size,
- &cmdStatus);
- if (status == NO_ERROR) {
- status = cmdStatus;
- }
- }
- return status;
-}
-
-status_t AudioFlinger::EffectModule::setAudioSource(audio_source_t source)
-{
- Mutex::Autolock _l(mLock);
- status_t status = NO_ERROR;
- if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) {
- uint32_t size = 0;
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_AUDIO_SOURCE,
- sizeof(audio_source_t),
- &source,
- &size,
- NULL);
- }
- return status;
-}
-
-void AudioFlinger::EffectModule::setSuspended(bool suspended)
-{
- Mutex::Autolock _l(mLock);
- mSuspended = suspended;
-}
-
-bool AudioFlinger::EffectModule::suspended() const
-{
- Mutex::Autolock _l(mLock);
- return mSuspended;
-}
-
-bool AudioFlinger::EffectModule::purgeHandles()
-{
- bool enabled = false;
- Mutex::Autolock _l(mLock);
- for (size_t i = 0; i < mHandles.size(); i++) {
- EffectHandle *handle = mHandles[i];
- if (handle != NULL && !handle->destroyed_l()) {
- handle->effect().clear();
- if (handle->hasControl()) {
- enabled = handle->enabled();
- }
- }
- }
- return enabled;
-}
-
-void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "\tEffect ID %d:\n", mId);
- result.append(buffer);
-
- bool locked = tryLock(mLock);
- // failed to lock - AudioFlinger is probably deadlocked
- if (!locked) {
- result.append("\t\tCould not lock Fx mutex:\n");
- }
-
- result.append("\t\tSession Status State Engine:\n");
- snprintf(buffer, SIZE, "\t\t%05d %03d %03d 0x%08x\n",
- mSessionId, mStatus, mState, (uint32_t)mEffectInterface);
- result.append(buffer);
-
- result.append("\t\tDescriptor:\n");
- snprintf(buffer, SIZE, "\t\t- UUID: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n",
- mDescriptor.uuid.timeLow, mDescriptor.uuid.timeMid, mDescriptor.uuid.timeHiAndVersion,
- mDescriptor.uuid.clockSeq, mDescriptor.uuid.node[0], mDescriptor.uuid.node[1],mDescriptor.uuid.node[2],
- mDescriptor.uuid.node[3],mDescriptor.uuid.node[4],mDescriptor.uuid.node[5]);
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- TYPE: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n",
- mDescriptor.type.timeLow, mDescriptor.type.timeMid, mDescriptor.type.timeHiAndVersion,
- mDescriptor.type.clockSeq, mDescriptor.type.node[0], mDescriptor.type.node[1],mDescriptor.type.node[2],
- mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]);
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n",
- mDescriptor.apiVersion,
- mDescriptor.flags);
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- name: %s\n",
- mDescriptor.name);
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- implementor: %s\n",
- mDescriptor.implementor);
- result.append(buffer);
-
- result.append("\t\t- Input configuration:\n");
- result.append("\t\t\tBuffer Frames Smp rate Channels Format\n");
- snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n",
- (uint32_t)mConfig.inputCfg.buffer.raw,
- mConfig.inputCfg.buffer.frameCount,
- mConfig.inputCfg.samplingRate,
- mConfig.inputCfg.channels,
- mConfig.inputCfg.format);
- result.append(buffer);
-
- result.append("\t\t- Output configuration:\n");
- result.append("\t\t\tBuffer Frames Smp rate Channels Format\n");
- snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n",
- (uint32_t)mConfig.outputCfg.buffer.raw,
- mConfig.outputCfg.buffer.frameCount,
- mConfig.outputCfg.samplingRate,
- mConfig.outputCfg.channels,
- mConfig.outputCfg.format);
- result.append(buffer);
-
- snprintf(buffer, SIZE, "\t\t%d Clients:\n", mHandles.size());
- result.append(buffer);
- result.append("\t\t\tPid Priority Ctrl Locked client server\n");
- for (size_t i = 0; i < mHandles.size(); ++i) {
- EffectHandle *handle = mHandles[i];
- if (handle != NULL && !handle->destroyed_l()) {
- handle->dump(buffer, SIZE);
- result.append(buffer);
- }
- }
-
- result.append("\n");
-
- write(fd, result.string(), result.length());
-
- if (locked) {
- mLock.unlock();
- }
-}
-
-// ----------------------------------------------------------------------------
-// EffectHandle implementation
-// ----------------------------------------------------------------------------
-
-#undef LOG_TAG
-#define LOG_TAG "AudioFlinger::EffectHandle"
-
-AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect,
- const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
- int32_t priority)
- : BnEffect(),
- mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
- mPriority(priority), mHasControl(false), mEnabled(false), mDestroyed(false)
-{
- ALOGV("constructor %p", this);
-
- if (client == 0) {
- return;
- }
- int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
- mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
- if (mCblkMemory != 0) {
- mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer());
-
- if (mCblk != NULL) {
- new(mCblk) effect_param_cblk_t();
- mBuffer = (uint8_t *)mCblk + bufOffset;
- }
- } else {
- ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t));
- return;
- }
-}
-
-AudioFlinger::EffectHandle::~EffectHandle()
-{
- ALOGV("Destructor %p", this);
-
- if (mEffect == 0) {
- mDestroyed = true;
- return;
- }
- mEffect->lock();
- mDestroyed = true;
- mEffect->unlock();
- disconnect(false);
-}
-
-status_t AudioFlinger::EffectHandle::enable()
-{
- ALOGV("enable %p", this);
- if (!mHasControl) return INVALID_OPERATION;
- if (mEffect == 0) return DEAD_OBJECT;
-
- if (mEnabled) {
- return NO_ERROR;
- }
-
- mEnabled = true;
-
- sp<ThreadBase> thread = mEffect->thread().promote();
- if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, true, mEffect->sessionId());
- }
-
- // checkSuspendOnEffectEnabled() can suspend this same effect when enabled
- if (mEffect->suspended()) {
- return NO_ERROR;
- }
-
- status_t status = mEffect->setEnabled(true);
- if (status != NO_ERROR) {
- if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
- }
- mEnabled = false;
- }
- return status;
-}
-
-status_t AudioFlinger::EffectHandle::disable()
-{
- ALOGV("disable %p", this);
- if (!mHasControl) return INVALID_OPERATION;
- if (mEffect == 0) return DEAD_OBJECT;
-
- if (!mEnabled) {
- return NO_ERROR;
- }
- mEnabled = false;
-
- if (mEffect->suspended()) {
- return NO_ERROR;
- }
-
- status_t status = mEffect->setEnabled(false);
-
- sp<ThreadBase> thread = mEffect->thread().promote();
- if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
- }
-
- return status;
-}
-
-void AudioFlinger::EffectHandle::disconnect()
-{
- disconnect(true);
-}
-
-void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast)
-{
- ALOGV("disconnect(%s)", unpinIfLast ? "true" : "false");
- if (mEffect == 0) {
- return;
- }
- // restore suspended effects if the disconnected handle was enabled and the last one.
- if ((mEffect->disconnect(this, unpinIfLast) == 0) && mEnabled) {
- sp<ThreadBase> thread = mEffect->thread().promote();
- if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
- }
- }
-
- // release sp on module => module destructor can be called now
- mEffect.clear();
- if (mClient != 0) {
- if (mCblk != NULL) {
- // unlike ~TrackBase(), mCblk is never a local new, so don't delete
- mCblk->~effect_param_cblk_t(); // destroy our shared-structure.
- }
- mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
- // Client destructor must run with AudioFlinger mutex locked
- Mutex::Autolock _l(mClient->audioFlinger()->mLock);
- mClient.clear();
- }
-}
-
-status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData)
-{
-// ALOGV("command(), cmdCode: %d, mHasControl: %d, mEffect: %p",
-// cmdCode, mHasControl, (mEffect == 0) ? 0 : mEffect.get());
-
- // only get parameter command is permitted for applications not controlling the effect
- if (!mHasControl && cmdCode != EFFECT_CMD_GET_PARAM) {
- return INVALID_OPERATION;
- }
- if (mEffect == 0) return DEAD_OBJECT;
- if (mClient == 0) return INVALID_OPERATION;
-
- // handle commands that are not forwarded transparently to effect engine
- if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) {
- // No need to trylock() here as this function is executed in the binder thread serving a particular client process:
- // no risk to block the whole media server process or mixer threads if we are stuck here
- Mutex::Autolock _l(mCblk->lock);
- if (mCblk->clientIndex > EFFECT_PARAM_BUFFER_SIZE ||
- mCblk->serverIndex > EFFECT_PARAM_BUFFER_SIZE) {
- mCblk->serverIndex = 0;
- mCblk->clientIndex = 0;
- return BAD_VALUE;
- }
- status_t status = NO_ERROR;
- while (mCblk->serverIndex < mCblk->clientIndex) {
- int reply;
- uint32_t rsize = sizeof(int);
- int *p = (int *)(mBuffer + mCblk->serverIndex);
- int size = *p++;
- if (((uint8_t *)p + size) > mBuffer + mCblk->clientIndex) {
- ALOGW("command(): invalid parameter block size");
- break;
- }
- effect_param_t *param = (effect_param_t *)p;
- if (param->psize == 0 || param->vsize == 0) {
- ALOGW("command(): null parameter or value size");
- mCblk->serverIndex += size;
- continue;
- }
- uint32_t psize = sizeof(effect_param_t) +
- ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) +
- param->vsize;
- status_t ret = mEffect->command(EFFECT_CMD_SET_PARAM,
- psize,
- p,
- &rsize,
- &reply);
- // stop at first error encountered
- if (ret != NO_ERROR) {
- status = ret;
- *(int *)pReplyData = reply;
- break;
- } else if (reply != NO_ERROR) {
- *(int *)pReplyData = reply;
- break;
- }
- mCblk->serverIndex += size;
- }
- mCblk->serverIndex = 0;
- mCblk->clientIndex = 0;
- return status;
- } else if (cmdCode == EFFECT_CMD_ENABLE) {
- *(int *)pReplyData = NO_ERROR;
- return enable();
- } else if (cmdCode == EFFECT_CMD_DISABLE) {
- *(int *)pReplyData = NO_ERROR;
- return disable();
- }
-
- return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
-}
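(Reading aid, not part of the patch: the EFFECT_CMD_SET_PARAM_COMMIT loop above walks the shared effect_param_cblk_t buffer from serverIndex to clientIndex. Derived purely from the pointer arithmetic in that loop, each record it consumes looks like the sketch below; field roles beyond what the loop itself shows are assumptions.)

    // Per-entry layout read by the commit loop:
    //   +0   int32_t size;          // entry length; the loop advances serverIndex
    //                               // by this value to reach the next record
    //   +4   effect_param_t {
    //            uint32_t psize;    // parameter size
    //            uint32_t vsize;    // value size
    //            char     data[];   // parameter data padded to an int boundary,
    //                               // then the value data
    //        }
    // One EFFECT_CMD_SET_PARAM is issued per record; the walk stops at the
    // first error and the reply code is returned to the client.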
-
-void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled)
-{
- ALOGV("setControl %p control %d", this, hasControl);
-
- mHasControl = hasControl;
- mEnabled = enabled;
-
- if (signal && mEffectClient != 0) {
- mEffectClient->controlStatusChanged(hasControl);
- }
-}
-
-void AudioFlinger::EffectHandle::commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData)
-{
- if (mEffectClient != 0) {
- mEffectClient->commandExecuted(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- }
-}
-
-
-
-void AudioFlinger::EffectHandle::setEnabled(bool enabled)
-{
- if (mEffectClient != 0) {
- mEffectClient->enableStatusChanged(enabled);
- }
-}
-
-status_t AudioFlinger::EffectHandle::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- return BnEffect::onTransact(code, data, reply, flags);
-}
-
-
-void AudioFlinger::EffectHandle::dump(char* buffer, size_t size)
-{
- bool locked = mCblk != NULL && tryLock(mCblk->lock);
-
- snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n",
- (mClient == 0) ? getpid_cached : mClient->pid(),
- mPriority,
- mHasControl,
- !locked,
- mCblk ? mCblk->clientIndex : 0,
- mCblk ? mCblk->serverIndex : 0
- );
-
- if (locked) {
- mCblk->lock.unlock();
- }
-}
-
-#undef LOG_TAG
-#define LOG_TAG "AudioFlinger::EffectChain"
-
-AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
- int sessionId)
- : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
- mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
- mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
-{
- mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
- if (thread == NULL) {
- return;
- }
- mMaxTailBuffers = ((kProcessTailDurationMs * thread->sampleRate()) / 1000) /
- thread->frameCount();
-}
-
-AudioFlinger::EffectChain::~EffectChain()
-{
- if (mOwnInBuffer) {
- delete mInBuffer;
- }
-
-}
-
-// getEffectFromDesc_l() must be called with ThreadBase::mLock held
-sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l(effect_descriptor_t *descriptor)
-{
- size_t size = mEffects.size();
-
- for (size_t i = 0; i < size; i++) {
- if (memcmp(&mEffects[i]->desc().uuid, &descriptor->uuid, sizeof(effect_uuid_t)) == 0) {
- return mEffects[i];
- }
- }
- return 0;
-}
-
-// getEffectFromId_l() must be called with ThreadBase::mLock held
-sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int id)
-{
- size_t size = mEffects.size();
-
- for (size_t i = 0; i < size; i++) {
- // by convention, return first effect if id provided is 0 (0 is never a valid id)
- if (id == 0 || mEffects[i]->id() == id) {
- return mEffects[i];
- }
- }
- return 0;
-}
-
-// getEffectFromType_l() must be called with ThreadBase::mLock held
-sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l(
- const effect_uuid_t *type)
-{
- size_t size = mEffects.size();
-
- for (size_t i = 0; i < size; i++) {
- if (memcmp(&mEffects[i]->desc().type, type, sizeof(effect_uuid_t)) == 0) {
- return mEffects[i];
- }
- }
- return 0;
-}
-
-void AudioFlinger::EffectChain::clearInputBuffer()
-{
- Mutex::Autolock _l(mLock);
- sp<ThreadBase> thread = mThread.promote();
- if (thread == 0) {
- ALOGW("clearInputBuffer(): cannot promote mixer thread");
- return;
- }
- clearInputBuffer_l(thread);
-}
-
-// Must be called with EffectChain::mLock locked
-void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
-{
- size_t numSamples = thread->frameCount() * thread->channelCount();
- memset(mInBuffer, 0, numSamples * sizeof(int16_t));
-
-}
-
-// Must be called with EffectChain::mLock locked
-void AudioFlinger::EffectChain::process_l()
-{
- sp<ThreadBase> thread = mThread.promote();
- if (thread == 0) {
- ALOGW("process_l(): cannot promote mixer thread");
- return;
- }
- bool isGlobalSession = (mSessionId == AUDIO_SESSION_OUTPUT_MIX) ||
- (mSessionId == AUDIO_SESSION_OUTPUT_STAGE);
- // always process effects unless no more tracks are on the session and the effect tail
- // has been rendered
- bool doProcess = true;
- if (!isGlobalSession) {
- bool tracksOnSession = (trackCnt() != 0);
-
- if (!tracksOnSession && mTailBufferCount == 0) {
- doProcess = false;
- }
-
- if (activeTrackCnt() == 0) {
- // if no track is active and the effect tail has not been rendered,
- // the input buffer must be cleared here as the mixer process will not do it
- if (tracksOnSession || mTailBufferCount > 0) {
- clearInputBuffer_l(thread);
- if (mTailBufferCount > 0) {
- mTailBufferCount--;
- }
- }
- }
- }
-
- size_t size = mEffects.size();
- if (doProcess) {
- for (size_t i = 0; i < size; i++) {
- mEffects[i]->process();
- }
- }
- for (size_t i = 0; i < size; i++) {
- mEffects[i]->updateState();
- }
-}
-
-// addEffect_l() must be called with PlaybackThread::mLock held
-status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect)
-{
- effect_descriptor_t desc = effect->desc();
- uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
-
- Mutex::Autolock _l(mLock);
- effect->setChain(this);
- sp<ThreadBase> thread = mThread.promote();
- if (thread == 0) {
- return NO_INIT;
- }
- effect->setThread(thread);
-
- if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- // Auxiliary effects are inserted at the beginning of mEffects vector as
- // they are processed first and accumulated in chain input buffer
- mEffects.insertAt(effect, 0);
-
- // the input buffer for auxiliary effect contains mono samples in
- // 32 bit format. This is to avoid saturation in the AudioMixer
- // accumulation stage. Saturation is done in EffectModule::process() before
- // calling the process in effect engine
- size_t numSamples = thread->frameCount();
- int32_t *buffer = new int32_t[numSamples];
- memset(buffer, 0, numSamples * sizeof(int32_t));
- effect->setInBuffer((int16_t *)buffer);
- // auxiliary effects output samples to chain input buffer for further processing
- // by insert effects
- effect->setOutBuffer(mInBuffer);
- } else {
- // Insert effects are inserted at the end of mEffects vector as they are processed
- // after track and auxiliary effects.
- // Insert effect order as a function of indicated preference:
- // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
- // another effect is present
- // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
- // last effect claiming first position
- // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
- // first effect claiming last position
- // else if EFFECT_FLAG_INSERT_ANY insert after first or before last
- // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
- // already present
-
- size_t size = mEffects.size();
- size_t idx_insert = size;
- ssize_t idx_insert_first = -1;
- ssize_t idx_insert_last = -1;
-
- for (size_t i = 0; i < size; i++) {
- effect_descriptor_t d = mEffects[i]->desc();
- uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
- uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
- if (iMode == EFFECT_FLAG_TYPE_INSERT) {
- // check invalid effect chaining combinations
- if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
- iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
- ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s", desc.name, d.name);
- return INVALID_OPERATION;
- }
- // remember position of first insert effect and by default
- // select this as insert position for new effect
- if (idx_insert == size) {
- idx_insert = i;
- }
- // remember position of last insert effect claiming
- // first position
- if (iPref == EFFECT_FLAG_INSERT_FIRST) {
- idx_insert_first = i;
- }
- // remember position of first insert effect claiming
- // last position
- if (iPref == EFFECT_FLAG_INSERT_LAST &&
- idx_insert_last == -1) {
- idx_insert_last = i;
- }
- }
- }
-
- // modify idx_insert from first position if needed
- if (insertPref == EFFECT_FLAG_INSERT_LAST) {
- if (idx_insert_last != -1) {
- idx_insert = idx_insert_last;
- } else {
- idx_insert = size;
- }
- } else {
- if (idx_insert_first != -1) {
- idx_insert = idx_insert_first + 1;
- }
- }
-
- // always read samples from chain input buffer
- effect->setInBuffer(mInBuffer);
-
- // if last effect in the chain, output samples to chain
- // output buffer, otherwise to chain input buffer
- if (idx_insert == size) {
- if (idx_insert != 0) {
- mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
- mEffects[idx_insert-1]->configure();
- }
- effect->setOutBuffer(mOutBuffer);
- } else {
- effect->setOutBuffer(mInBuffer);
- }
- mEffects.insertAt(effect, idx_insert);
-
- ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this, idx_insert);
- }
- effect->configure();
- return NO_ERROR;
-}
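(Worked example of the ordering rules above, not part of the patch: with an existing chain [F, A] where F claims EFFECT_FLAG_INSERT_FIRST and A claims EFFECT_FLAG_INSERT_ANY, adding a new INSERT_LAST effect L yields [F, A, L]; adding instead another INSERT_ANY effect A2 yields [F, A2, A], since it lands right after the last effect claiming first position; adding an INSERT_EXCLUSIVE effect is rejected because other insert effects are already present.)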
-
-// removeEffect_l() must be called with PlaybackThread::mLock held
-size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect)
-{
- Mutex::Autolock _l(mLock);
- size_t size = mEffects.size();
- uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK;
-
- for (size_t i = 0; i < size; i++) {
- if (effect == mEffects[i]) {
- // calling stop here will remove pre-processing effect from the audio HAL.
- // This is safe as we hold the EffectChain mutex which guarantees that we are not in
- // the middle of a read from audio HAL
- if (mEffects[i]->state() == EffectModule::ACTIVE ||
- mEffects[i]->state() == EffectModule::STOPPING) {
- mEffects[i]->stop();
- }
- if (type == EFFECT_FLAG_TYPE_AUXILIARY) {
- delete[] effect->inBuffer();
- } else {
- if (i == size - 1 && i != 0) {
- mEffects[i - 1]->setOutBuffer(mOutBuffer);
- mEffects[i - 1]->configure();
- }
- }
- mEffects.removeAt(i);
- ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(), this, i);
- break;
- }
- }
-
- return mEffects.size();
-}
-
-// setDevice_l() must be called with PlaybackThread::mLock held
-void AudioFlinger::EffectChain::setDevice_l(audio_devices_t device)
-{
- size_t size = mEffects.size();
- for (size_t i = 0; i < size; i++) {
- mEffects[i]->setDevice(device);
- }
-}
-
-// setMode_l() must be called with PlaybackThread::mLock held
-void AudioFlinger::EffectChain::setMode_l(audio_mode_t mode)
-{
- size_t size = mEffects.size();
- for (size_t i = 0; i < size; i++) {
- mEffects[i]->setMode(mode);
- }
-}
-
-// setAudioSource_l() must be called with PlaybackThread::mLock held
-void AudioFlinger::EffectChain::setAudioSource_l(audio_source_t source)
-{
- size_t size = mEffects.size();
- for (size_t i = 0; i < size; i++) {
- mEffects[i]->setAudioSource(source);
- }
-}
-
-// setVolume_l() must be called with PlaybackThread::mLock held
-bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right)
-{
- uint32_t newLeft = *left;
- uint32_t newRight = *right;
- bool hasControl = false;
- int ctrlIdx = -1;
- size_t size = mEffects.size();
-
- // first update volume controller
- for (size_t i = size; i > 0; i--) {
- if (mEffects[i - 1]->isProcessEnabled() &&
- (mEffects[i - 1]->desc().flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL) {
- ctrlIdx = i - 1;
- hasControl = true;
- break;
- }
- }
-
- if (ctrlIdx == mVolumeCtrlIdx && *left == mLeftVolume && *right == mRightVolume) {
- if (hasControl) {
- *left = mNewLeftVolume;
- *right = mNewRightVolume;
- }
- return hasControl;
- }
-
- mVolumeCtrlIdx = ctrlIdx;
- mLeftVolume = newLeft;
- mRightVolume = newRight;
-
- // second get volume update from volume controller
- if (ctrlIdx >= 0) {
- mEffects[ctrlIdx]->setVolume(&newLeft, &newRight, true);
- mNewLeftVolume = newLeft;
- mNewRightVolume = newRight;
- }
- // then indicate volume to all other effects in chain.
- // Pass altered volume to effects before volume controller
- // and requested volume to effects after controller
- uint32_t lVol = newLeft;
- uint32_t rVol = newRight;
-
- for (size_t i = 0; i < size; i++) {
- if ((int)i == ctrlIdx) continue;
- // this also works for ctrlIdx == -1 when there is no volume controller
- if ((int)i > ctrlIdx) {
- lVol = *left;
- rVol = *right;
- }
- mEffects[i]->setVolume(&lVol, &rVol, false);
- }
- *left = newLeft;
- *right = newRight;
-
- return hasControl;
-}
-
-void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId);
- result.append(buffer);
-
- bool locked = tryLock(mLock);
- // failed to lock - AudioFlinger is probably deadlocked
- if (!locked) {
- result.append("\tCould not lock mutex:\n");
- }
-
- result.append("\tNum fx In buffer Out buffer Active tracks:\n");
- snprintf(buffer, SIZE, "\t%02d 0x%08x 0x%08x %d\n",
- mEffects.size(),
- (uint32_t)mInBuffer,
- (uint32_t)mOutBuffer,
- mActiveTrackCnt);
- result.append(buffer);
- write(fd, result.string(), result.size());
-
- for (size_t i = 0; i < mEffects.size(); ++i) {
- sp<EffectModule> effect = mEffects[i];
- if (effect != 0) {
- effect->dump(fd, args);
- }
- }
-
- if (locked) {
- mLock.unlock();
- }
-}
-
-// must be called with ThreadBase::mLock held
-void AudioFlinger::EffectChain::setEffectSuspended_l(
- const effect_uuid_t *type, bool suspend)
-{
- sp<SuspendedEffectDesc> desc;
- // use effect type UUID timeLow as key as there is no real risk of identical
- // timeLow fields among effect type UUIDs.
- ssize_t index = mSuspendedEffects.indexOfKey(type->timeLow);
- if (suspend) {
- if (index >= 0) {
- desc = mSuspendedEffects.valueAt(index);
- } else {
- desc = new SuspendedEffectDesc();
- desc->mType = *type;
- mSuspendedEffects.add(type->timeLow, desc);
- ALOGV("setEffectSuspended_l() add entry for %08x", type->timeLow);
- }
- if (desc->mRefCount++ == 0) {
- sp<EffectModule> effect = getEffectIfEnabled(type);
- if (effect != 0) {
- desc->mEffect = effect;
- effect->setSuspended(true);
- effect->setEnabled(false);
- }
- }
- } else {
- if (index < 0) {
- return;
- }
- desc = mSuspendedEffects.valueAt(index);
- if (desc->mRefCount <= 0) {
- ALOGW("setEffectSuspended_l() restore refcount should not be 0 %d", desc->mRefCount);
- desc->mRefCount = 1;
- }
- if (--desc->mRefCount == 0) {
- ALOGV("setEffectSuspended_l() remove entry for %08x", mSuspendedEffects.keyAt(index));
- if (desc->mEffect != 0) {
- sp<EffectModule> effect = desc->mEffect.promote();
- if (effect != 0) {
- effect->setSuspended(false);
- effect->lock();
- EffectHandle *handle = effect->controlHandle_l();
- if (handle != NULL && !handle->destroyed_l()) {
- effect->setEnabled_l(handle->enabled());
+ NBAIO_Source *teeSource = source.get();
+ if (teeSource != NULL) {
+ char teeTime[16];
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ struct tm tm;
+ localtime_r(&tv.tv_sec, &tm);
+ strftime(teeTime, sizeof(teeTime), "%T", &tm);
+ char teePath[64];
+ sprintf(teePath, "/data/misc/media/%s_%d.wav", teeTime, id);
+ int teeFd = open(teePath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+ if (teeFd >= 0) {
+ char wavHeader[44];
+ memcpy(wavHeader,
+ "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
+ sizeof(wavHeader));
+ NBAIO_Format format = teeSource->format();
+ unsigned channelCount = Format_channelCount(format);
+ ALOG_ASSERT(channelCount <= FCC_2);
+ uint32_t sampleRate = Format_sampleRate(format);
+ wavHeader[22] = channelCount; // number of channels
+ wavHeader[24] = sampleRate; // sample rate
+ wavHeader[25] = sampleRate >> 8;
+ wavHeader[32] = channelCount * 2; // block alignment
+ write(teeFd, wavHeader, sizeof(wavHeader));
+ size_t total = 0;
+ bool firstRead = true;
+ for (;;) {
+#define TEE_SINK_READ 1024
+ short buffer[TEE_SINK_READ * FCC_2];
+ size_t count = TEE_SINK_READ;
+ ssize_t actual = teeSource->read(buffer, count,
+ AudioBufferProvider::kInvalidPTS);
+ bool wasFirstRead = firstRead;
+ firstRead = false;
+ if (actual <= 0) {
+ if (actual == (ssize_t) OVERRUN && wasFirstRead) {
+ continue;
}
- effect->unlock();
+ break;
}
- desc->mEffect.clear();
+ ALOG_ASSERT(actual <= (ssize_t)count);
+ write(teeFd, buffer, actual * channelCount * sizeof(short));
+ total += actual;
}
- mSuspendedEffects.removeItemsAt(index);
- }
- }
-}
-
-// must be called with ThreadBase::mLock held
-void AudioFlinger::EffectChain::setEffectSuspendedAll_l(bool suspend)
-{
- sp<SuspendedEffectDesc> desc;
-
- ssize_t index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
- if (suspend) {
- if (index >= 0) {
- desc = mSuspendedEffects.valueAt(index);
+ lseek(teeFd, (off_t) 4, SEEK_SET);
+ uint32_t temp = 44 + total * channelCount * sizeof(short) - 8;
+ write(teeFd, &temp, sizeof(temp));
+ lseek(teeFd, (off_t) 40, SEEK_SET);
+ temp = total * channelCount * sizeof(short);
+ write(teeFd, &temp, sizeof(temp));
+ close(teeFd);
+ fdprintf(fd, "FastMixer tee copied to %s\n", teePath);
} else {
- desc = new SuspendedEffectDesc();
- mSuspendedEffects.add((int)kKeyForSuspendAll, desc);
- ALOGV("setEffectSuspendedAll_l() add entry for 0");
- }
- if (desc->mRefCount++ == 0) {
- Vector< sp<EffectModule> > effects;
- getSuspendEligibleEffects(effects);
- for (size_t i = 0; i < effects.size(); i++) {
- setEffectSuspended_l(&effects[i]->desc().type, true);
- }
- }
- } else {
- if (index < 0) {
- return;
- }
- desc = mSuspendedEffects.valueAt(index);
- if (desc->mRefCount <= 0) {
- ALOGW("setEffectSuspendedAll_l() restore refcount should not be 0 %d", desc->mRefCount);
- desc->mRefCount = 1;
- }
- if (--desc->mRefCount == 0) {
- Vector<const effect_uuid_t *> types;
- for (size_t i = 0; i < mSuspendedEffects.size(); i++) {
- if (mSuspendedEffects.keyAt(i) == (int)kKeyForSuspendAll) {
- continue;
- }
- types.add(&mSuspendedEffects.valueAt(i)->mType);
- }
- for (size_t i = 0; i < types.size(); i++) {
- setEffectSuspended_l(types[i], false);
- }
- ALOGV("setEffectSuspendedAll_l() remove entry for %08x", mSuspendedEffects.keyAt(index));
- mSuspendedEffects.removeItem((int)kKeyForSuspendAll);
- }
- }
-}
-
-
-// The volume effect is used for automated tests only
-#ifndef OPENSL_ES_H_
-static const effect_uuid_t SL_IID_VOLUME_ = { 0x09e8ede0, 0xddde, 0x11db, 0xb4f6,
- { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } };
-const effect_uuid_t * const SL_IID_VOLUME = &SL_IID_VOLUME_;
-#endif //OPENSL_ES_H_
-
-bool AudioFlinger::EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc)
-{
- // auxiliary effects and visualizer are never suspended on output mix
- if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
- (((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) ||
- (memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) ||
- (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) {
- return false;
- }
- return true;
-}
-
-void AudioFlinger::EffectChain::getSuspendEligibleEffects(Vector< sp<AudioFlinger::EffectModule> > &effects)
-{
- effects.clear();
- for (size_t i = 0; i < mEffects.size(); i++) {
- if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
- effects.add(mEffects[i]);
- }
- }
-}
-
-sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectIfEnabled(
- const effect_uuid_t *type)
-{
- sp<EffectModule> effect = getEffectFromType_l(type);
- return effect != 0 && effect->isEnabled() ? effect : 0;
-}
-
-void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
- bool enabled)
-{
- ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
- if (enabled) {
- if (index < 0) {
- // if the effect is not suspended, check if all effects are suspended
- index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
- if (index < 0) {
- return;
- }
- if (!isEffectEligibleForSuspend(effect->desc())) {
- return;
- }
- setEffectSuspended_l(&effect->desc().type, enabled);
- index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
- if (index < 0) {
- ALOGW("checkSuspendOnEffectEnabled() Fx should be suspended here!");
- return;
- }
- }
- ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x",
- effect->desc().type.timeLow);
- sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
- // if the effect is requested to be suspended but was not yet enabled, suspend it now.
- if (desc->mEffect == 0) {
- desc->mEffect = effect;
- effect->setEnabled(false);
- effect->setSuspended(true);
- }
- } else {
- if (index < 0) {
- return;
+ fdprintf(fd, "FastMixer unable to create tee %s: \n", strerror(errno));
}
- ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x",
- effect->desc().type.timeLow);
- sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
- desc->mEffect.clear();
- effect->setSuspended(false);
}
}
-#undef LOG_TAG
-#define LOG_TAG "AudioFlinger"
-
// ----------------------------------------------------------------------------
status_t AudioFlinger::onTransact(
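(Reading aid for the tee-dump code added above, not part of the patch: the 44-byte header template it writes and later patches in place follows the canonical little-endian PCM WAV layout. The struct name below is illustrative; the layout is the standard RIFF/WAVE format, with the fields the tee code actually rewrites marked "patched".)

    #include <stdint.h>

    struct WavHeader {                 // byte offset
        char     riff[4];              //  0: "RIFF"
        uint32_t riffSize;             //  4: total file size - 8            (patched after capture)
        char     wave[4];              //  8: "WAVE"
        char     fmt[4];               // 12: "fmt "
        uint32_t fmtSize;              // 16: 16 for PCM
        uint16_t audioFormat;          // 20: 1 = PCM
        uint16_t channels;             // 22: channel count                  (patched, wavHeader[22])
        uint32_t sampleRate;           // 24: e.g. 44100 = 0xAC44            (patched, low 16 bits only)
        uint32_t byteRate;             // 28: sampleRate * blockAlign        (left 0 by the template)
        uint16_t blockAlign;           // 32: channels * 2 bytes per sample  (patched, wavHeader[32])
        uint16_t bitsPerSample;        // 34: 16
        char     data[4];              // 36: "data"
        uint32_t dataSize;             // 40: frames * blockAlign            (patched after capture)
    };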
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 49e2b2c..46a8e0f 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -75,6 +75,11 @@ class FastMixer;
static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
+#define MAX_GAIN 4096.0f
+#define MAX_GAIN_INT 0x1000
+
+#define INCLUDING_FROM_AUDIOFLINGER_H
+
class AudioFlinger :
public BinderService<AudioFlinger>,
public BnAudioFlinger
@@ -92,8 +97,8 @@ public:
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
- IAudioFlinger::track_flags_t flags,
+ size_t frameCount,
+ IAudioFlinger::track_flags_t *flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid,
@@ -106,7 +111,7 @@ public:
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
IAudioFlinger::track_flags_t flags,
pid_t tid,
int *sessionId,
@@ -174,7 +179,7 @@ public:
virtual status_t setVoiceVolume(float volume);
- virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+ virtual status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
audio_io_handle_t output) const;
virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const;
@@ -207,8 +212,8 @@ public:
virtual audio_module_handle_t loadHwModule(const char *name);
- virtual int32_t getPrimaryOutputSamplingRate();
- virtual int32_t getPrimaryOutputFrameCount();
+ virtual uint32_t getPrimaryOutputSamplingRate();
+ virtual size_t getPrimaryOutputFrameCount();
virtual status_t onTransact(
uint32_t code,
@@ -269,19 +274,28 @@ private:
virtual ~AudioFlinger();
// call in any IAudioFlinger method that accesses mPrimaryHardwareDev
- status_t initCheck() const { return mPrimaryHardwareDev == NULL ? NO_INIT : NO_ERROR; }
+ status_t initCheck() const { return mPrimaryHardwareDev == NULL ?
+ NO_INIT : NO_ERROR; }
// RefBase
virtual void onFirstRef();
- AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module, audio_devices_t devices);
+ AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module,
+ audio_devices_t devices);
void purgeStaleEffects_l();
// standby delay for MIXER and DUPLICATING playback threads is read from property
// ro.audio.flinger_standbytime_ms or defaults to kDefaultStandbyTimeInNsecs
static nsecs_t mStandbyTimeInNsecs;
+ // incremented by 2 when screen state changes, bit 0 == 1 means "off"
+ // AudioFlinger::setParameters() updates, other threads read w/o lock
+ static uint32_t mScreenState;
+
// Internal dump utilities.
+ static const int kDumpLockRetries = 50;
+ static const int kDumpLockSleepUs = 20000;
+ static bool dumpTryLock(Mutex& mutex);
void dumpPermissionDenial(int fd, const Vector<String16>& args);
void dumpClients(int fd, const Vector<String16>& args);
void dumpInternals(int fd, const Vector<String16>& args);
@@ -346,409 +360,6 @@ private:
struct AudioStreamOut;
struct AudioStreamIn;
- class ThreadBase : public Thread {
- public:
-
- enum type_t {
- MIXER, // Thread class is MixerThread
- DIRECT, // Thread class is DirectOutputThread
- DUPLICATING, // Thread class is DuplicatingThread
- RECORD // Thread class is RecordThread
- };
-
- ThreadBase (const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
- audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
- virtual ~ThreadBase();
-
- void dumpBase(int fd, const Vector<String16>& args);
- void dumpEffectChains(int fd, const Vector<String16>& args);
-
- void clearPowerManager();
-
- // base for record and playback
- class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
-
- public:
- enum track_state {
- IDLE,
- TERMINATED,
- FLUSHED,
- STOPPED,
- // next 2 states are currently used for fast tracks only
- STOPPING_1, // waiting for first underrun
- STOPPING_2, // waiting for presentation complete
- RESUMING,
- ACTIVE,
- PAUSING,
- PAUSED
- };
-
- TrackBase(ThreadBase *thread,
- const sp<Client>& client,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId);
- virtual ~TrackBase();
-
- virtual status_t start(AudioSystem::sync_event_t event,
- int triggerSession) = 0;
- virtual void stop() = 0;
- sp<IMemory> getCblk() const { return mCblkMemory; }
- audio_track_cblk_t* cblk() const { return mCblk; }
- int sessionId() const { return mSessionId; }
- virtual status_t setSyncEvent(const sp<SyncEvent>& event);
-
- protected:
- TrackBase(const TrackBase&);
- TrackBase& operator = (const TrackBase&);
-
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
- virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
-
- // ExtendedAudioBufferProvider interface is only needed for Track,
- // but putting it in TrackBase avoids the complexity of virtual inheritance
- virtual size_t framesReady() const { return SIZE_MAX; }
-
- audio_format_t format() const {
- return mFormat;
- }
-
- int channelCount() const { return mChannelCount; }
-
- audio_channel_mask_t channelMask() const { return mChannelMask; }
-
- int sampleRate() const; // FIXME inline after cblk sr moved
-
- // Return a pointer to the start of a contiguous slice of the track buffer.
- // Parameter 'offset' is the requested start position, expressed in
- // monotonically increasing frame units relative to the track epoch.
- // Parameter 'frames' is the requested length, also in frame units.
- // Always returns non-NULL. It is the caller's responsibility to
- // verify that this will be successful; the result of calling this
- // function with invalid 'offset' or 'frames' is undefined.
- void* getBuffer(uint32_t offset, uint32_t frames) const;
-
- bool isStopped() const {
- return (mState == STOPPED || mState == FLUSHED);
- }
-
- // for fast tracks only
- bool isStopping() const {
- return mState == STOPPING_1 || mState == STOPPING_2;
- }
- bool isStopping_1() const {
- return mState == STOPPING_1;
- }
- bool isStopping_2() const {
- return mState == STOPPING_2;
- }
-
- bool isTerminated() const {
- return mState == TERMINATED;
- }
-
- bool step();
- void reset();
-
- const wp<ThreadBase> mThread;
- /*const*/ sp<Client> mClient; // see explanation at ~TrackBase() why not const
- sp<IMemory> mCblkMemory;
- audio_track_cblk_t* mCblk;
- void* mBuffer; // start of track buffer, typically in shared memory
- void* mBufferEnd; // &mBuffer[mFrameCount * frameSize], where frameSize
- // is based on mChannelCount and 16-bit samples
- uint32_t mFrameCount;
- // we don't really need a lock for these
- track_state mState;
- const uint32_t mSampleRate; // initial sample rate only; for tracks which
- // support dynamic rates, the current value is in control block
- const audio_format_t mFormat;
- bool mStepServerFailed;
- const int mSessionId;
- uint8_t mChannelCount;
- audio_channel_mask_t mChannelMask;
- Vector < sp<SyncEvent> >mSyncEvents;
- };
-
- enum {
- CFG_EVENT_IO,
- CFG_EVENT_PRIO
- };
-
- class ConfigEvent {
- public:
- ConfigEvent(int type) : mType(type) {}
- virtual ~ConfigEvent() {}
-
- int type() const { return mType; }
-
- virtual void dump(char *buffer, size_t size) = 0;
-
- private:
- const int mType;
- };
-
- class IoConfigEvent : public ConfigEvent {
- public:
- IoConfigEvent(int event, int param) :
- ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(param) {}
- virtual ~IoConfigEvent() {}
-
- int event() const { return mEvent; }
- int param() const { return mParam; }
-
- virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam);
- }
-
- private:
- const int mEvent;
- const int mParam;
- };
-
- class PrioConfigEvent : public ConfigEvent {
- public:
- PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
- ConfigEvent(CFG_EVENT_PRIO), mPid(pid), mTid(tid), mPrio(prio) {}
- virtual ~PrioConfigEvent() {}
-
- pid_t pid() const { return mPid; }
- pid_t tid() const { return mTid; }
- int32_t prio() const { return mPrio; }
-
- virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio);
- }
-
- private:
- const pid_t mPid;
- const pid_t mTid;
- const int32_t mPrio;
- };
-
-
- class PMDeathRecipient : public IBinder::DeathRecipient {
- public:
- PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
- virtual ~PMDeathRecipient() {}
-
- // IBinder::DeathRecipient
- virtual void binderDied(const wp<IBinder>& who);
-
- private:
- PMDeathRecipient(const PMDeathRecipient&);
- PMDeathRecipient& operator = (const PMDeathRecipient&);
-
- wp<ThreadBase> mThread;
- };
-
- virtual status_t initCheck() const = 0;
-
- // static externally-visible
- type_t type() const { return mType; }
- audio_io_handle_t id() const { return mId;}
-
- // dynamic externally-visible
- uint32_t sampleRate() const { return mSampleRate; }
- int channelCount() const { return mChannelCount; }
- audio_channel_mask_t channelMask() const { return mChannelMask; }
- audio_format_t format() const { return mFormat; }
- // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
- // and returns the normal mix buffer's frame count.
- size_t frameCount() const { return mNormalFrameCount; }
- // Returns the HAL's frame count, i.e. the fast mixer buffer size.
- size_t frameCountHAL() const { return mFrameCount; }
-
- // Should be "virtual status_t requestExitAndWait()" and override same
- // method in Thread, but Thread::requestExitAndWait() is not yet virtual.
- void exit();
- virtual bool checkForNewParameters_l() = 0;
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys) = 0;
- virtual void audioConfigChanged_l(int event, int param = 0) = 0;
- void sendIoConfigEvent(int event, int param = 0);
- void sendIoConfigEvent_l(int event, int param = 0);
- void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
- void processConfigEvents();
-
- // see note at declaration of mStandby, mOutDevice and mInDevice
- bool standby() const { return mStandby; }
- audio_devices_t outDevice() const { return mOutDevice; }
- audio_devices_t inDevice() const { return mInDevice; }
-
- virtual audio_stream_t* stream() const = 0;
-
- sp<EffectHandle> createEffect_l(
- const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
- int32_t priority,
- int sessionId,
- effect_descriptor_t *desc,
- int *enabled,
- status_t *status);
- void disconnectEffect(const sp< EffectModule>& effect,
- EffectHandle *handle,
- bool unpinIfLast);
-
- // return values for hasAudioSession (bit field)
- enum effect_state {
- EFFECT_SESSION = 0x1, // the audio session corresponds to at least one
- // effect
- TRACK_SESSION = 0x2 // the audio session corresponds to at least one
- // track
- };
-
- // get effect chain corresponding to session Id.
- sp<EffectChain> getEffectChain(int sessionId);
- // same as getEffectChain() but must be called with ThreadBase mutex locked
- sp<EffectChain> getEffectChain_l(int sessionId) const;
- // add an effect chain to the chain list (mEffectChains)
- virtual status_t addEffectChain_l(const sp<EffectChain>& chain) = 0;
- // remove an effect chain from the chain list (mEffectChains)
- virtual size_t removeEffectChain_l(const sp<EffectChain>& chain) = 0;
- // lock all effect chains' Mutexes. Must be called before releasing the
- // ThreadBase mutex and before processing the mixer and effects. This guarantees
- // the integrity of the chains during the process.
- // Also sets the parameter 'effectChains' to current value of mEffectChains.
- void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
- // unlock effect chains after process
- void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
- // set audio mode to all effect chains
- void setMode(audio_mode_t mode);
- // get effect module with corresponding ID on specified audio session
- sp<AudioFlinger::EffectModule> getEffect(int sessionId, int effectId);
- sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
- // add an effect module. Also creates the effect chain if none exists for
- // the effect's audio session
- status_t addEffect_l(const sp< EffectModule>& effect);
- // remove an effect module. Also removes the effect chain if this was the last
- // effect
- void removeEffect_l(const sp< EffectModule>& effect);
- // detach all tracks connected to an auxiliary effect
- virtual void detachAuxEffect_l(int effectId) {}
- // returns either EFFECT_SESSION if effects on this audio session exist in one
- // chain, or TRACK_SESSION if tracks on this audio session exist, or both
- virtual uint32_t hasAudioSession(int sessionId) const = 0;
- // the value returned by default implementation is not important as the
- // strategy is only meaningful for PlaybackThread which implements this method
- virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; }
-
- // suspend or restore effect according to the type of effect passed. a NULL
- // type pointer means suspend all effects in the session
- void setEffectSuspended(const effect_uuid_t *type,
- bool suspend,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX);
- // check if some effects must be suspended/restored when an effect is enabled
- // or disabled
- void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
- bool enabled,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX);
- void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
- bool enabled,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX);
-
- virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0;
- virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0;
-
-
- mutable Mutex mLock;
-
- protected:
-
- // entry describing an effect being suspended in mSuspendedSessions keyed vector
- class SuspendedSessionDesc : public RefBase {
- public:
- SuspendedSessionDesc() : mRefCount(0) {}
-
- int mRefCount; // number of active suspend requests
- effect_uuid_t mType; // effect type UUID
- };
-
- void acquireWakeLock();
- void acquireWakeLock_l();
- void releaseWakeLock();
- void releaseWakeLock_l();
- void setEffectSuspended_l(const effect_uuid_t *type,
- bool suspend,
- int sessionId);
- // updates mSuspendedSessions when an effect is suspended or restored
- void updateSuspendedSessions_l(const effect_uuid_t *type,
- bool suspend,
- int sessionId);
- // check if some effects must be suspended when an effect chain is added
- void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
-
- virtual void preExit() { }
-
- friend class AudioFlinger; // for mEffectChains
-
- const type_t mType;
-
- // Used by parameters, config events, addTrack_l, exit
- Condition mWaitWorkCV;
-
- const sp<AudioFlinger> mAudioFlinger;
- uint32_t mSampleRate;
- size_t mFrameCount; // output HAL, direct output, record
- size_t mNormalFrameCount; // normal mixer and effects
- audio_channel_mask_t mChannelMask;
- uint16_t mChannelCount;
- size_t mFrameSize;
- audio_format_t mFormat;
-
- // Parameter sequence by client: binder thread calling setParameters():
- // 1. Lock mLock
- // 2. Append to mNewParameters
- // 3. mWaitWorkCV.signal
- // 4. mParamCond.waitRelative with timeout
- // 5. read mParamStatus
- // 6. mWaitWorkCV.signal
- // 7. Unlock
- //
- // Parameter sequence by server: threadLoop calling checkForNewParameters_l():
- // 1. Lock mLock
- // 2. If there is an entry in mNewParameters proceed ...
- // 3. Read first entry in mNewParameters
- // 4. Process
- // 5. Remove first entry from mNewParameters
- // 6. Set mParamStatus
- // 7. mParamCond.signal
- // 8. mWaitWorkCV.wait with timeout (this is to avoid overwriting mParamStatus)
- // 9. Unlock
- Condition mParamCond;
- Vector<String8> mNewParameters;
- status_t mParamStatus;
-
- Vector<ConfigEvent *> mConfigEvents;
-
- // These fields are written and read by the thread itself without lock or barrier,
- // and read by other threads without lock or barrier via standby(), outDevice()
- // and inDevice().
- // Because of the absence of a lock or barrier, any other thread that reads
- // these fields must use the information in isolation, or be prepared to deal
- // with the possibility that it might be inconsistent with other information.
- bool mStandby; // Whether thread is currently in standby.
- audio_devices_t mOutDevice; // output device
- audio_devices_t mInDevice; // input device
- audio_source_t mAudioSource; // (see audio.h, audio_source_t)
-
- const audio_io_handle_t mId;
- Vector< sp<EffectChain> > mEffectChains;
-
- static const int kNameLength = 16; // prctl(PR_SET_NAME) limit
- char mName[kNameLength];
- sp<IPowerManager> mPowerManager;
- sp<IBinder> mWakeLockToken;
- const sp<PMDeathRecipient> mDeathRecipient;
- // list of suspended effects per session and per type. The first vector is
- // keyed by session ID, the second by type UUID timeLow field
- KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > > mSuspendedSessions;
- };
-
struct stream_type_t {
stream_type_t()
: volume(1.0f),
@@ -760,644 +371,10 @@ private:
};
// --- PlaybackThread ---
- class PlaybackThread : public ThreadBase {
- public:
-
- enum mixer_state {
- MIXER_IDLE, // no active tracks
- MIXER_TRACKS_ENABLED, // at least one active track, but no track has any data ready
- MIXER_TRACKS_READY // at least one active track, and at least one track has data
- // standby mode does not have an enum value
- // suspend by audio policy manager is orthogonal to mixer state
- };
-
- // playback track
- class Track : public TrackBase, public VolumeProvider {
- public:
- Track( PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- IAudioFlinger::track_flags_t flags);
- virtual ~Track();
-
- static void appendDumpHeader(String8& result);
- void dump(char* buffer, size_t size);
- virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
- int triggerSession = 0);
- virtual void stop();
- void pause();
-
- void flush();
- void destroy();
- void mute(bool);
- int name() const { return mName; }
-
- audio_stream_type_t streamType() const {
- return mStreamType;
- }
- status_t attachAuxEffect(int EffectId);
- void setAuxBuffer(int EffectId, int32_t *buffer);
- int32_t *auxBuffer() const { return mAuxBuffer; }
- void setMainBuffer(int16_t *buffer) { mMainBuffer = buffer; }
- int16_t *mainBuffer() const { return mMainBuffer; }
- int auxEffectId() const { return mAuxEffectId; }
-
- // implement FastMixerState::VolumeProvider interface
- virtual uint32_t getVolumeLR();
- virtual status_t setSyncEvent(const sp<SyncEvent>& event);
-
- protected:
- // for numerous
- friend class PlaybackThread;
- friend class MixerThread;
- friend class DirectOutputThread;
-
- Track(const Track&);
- Track& operator = (const Track&);
-
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
- // releaseBuffer() not overridden
-
- virtual size_t framesReady() const;
-
- bool isMuted() const { return mMute; }
- bool isPausing() const {
- return mState == PAUSING;
- }
- bool isPaused() const {
- return mState == PAUSED;
- }
- bool isResuming() const {
- return mState == RESUMING;
- }
- bool isReady() const;
- void setPaused() { mState = PAUSED; }
- void reset();
-
- bool isOutputTrack() const {
- return (mStreamType == AUDIO_STREAM_CNT);
- }
-
- sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
-
- bool presentationComplete(size_t framesWritten, size_t audioHalFrames);
-
- public:
- void triggerEvents(AudioSystem::sync_event_t type);
- virtual bool isTimedTrack() const { return false; }
- bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
-
- protected:
-
- // written by Track::mute() called by binder thread(s), without a mutex or barrier.
- // read by Track::isMuted() called by the playback thread, also without a mutex or barrier.
- // The lack of mutex or barrier is safe because the mute status is only ever used on its own.
- bool mMute;
-
- // FILLED state is used to suppress the volume ramp at the beginning of playback
- enum {FS_INVALID, FS_FILLING, FS_FILLED, FS_ACTIVE};
- mutable uint8_t mFillingUpStatus;
- int8_t mRetryCount;
- const sp<IMemory> mSharedBuffer;
- bool mResetDone;
- const audio_stream_type_t mStreamType;
- int mName; // track name on the normal mixer,
- // allocated statically at track creation time,
- // and is even allocated (though unused) for fast tracks
- // FIXME don't allocate track name for fast tracks
- int16_t *mMainBuffer;
- int32_t *mAuxBuffer;
- int mAuxEffectId;
- bool mHasVolumeController;
- size_t mPresentationCompleteFrames; // number of frames written to the audio HAL
- // when this track will be fully rendered
- private:
- IAudioFlinger::track_flags_t mFlags;
-
- // The following fields are only for fast tracks, and should be in a subclass
- int mFastIndex; // index within FastMixerState::mFastTracks[];
- // either mFastIndex == -1 if not isFastTrack()
- // or 0 < mFastIndex < FastMixerState::kMaxFastTracks because
- // index 0 is reserved for normal mixer's submix;
- // index is allocated statically at track creation time
- // but the slot is only used if track is active
- FastTrackUnderruns mObservedUnderruns; // Most recently observed value of
- // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns
- uint32_t mUnderrunCount; // Counter of total number of underruns, never reset
- volatile float mCachedVolume; // combined master volume and stream type volume;
- // 'volatile' means accessed without lock or
- // barrier, but is read/written atomically
- }; // end of Track
-
- class TimedTrack : public Track {
- public:
- static sp<TimedTrack> create(PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId);
- virtual ~TimedTrack();
-
- class TimedBuffer {
- public:
- TimedBuffer();
- TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
- const sp<IMemory>& buffer() const { return mBuffer; }
- int64_t pts() const { return mPTS; }
- uint32_t position() const { return mPosition; }
- void setPosition(uint32_t pos) { mPosition = pos; }
- private:
- sp<IMemory> mBuffer;
- int64_t mPTS;
- uint32_t mPosition;
- };
-
- // Mixer facing methods.
- virtual bool isTimedTrack() const { return true; }
- virtual size_t framesReady() const;
-
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts);
- virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
-
- // Client/App facing methods.
- status_t allocateTimedBuffer(size_t size,
- sp<IMemory>* buffer);
- status_t queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts);
- status_t setMediaTimeTransform(const LinearTransform& xform,
- TimedAudioTrack::TargetTimeline target);
-
- private:
- TimedTrack(PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId);
-
- void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
- void timedYieldSilence_l(uint32_t numFrames,
- AudioBufferProvider::Buffer* buffer);
- void trimTimedBufferQueue_l();
- void trimTimedBufferQueueHead_l(const char* logTag);
- void updateFramesPendingAfterTrim_l(const TimedBuffer& buf,
- const char* logTag);
-
- uint64_t mLocalTimeFreq;
- LinearTransform mLocalTimeToSampleTransform;
- LinearTransform mMediaTimeToSampleTransform;
- sp<MemoryDealer> mTimedMemoryDealer;
-
- Vector<TimedBuffer> mTimedBufferQueue;
- bool mQueueHeadInFlight;
- bool mTrimQueueHeadOnRelease;
- uint32_t mFramesPendingInQueue;
-
- uint8_t* mTimedSilenceBuffer;
- uint32_t mTimedSilenceBufferSize;
- mutable Mutex mTimedBufferQueueLock;
- bool mTimedAudioOutputOnTime;
- CCHelper mCCHelper;
-
- Mutex mMediaTimeTransformLock;
- LinearTransform mMediaTimeTransform;
- bool mMediaTimeTransformValid;
- TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
- };
-
-
- // playback track
- class OutputTrack : public Track {
- public:
-
- class Buffer: public AudioBufferProvider::Buffer {
- public:
- int16_t *mBuffer;
- };
-
- OutputTrack(PlaybackThread *thread,
- DuplicatingThread *sourceThread,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount);
- virtual ~OutputTrack();
-
- virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
- int triggerSession = 0);
- virtual void stop();
- bool write(int16_t* data, uint32_t frames);
- bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
- bool isActive() const { return mActive; }
- const wp<ThreadBase>& thread() const { return mThread; }
-
- private:
-
- enum {
- NO_MORE_BUFFERS = 0x80000001, // same name as in AudioTrack.h; the value is allowed to differ
- };
-
- status_t obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs);
- void clearBufferQueue();
-
- // Maximum number of pending buffers allocated by OutputTrack::write()
- static const uint8_t kMaxOverFlowBuffers = 10;
-
- Vector < Buffer* > mBufferQueue;
- AudioBufferProvider::Buffer mOutBuffer;
- bool mActive;
- DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
- }; // end of OutputTrack
-
- PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, type_t type);
- virtual ~PlaybackThread();
-
- void dump(int fd, const Vector<String16>& args);
-
- // Thread virtuals
- virtual status_t readyToRun();
- virtual bool threadLoop();
-
- // RefBase
- virtual void onFirstRef();
-
-protected:
- // Code snippets that were lifted up out of threadLoop()
- virtual void threadLoop_mix() = 0;
- virtual void threadLoop_sleepTime() = 0;
- virtual void threadLoop_write();
- virtual void threadLoop_standby();
- virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
-
- // prepareTracks_l reads and writes mActiveTracks, and returns
- // the pending set of tracks to remove via Vector 'tracksToRemove'. The caller
- // is responsible for clearing or destroying this Vector later on, when it
- // is safe to do so. That will drop the final ref count and destroy the tracks.
- virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
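
prepareTracks_l() and the threadLoop_*() snippets above form a template-method split: the base class drives each cycle and the subclasses supply the mixing, sleeping and writing steps. A minimal self-contained illustration of that shape, with placeholder names (a sketch of the pattern, not the actual loop):

#include <cstdio>

class ThreadSketch {
public:
    virtual ~ThreadSketch() {}
    // one loop cycle driven by the base class
    void loopOnce() {
        if (prepare()) {
            threadLoop_mix();         // subclass produces a buffer of mixed audio
            threadLoop_write();       // hand the buffer to the output
        } else {
            threadLoop_sleepTime();   // nothing ready: subclass decides how long to wait
        }
    }
protected:
    virtual bool prepare() = 0;               // stands in for prepareTracks_l()
    virtual void threadLoop_mix() = 0;
    virtual void threadLoop_sleepTime() = 0;
    virtual void threadLoop_write() { std::puts("write to output"); }
};

class MixerSketch : public ThreadSketch {
protected:
    bool prepare() override { return true; }
    void threadLoop_mix() override { std::puts("mix active tracks"); }
    void threadLoop_sleepTime() override { std::puts("compute sleep time"); }
};
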
-
- // ThreadBase virtuals
- virtual void preExit();
-
-public:
-
- virtual status_t initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
-
- // return estimated latency in milliseconds, as reported by HAL
- uint32_t latency() const;
- // same, but lock must already be held
- uint32_t latency_l() const;
-
- void setMasterVolume(float value);
- void setMasterMute(bool muted);
-
- void setStreamVolume(audio_stream_type_t stream, float value);
- void setStreamMute(audio_stream_type_t stream, bool muted);
-
- float streamVolume(audio_stream_type_t stream) const;
-
- sp<Track> createTrack_l(
- const sp<AudioFlinger::Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- IAudioFlinger::track_flags_t flags,
- pid_t tid,
- status_t *status);
-
- AudioStreamOut* getOutput() const;
- AudioStreamOut* clearOutput();
- virtual audio_stream_t* stream() const;
-
- // a very large number of suspend() calls will eventually wrap around, but this is unlikely
- void suspend() { (void) android_atomic_inc(&mSuspended); }
- void restore()
- {
- // if restore() is done without suspend(), get back into
- // range so that the next suspend() will operate correctly
- if (android_atomic_dec(&mSuspended) <= 0) {
- android_atomic_release_store(0, &mSuspended);
- }
- }
- bool isSuspended() const
- { return android_atomic_acquire_load(&mSuspended) > 0; }
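
suspend() and restore() above are reference counted: nested suspends keep the thread from writing to the HAL until every caller has restored. The same counter discipline, sketched with std::atomic instead of the android_atomic_* helpers (illustrative, not the real class):

#include <atomic>
#include <cstdint>

class SuspendCounterSketch {
    std::atomic<int32_t> mSuspended{0};
public:
    void suspend() { mSuspended.fetch_add(1, std::memory_order_relaxed); }
    void restore() {
        // fetch_sub returns the previous value; if restore() was called without a matching
        // suspend(), snap back to zero so the next suspend() still works
        if (mSuspended.fetch_sub(1, std::memory_order_relaxed) <= 0) {
            mSuspended.store(0, std::memory_order_release);
        }
    }
    bool isSuspended() const {
        return mSuspended.load(std::memory_order_acquire) > 0;
    }
};
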
-
- virtual String8 getParameters(const String8& keys);
- virtual void audioConfigChanged_l(int event, int param = 0);
- status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
- int16_t *mixBuffer() const { return mMixBuffer; };
-
- virtual void detachAuxEffect_l(int effectId);
- status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track,
- int EffectId);
- status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track,
- int EffectId);
-
- virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
- virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
- virtual uint32_t hasAudioSession(int sessionId) const;
- virtual uint32_t getStrategyForSession_l(int sessionId);
-
-
- virtual status_t setSyncEvent(const sp<SyncEvent>& event);
- virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
- void invalidateTracks(audio_stream_type_t streamType);
-
-
- protected:
- int16_t* mMixBuffer;
-
- // suspend count, > 0 means suspended. While suspended, the thread continues to pull from
- // tracks and mix, but doesn't write to the HAL. A2DP and SCO HAL implementations can't handle
- // concurrent use of both of them, so Audio Policy Service suspends one of the threads to
- // work around that restriction.
- // 'volatile' means accessed via atomic operations and no lock.
- volatile int32_t mSuspended;
-
- int mBytesWritten;
- private:
- // mMasterMute is in both PlaybackThread and AudioFlinger. When a
- // PlaybackThread needs to find out if master-muted, it checks its local
- // copy rather than the one in AudioFlinger. This optimization saves a lock.
- bool mMasterMute;
- void setMasterMute_l(bool muted) { mMasterMute = muted; }
- protected:
- SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<>
-
- // Allocate a track name for a given channel mask.
- // Returns name >= 0 if successful, -1 on failure.
- virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId) = 0;
- virtual void deleteTrackName_l(int name) = 0;
-
- // Time to sleep between cycles when:
- virtual uint32_t activeSleepTimeUs() const; // mixer state MIXER_TRACKS_ENABLED
- virtual uint32_t idleSleepTimeUs() const = 0; // mixer state MIXER_IDLE
- virtual uint32_t suspendSleepTimeUs() const = 0; // audio policy manager suspended us
- // No sleep when mixer state == MIXER_TRACKS_READY; relies on audio HAL stream->write()
- // No sleep in standby mode; waits on a condition
-
- // Code snippets that are temporarily lifted up out of threadLoop() until the merge
- void checkSilentMode_l();
-
- // Non-trivial for DUPLICATING only
- virtual void saveOutputTracks() { }
- virtual void clearOutputTracks() { }
-
- // Cache various calculated values, at threadLoop() entry and after a parameter change
- virtual void cacheParameters_l();
-
- virtual uint32_t correctLatency(uint32_t latency) const;
-
- private:
-
- friend class AudioFlinger; // for numerous
-
- PlaybackThread(const Client&);
- PlaybackThread& operator = (const PlaybackThread&);
-
- status_t addTrack_l(const sp<Track>& track);
- void destroyTrack_l(const sp<Track>& track);
- void removeTrack_l(const sp<Track>& track);
-
- void readOutputParameters();
-
- virtual void dumpInternals(int fd, const Vector<String16>& args);
- void dumpTracks(int fd, const Vector<String16>& args);
-
- SortedVector< sp<Track> > mTracks;
- // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by DuplicatingThread
- stream_type_t mStreamTypes[AUDIO_STREAM_CNT + 1];
- AudioStreamOut *mOutput;
-
- float mMasterVolume;
- nsecs_t mLastWriteTime;
- int mNumWrites;
- int mNumDelayedWrites;
- bool mInWrite;
-
- // FIXME rename these former local variables of threadLoop to standard "m" names
- nsecs_t standbyTime;
- size_t mixBufferSize;
-
- // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l()
- uint32_t activeSleepTime;
- uint32_t idleSleepTime;
-
- uint32_t sleepTime;
-
- // mixer status returned by prepareTracks_l()
- mixer_state mMixerStatus; // current cycle
- // previous cycle when in prepareTracks_l()
- mixer_state mMixerStatusIgnoringFastTracks;
- // FIXME or a separate ready state per track
-
- // FIXME move these declarations into the specific sub-class that needs them
- // MIXER only
- uint32_t sleepTimeShift;
-
- // same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value
- nsecs_t standbyDelay;
-
- // MIXER only
- nsecs_t maxPeriod;
-
- // DUPLICATING only
- uint32_t writeFrames;
-
- private:
- // The HAL output sink is treated as non-blocking, but the current implementation is blocking
- sp<NBAIO_Sink> mOutputSink;
- // If a fast mixer is present, the blocking pipe sink, otherwise clear
- sp<NBAIO_Sink> mPipeSink;
- // The current sink for the normal mixer to write its (sub)mix, mOutputSink or mPipeSink
- sp<NBAIO_Sink> mNormalSink;
- // For dumpsys
- sp<NBAIO_Sink> mTeeSink;
- sp<NBAIO_Source> mTeeSource;
- uint32_t mScreenState; // cached copy of gScreenState
- public:
- virtual bool hasFastMixer() const = 0;
- virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const
- { FastTrackUnderruns dummy; return dummy; }
-
- protected:
- // accessed by both binder threads and within threadLoop(), lock on mutex needed
- unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available
-
- };
-
- class MixerThread : public PlaybackThread {
- public:
- MixerThread (const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output,
- audio_io_handle_t id,
- audio_devices_t device,
- type_t type = MIXER);
- virtual ~MixerThread();
-
- // Thread virtuals
-
- virtual bool checkForNewParameters_l();
- virtual void dumpInternals(int fd, const Vector<String16>& args);
-
- protected:
- virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
- virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId);
- virtual void deleteTrackName_l(int name);
- virtual uint32_t idleSleepTimeUs() const;
- virtual uint32_t suspendSleepTimeUs() const;
- virtual void cacheParameters_l();
-
- // threadLoop snippets
- virtual void threadLoop_write();
- virtual void threadLoop_standby();
- virtual void threadLoop_mix();
- virtual void threadLoop_sleepTime();
- virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
- virtual uint32_t correctLatency(uint32_t latency) const;
-
- AudioMixer* mAudioMixer; // normal mixer
- private:
- // one-time initialization, no locks required
- FastMixer* mFastMixer; // non-NULL if there is also a fast mixer
- sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread
-
- // contents are not guaranteed to be consistent, no locks required
- FastMixerDumpState mFastMixerDumpState;
-#ifdef STATE_QUEUE_DUMP
- StateQueueObserverDump mStateQueueObserverDump;
- StateQueueMutatorDump mStateQueueMutatorDump;
-#endif
- AudioWatchdogDump mAudioWatchdogDump;
-
- // accessible only within the threadLoop(), no locks required
- // mFastMixer->sq() // for mutating and pushing state
- int32_t mFastMixerFutex; // for cold idle
-
- public:
- virtual bool hasFastMixer() const { return mFastMixer != NULL; }
- virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const {
- ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks);
- return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
- }
- };
-
- class DirectOutputThread : public PlaybackThread {
- public:
-
- DirectOutputThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device);
- virtual ~DirectOutputThread();
-
- // Thread virtuals
-
- virtual bool checkForNewParameters_l();
-
- protected:
- virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId);
- virtual void deleteTrackName_l(int name);
- virtual uint32_t activeSleepTimeUs() const;
- virtual uint32_t idleSleepTimeUs() const;
- virtual uint32_t suspendSleepTimeUs() const;
- virtual void cacheParameters_l();
-
- // threadLoop snippets
- virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
- virtual void threadLoop_mix();
- virtual void threadLoop_sleepTime();
-
- // volumes last sent to audio HAL with stream->set_volume()
- float mLeftVolFloat;
- float mRightVolFloat;
-
-private:
- // prepareTracks_l() tells threadLoop_mix() the name of the single active track
- sp<Track> mActiveTrack;
- public:
- virtual bool hasFastMixer() const { return false; }
- };
-
- class DuplicatingThread : public MixerThread {
- public:
- DuplicatingThread (const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
- audio_io_handle_t id);
- virtual ~DuplicatingThread();
-
- // Thread virtuals
- void addOutputTrack(MixerThread* thread);
- void removeOutputTrack(MixerThread* thread);
- uint32_t waitTimeMs() const { return mWaitTimeMs; }
- protected:
- virtual uint32_t activeSleepTimeUs() const;
-
- private:
- bool outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
- protected:
- // threadLoop snippets
- virtual void threadLoop_mix();
- virtual void threadLoop_sleepTime();
- virtual void threadLoop_write();
- virtual void threadLoop_standby();
- virtual void cacheParameters_l();
-
- private:
- // called from threadLoop, addOutputTrack, removeOutputTrack
- virtual void updateWaitTime_l();
- protected:
- virtual void saveOutputTracks();
- virtual void clearOutputTracks();
- private:
-
- uint32_t mWaitTimeMs;
- SortedVector < sp<OutputTrack> > outputTracks;
- SortedVector < sp<OutputTrack> > mOutputTracks;
- public:
- virtual bool hasFastMixer() const { return false; }
- };
- PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
- MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
- RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
- // no range check, AudioFlinger::mLock held
- bool streamMute_l(audio_stream_type_t stream) const
- { return mStreamTypes[stream].mute; }
- // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
- float streamVolume_l(audio_stream_type_t stream) const
- { return mStreamTypes[stream].volume; }
- void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2);
+#include "Threads.h"
- // allocate an audio_io_handle_t, session ID, or effect ID
- uint32_t nextUniqueId();
-
- status_t moveEffectChain_l(int sessionId,
- PlaybackThread *srcThread,
- PlaybackThread *dstThread,
- bool reRegister);
- // return thread associated with primary hardware device, or NULL
- PlaybackThread *primaryPlaybackThread_l() const;
- audio_devices_t primaryOutputDevice_l() const;
-
- sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId);
+#include "Effects.h"
// server side of the client's IAudioTrack
class TrackHandle : public android::BnAudioTrack {
@@ -1423,157 +400,6 @@ private:
const sp<PlaybackThread::Track> mTrack;
};
- void removeClient_l(pid_t pid);
- void removeNotificationClient(pid_t pid);
-
-
- // record thread
- class RecordThread : public ThreadBase, public AudioBufferProvider
- // derives from AudioBufferProvider interface for use by resampler
- {
- public:
-
- // record track
- class RecordTrack : public TrackBase {
- public:
- RecordTrack(RecordThread *thread,
- const sp<Client>& client,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- int sessionId);
- virtual ~RecordTrack();
-
- virtual status_t start(AudioSystem::sync_event_t event, int triggerSession);
- virtual void stop();
-
- void destroy();
-
- // clear the buffer overflow flag
- void clearOverflow() { mOverflow = false; }
- // set the buffer overflow flag and return previous value
- bool setOverflow() { bool tmp = mOverflow; mOverflow = true; return tmp; }
-
- static void appendDumpHeader(String8& result);
- void dump(char* buffer, size_t size);
-
- private:
- friend class AudioFlinger; // for mState
-
- RecordTrack(const RecordTrack&);
- RecordTrack& operator = (const RecordTrack&);
-
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
- // releaseBuffer() not overridden
-
- bool mOverflow; // overflow on most recent attempt to fill client buffer
- };
-
- RecordThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamIn *input,
- uint32_t sampleRate,
- audio_channel_mask_t channelMask,
- audio_io_handle_t id,
- audio_devices_t device);
- virtual ~RecordThread();
-
- // no addTrack_l ?
- void destroyTrack_l(const sp<RecordTrack>& track);
- void removeTrack_l(const sp<RecordTrack>& track);
-
- void dumpInternals(int fd, const Vector<String16>& args);
- void dumpTracks(int fd, const Vector<String16>& args);
-
- // Thread virtuals
- virtual bool threadLoop();
- virtual status_t readyToRun();
-
- // RefBase
- virtual void onFirstRef();
-
- virtual status_t initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
- sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
- const sp<AudioFlinger::Client>& client,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- int frameCount,
- int sessionId,
- IAudioFlinger::track_flags_t flags,
- pid_t tid,
- status_t *status);
-
- status_t start(RecordTrack* recordTrack,
- AudioSystem::sync_event_t event,
- int triggerSession);
-
- // ask the thread to stop the specified track, and
- // return true if the caller should then do its part of the stopping process
- bool stop_l(RecordTrack* recordTrack);
-
- void dump(int fd, const Vector<String16>& args);
- AudioStreamIn* clearInput();
- virtual audio_stream_t* stream() const;
-
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
- virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
-
- virtual bool checkForNewParameters_l();
- virtual String8 getParameters(const String8& keys);
- virtual void audioConfigChanged_l(int event, int param = 0);
- void readInputParameters();
- virtual unsigned int getInputFramesLost();
-
- virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
- virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
- virtual uint32_t hasAudioSession(int sessionId) const;
-
- // Return the set of unique session IDs across all tracks.
- // The keys are the session IDs, and the associated values are meaningless.
- // FIXME replace by Set [and implement Bag/Multiset for other uses].
- KeyedVector<int, bool> sessionIds() const;
-
- virtual status_t setSyncEvent(const sp<SyncEvent>& event);
- virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
-
- static void syncStartEventCallback(const wp<SyncEvent>& event);
- void handleSyncStartEvent(const sp<SyncEvent>& event);
-
- private:
- void clearSyncStartEvent();
-
- // Enter standby if not already in standby, and set mStandby flag
- void standby();
-
- // Call the HAL standby method unconditionally, and don't change mStandby flag
- void inputStandBy();
-
- AudioStreamIn *mInput;
- SortedVector < sp<RecordTrack> > mTracks;
- // mActiveTrack has dual roles: it indicates the current active track, and
- // is used together with mStartStopCond to indicate start()/stop() progress
- sp<RecordTrack> mActiveTrack;
- Condition mStartStopCond;
- AudioResampler *mResampler;
- int32_t *mRsmpOutBuffer;
- int16_t *mRsmpInBuffer;
- size_t mRsmpInIndex;
- size_t mInputBytes;
- const int mReqChannelCount;
- const uint32_t mReqSampleRate;
- ssize_t mBytesRead;
- // sync event triggering actual audio capture. Frames read before this event will
- // be dropped and therefore not read by the application.
- sp<SyncEvent> mSyncStartEvent;
- // number of captured frames to drop after the start sync event has been received.
- // when < 0, the maximum number of frames to drop before starting capture even if the
- // sync event is not received
- ssize_t mFramestoDrop;
- };
-
// server side of the client's IAudioRecord
class RecordHandle : public android::BnAudioRecord {
public:
@@ -1591,343 +417,33 @@ private:
void stop_nonvirtual();
};
- //--- Audio Effect Management
-
- // EffectModule and EffectChain classes both have their own mutex to protect
- // state changes or resource modifications. Always respect the following order
- // if multiple mutexes must be acquired to avoid cross deadlock:
- // AudioFlinger -> ThreadBase -> EffectChain -> EffectModule
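
To make the ordering rule concrete, here is a small self-contained sketch with std::mutex and placeholder names; the point is only that every path needing several of these locks must take them in the same documented order:

#include <mutex>

struct LockLevelsSketch {
    std::mutex flinger, thread, chain, module;
};

void lockedEffectOperation(LockLevelsSketch& l) {
    std::lock_guard<std::mutex> g1(l.flinger);  // 1. AudioFlinger
    std::lock_guard<std::mutex> g2(l.thread);   // 2. ThreadBase
    std::lock_guard<std::mutex> g3(l.chain);    // 3. EffectChain
    std::lock_guard<std::mutex> g4(l.module);   // 4. EffectModule
    // safe to touch effect state here: no other path acquires these locks in a different order
}
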
-
- // The EffectModule class is a wrapper object controlling the effect engine implementation
- // in the effect library. It prevents concurrent calls to process() and command() functions
- // from different client threads. It keeps a list of EffectHandle objects corresponding
- // to all client applications using this effect and notifies applications of effect state,
- // control or parameter changes. It manages the activation state machine to send the appropriate
- // reset, enable and disable commands to the effect engine and provides volume
- // ramping when effects are activated/deactivated.
- // When controlling an auxiliary effect, the EffectModule also provides an input buffer used by
- // the attached track(s) to accumulate their auxiliary channel.
- class EffectModule: public RefBase {
- public:
- EffectModule(ThreadBase *thread,
- const wp<AudioFlinger::EffectChain>& chain,
- effect_descriptor_t *desc,
- int id,
- int sessionId);
- virtual ~EffectModule();
-
- enum effect_state {
- IDLE,
- RESTART,
- STARTING,
- ACTIVE,
- STOPPING,
- STOPPED,
- DESTROYED
- };
-
- int id() const { return mId; }
- void process();
- void updateState();
- status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData);
-
- void reset_l();
- status_t configure();
- status_t init();
- effect_state state() const {
- return mState;
- }
- uint32_t status() {
- return mStatus;
- }
- int sessionId() const {
- return mSessionId;
- }
- status_t setEnabled(bool enabled);
- status_t setEnabled_l(bool enabled);
- bool isEnabled() const;
- bool isProcessEnabled() const;
-
- void setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; }
- int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; }
- void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; }
- int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; }
- void setChain(const wp<EffectChain>& chain) { mChain = chain; }
- void setThread(const wp<ThreadBase>& thread) { mThread = thread; }
- const wp<ThreadBase>& thread() { return mThread; }
-
- status_t addHandle(EffectHandle *handle);
- size_t disconnect(EffectHandle *handle, bool unpinIfLast);
- size_t removeHandle(EffectHandle *handle);
-
- const effect_descriptor_t& desc() const { return mDescriptor; }
- wp<EffectChain>& chain() { return mChain; }
-
- status_t setDevice(audio_devices_t device);
- status_t setVolume(uint32_t *left, uint32_t *right, bool controller);
- status_t setMode(audio_mode_t mode);
- status_t setAudioSource(audio_source_t source);
- status_t start();
- status_t stop();
- void setSuspended(bool suspended);
- bool suspended() const;
-
- EffectHandle* controlHandle_l();
-
- bool isPinned() const { return mPinned; }
- void unPin() { mPinned = false; }
- bool purgeHandles();
- void lock() { mLock.lock(); }
- void unlock() { mLock.unlock(); }
-
- void dump(int fd, const Vector<String16>& args);
-
- protected:
- friend class AudioFlinger; // for mHandles
- bool mPinned;
-
- // Maximum time allocated to effect engines to complete the turn off sequence
- static const uint32_t MAX_DISABLE_TIME_MS = 10000;
-
- EffectModule(const EffectModule&);
- EffectModule& operator = (const EffectModule&);
-
- status_t start_l();
- status_t stop_l();
-
- mutable Mutex mLock; // mutex for process, commands and handles list protection
- wp<ThreadBase> mThread; // parent thread
- wp<EffectChain> mChain; // parent effect chain
- const int mId; // this instance unique ID
- const int mSessionId; // audio session ID
- const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
- effect_config_t mConfig; // input and output audio configuration
- effect_handle_t mEffectInterface; // Effect module C API
- status_t mStatus; // initialization status
- effect_state mState; // current activation state
- Vector<EffectHandle *> mHandles; // list of client handles
- // First handle in mHandles has highest priority and controls the effect module
- uint32_t mMaxDisableWaitCnt; // maximum grace period before forcing an effect off after
- // sending the disable command.
- uint32_t mDisableWaitCnt; // current process() call count during the disable period.
- bool mSuspended; // effect is suspended: temporarily disabled by framework
- };
-
- // The EffectHandle class implements the IEffect interface. It provides resources
- // to receive parameter updates, keeps track of effect control
- // ownership and state, and has a pointer to the EffectModule object it is controlling.
- // There is one EffectHandle object for each application controlling (or using)
- // an effect module.
- // The EffectHandle is obtained by calling AudioFlinger::createEffect().
- class EffectHandle: public android::BnEffect {
- public:
-
- EffectHandle(const sp<EffectModule>& effect,
- const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
- int32_t priority);
- virtual ~EffectHandle();
-
- // IEffect
- virtual status_t enable();
- virtual status_t disable();
- virtual status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData);
- virtual void disconnect();
- private:
- void disconnect(bool unpinIfLast);
- public:
- virtual sp<IMemory> getCblk() const { return mCblkMemory; }
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags);
-
-
- // Give or take control of the effect module
- // - hasControl: true if control is given, false if removed
- // - signal: true if the client app should be signaled of the change, false otherwise
- // - enabled: state of the effect when control is passed
- void setControl(bool hasControl, bool signal, bool enabled);
- void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData);
- void setEnabled(bool enabled);
- bool enabled() const { return mEnabled; }
-
- // Getters
- int id() const { return mEffect->id(); }
- int priority() const { return mPriority; }
- bool hasControl() const { return mHasControl; }
- sp<EffectModule> effect() const { return mEffect; }
- // destroyed_l() must be called with the associated EffectModule mLock held
- bool destroyed_l() const { return mDestroyed; }
-
- void dump(char* buffer, size_t size);
-
- protected:
- friend class AudioFlinger; // for mEffect, mHasControl, mEnabled
- EffectHandle(const EffectHandle&);
- EffectHandle& operator =(const EffectHandle&);
-
- sp<EffectModule> mEffect; // pointer to controlled EffectModule
- sp<IEffectClient> mEffectClient; // callback interface for client notifications
- /*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect()
- sp<IMemory> mCblkMemory; // shared memory for control block
- effect_param_cblk_t* mCblk; // control block for deferred parameter setting via shared memory
- uint8_t* mBuffer; // pointer to parameter area in shared memory
- int mPriority; // client application priority to control the effect
- bool mHasControl; // true if this handle is controlling the effect
- bool mEnabled; // cached enable state: needed when the effect is
- // restored after being suspended
- bool mDestroyed; // Set to true by destructor. Access with EffectModule
- // mLock held
- };
-
- // the EffectChain class represents a group of effects associated with one audio session.
- // There can be any number of EffectChain objects per output mixer thread (PlaybackThread).
- // The EffectChain with session ID 0 contains global effects applied to the output mix.
- // Effects in this chain can be insert or auxiliary. Effects in other chains (attached to tracks)
- // are insert only. The EffectChain maintains an ordered list of effect modules, the order
- // corresponding to the effect processing order. When attached to a track (session ID != 0), it
- // also provides its own input buffer used by the track as an accumulation buffer.
- class EffectChain: public RefBase {
- public:
- EffectChain(const wp<ThreadBase>& wThread, int sessionId);
- EffectChain(ThreadBase *thread, int sessionId);
- virtual ~EffectChain();
-
- // special key used for an entry in mSuspendedEffects keyed vector
- // corresponding to a suspend all request.
- static const int kKeyForSuspendAll = 0;
-
- // minimum duration during which we force calling the effect process function when the last
- // track on a session is stopped or removed, to allow the effect tail to be rendered
- static const int kProcessTailDurationMs = 1000;
-
- void process_l();
-
- void lock() {
- mLock.lock();
- }
- void unlock() {
- mLock.unlock();
- }
-
- status_t addEffect_l(const sp<EffectModule>& handle);
- size_t removeEffect_l(const sp<EffectModule>& handle);
-
- int sessionId() const { return mSessionId; }
- void setSessionId(int sessionId) { mSessionId = sessionId; }
-
- sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
- sp<EffectModule> getEffectFromId_l(int id);
- sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type);
- bool setVolume_l(uint32_t *left, uint32_t *right);
- void setDevice_l(audio_devices_t device);
- void setMode_l(audio_mode_t mode);
- void setAudioSource_l(audio_source_t source);
-
- void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
- mInBuffer = buffer;
- mOwnInBuffer = ownsBuffer;
- }
- int16_t *inBuffer() const {
- return mInBuffer;
- }
- void setOutBuffer(int16_t *buffer) {
- mOutBuffer = buffer;
- }
- int16_t *outBuffer() const {
- return mOutBuffer;
- }
-
- void incTrackCnt() { android_atomic_inc(&mTrackCnt); }
- void decTrackCnt() { android_atomic_dec(&mTrackCnt); }
- int32_t trackCnt() const { return android_atomic_acquire_load(&mTrackCnt); }
-
- void incActiveTrackCnt() { android_atomic_inc(&mActiveTrackCnt);
- mTailBufferCount = mMaxTailBuffers; }
- void decActiveTrackCnt() { android_atomic_dec(&mActiveTrackCnt); }
- int32_t activeTrackCnt() const { return android_atomic_acquire_load(&mActiveTrackCnt); }
-
- uint32_t strategy() const { return mStrategy; }
- void setStrategy(uint32_t strategy)
- { mStrategy = strategy; }
-
- // suspend effect of the given type
- void setEffectSuspended_l(const effect_uuid_t *type,
- bool suspend);
- // suspend all eligible effects
- void setEffectSuspendedAll_l(bool suspend);
- // check if effects should be suspended or restored when a given effect is enabled or disabled
- void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
- bool enabled);
-
- void clearInputBuffer();
+ PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
+ MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
+ RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
+ // no range check, AudioFlinger::mLock held
+ bool streamMute_l(audio_stream_type_t stream) const
+ { return mStreamTypes[stream].mute; }
+ // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
+ float streamVolume_l(audio_stream_type_t stream) const
+ { return mStreamTypes[stream].volume; }
+ void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2);
- void dump(int fd, const Vector<String16>& args);
+ // allocate an audio_io_handle_t, session ID, or effect ID
+ uint32_t nextUniqueId();
- protected:
- friend class AudioFlinger; // for mThread, mEffects
- EffectChain(const EffectChain&);
- EffectChain& operator =(const EffectChain&);
+ status_t moveEffectChain_l(int sessionId,
+ PlaybackThread *srcThread,
+ PlaybackThread *dstThread,
+ bool reRegister);
+ // return thread associated with primary hardware device, or NULL
+ PlaybackThread *primaryPlaybackThread_l() const;
+ audio_devices_t primaryOutputDevice_l() const;
- class SuspendedEffectDesc : public RefBase {
- public:
- SuspendedEffectDesc() : mRefCount(0) {}
+ sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId);
- int mRefCount;
- effect_uuid_t mType;
- wp<EffectModule> mEffect;
- };
- // get a list of effect modules to suspend when an effect of the type
- // passed is enabled.
- void getSuspendEligibleEffects(Vector< sp<EffectModule> > &effects);
-
- // get an effect module if it is currently enabled
- sp<EffectModule> getEffectIfEnabled(const effect_uuid_t *type);
- // true if the effect whose descriptor is passed can be suspended
- // OEMs can modify the rules implemented in this method to exclude specific effect
- // types or implementations from the suspend/restore mechanism.
- bool isEffectEligibleForSuspend(const effect_descriptor_t& desc);
-
- void clearInputBuffer_l(sp<ThreadBase> thread);
-
- wp<ThreadBase> mThread; // parent mixer thread
- Mutex mLock; // mutex protecting effect list
- Vector< sp<EffectModule> > mEffects; // list of effect modules
- int mSessionId; // audio session ID
- int16_t *mInBuffer; // chain input buffer
- int16_t *mOutBuffer; // chain output buffer
-
- // 'volatile' here means these are accessed with atomic operations instead of a mutex
- volatile int32_t mActiveTrackCnt; // number of active tracks connected
- volatile int32_t mTrackCnt; // number of tracks connected
-
- int32_t mTailBufferCount; // current effect tail buffer count
- int32_t mMaxTailBuffers; // maximum effect tail buffers
- bool mOwnInBuffer; // true if the chain owns its input buffer
- int mVolumeCtrlIdx; // index of insert effect having control over volume
- uint32_t mLeftVolume; // previous volume on left channel
- uint32_t mRightVolume; // previous volume on right channel
- uint32_t mNewLeftVolume; // new volume on left channel
- uint32_t mNewRightVolume; // new volume on right channel
- uint32_t mStrategy; // strategy for this effect chain
- // mSuspendedEffects lists all effects currently suspended in the chain.
- // Use the effect type UUID timeLow field as the key. There is no real risk of identical
- // timeLow fields among effect type UUIDs.
- // Updated by updateSuspendedSessions_l() only.
- KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
- };
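
A stand-alone sketch of the keying scheme noted above, with std::map and a simplified uuid type in place of the Android KeyedVector and effect_uuid_t (names are illustrative):

#include <cstdint>
#include <map>
#include <memory>

struct UuidSketch { uint32_t timeLow; uint16_t timeMid; uint16_t timeHiAndVersion; /* ... */ };

struct SuspendedDescSketch {
    int refCount = 0;      // outstanding suspend requests for this effect type
    UuidSketch type{};
};

// keyed by timeLow only: a cheap integer compare, and collisions among effect type UUIDs
// are not expected in practice
std::map<uint32_t, std::shared_ptr<SuspendedDescSketch>> gSuspendedSketch;

void suspendTypeSketch(const UuidSketch& type) {
    auto& desc = gSuspendedSketch[type.timeLow];
    if (!desc) {
        desc = std::make_shared<SuspendedDescSketch>();
        desc->type = type;
    }
    desc->refCount++;      // a matching restore would decrement and erase the entry at zero
}
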
+ void removeClient_l(pid_t pid);
+ void removeNotificationClient(pid_t pid);
class AudioHwDevice {
public:
@@ -2064,8 +580,16 @@ private:
// for use from destructor
status_t closeOutput_nonvirtual(audio_io_handle_t output);
status_t closeInput_nonvirtual(audio_io_handle_t input);
+
+ // all record threads serially share a common tee sink, which is re-created on format change
+ sp<NBAIO_Sink> mRecordTeeSink;
+ sp<NBAIO_Source> mRecordTeeSource;
+
+public:
+ static void dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id = 0);
};
+#undef INCLUDING_FROM_AUDIOFLINGER_H
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index af169d5..b3ca877 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -106,8 +106,16 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr
ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u",
maxNumTracks, MAX_NUM_TRACKS);
+ // AudioMixer is not yet capable of more than 32 active track inputs
+ ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS);
+
+ // AudioMixer is not yet capable of multi-channel output beyond stereo
+ ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS);
+
LocalClock lc;
+ pthread_once(&sOnceControl, &sInitRoutine);
+
mState.enabledTracks= 0;
mState.needsChanged = 0;
mState.frameCount = frameCount;
@@ -121,8 +129,6 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr
// and mTrackNames is initially 0. However, leave it here until that's verified.
track_t* t = mState.tracks;
for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
- // FIXME redundant per track
- t->localTimeFreq = lc.getLocalFreq();
t->resampler = NULL;
t->downmixerBufferProvider = NULL;
t++;
@@ -192,7 +198,6 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId)
t->sessionId = sessionId;
// setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
t->bufferProvider = NULL;
- t->downmixerBufferProvider = NULL;
t->buffer.raw = NULL;
// no initialization needed
// t->buffer.frameCount
@@ -203,7 +208,7 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId)
// setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
t->mainBuffer = NULL;
t->auxBuffer = NULL;
- // see t->localTimeFreq in constructor above
+ t->downmixerBufferProvider = NULL;
status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask);
if (status == OK) {
@@ -556,7 +561,7 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
// the resampler sees the number of channels after the downmixer, if any
downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount,
devSampleRate, quality);
- resampler->setLocalTimeFreq(localTimeFreq);
+ resampler->setLocalTimeFreq(sLocalTimeFreq);
}
return true;
}
@@ -760,7 +765,8 @@ void AudioMixer::process__validate(state_t* state, int64_t pts)
}
-void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount,
+ int32_t* temp, int32_t* aux)
{
t->resampler->setSampleRate(t->sampleRate);
@@ -793,11 +799,13 @@ void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram
}
}
-void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp,
+ int32_t* aux)
{
}
-void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
int32_t vl = t->prevVolume[0];
int32_t vr = t->prevVolume[1];
@@ -839,7 +847,8 @@ void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, i
t->adjustVolumeRamp(aux != NULL);
}
-void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
const int16_t vl = t->volume[0];
const int16_t vr = t->volume[1];
@@ -867,7 +876,8 @@ void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32
}
}
-void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
const int16_t *in = static_cast<const int16_t *>(t->in);
@@ -957,7 +967,8 @@ void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount
t->in = in;
}
-void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
const int16_t *in = static_cast<int16_t const *>(t->in);
@@ -1142,7 +1153,8 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
while (outFrames) {
size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
if (inFrames) {
- t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux);
+ t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
+ state->resampleTemp, aux);
t.frameCount -= inFrames;
outFrames -= inFrames;
if (CC_UNLIKELY(aux != NULL)) {
@@ -1151,7 +1163,8 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
}
if (t.frameCount == 0 && outFrames) {
t.bufferProvider->releaseBuffer(&t.buffer);
- t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames);
+ t.buffer.frameCount = (state->frameCount - numFrames) -
+ (BLOCKSIZE - outFrames);
int64_t outputPTS = calculateOutputPTS(
t, pts, numFrames + (BLOCKSIZE - outFrames));
t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
@@ -1241,7 +1254,8 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
if (CC_UNLIKELY(aux != NULL)) {
aux += outFrames;
}
- t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux);
+ t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount,
+ state->resampleTemp, aux);
outFrames += t.buffer.frameCount;
t.bufferProvider->releaseBuffer(&t.buffer);
}
@@ -1281,7 +1295,8 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
// been enabled for mixing.
if (in == NULL || ((unsigned long)in & 3)) {
memset(out, 0, numFrames*MAX_NUM_CHANNELS*sizeof(int16_t));
- ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: buffer %p track %d, channels %d, needs %08x",
+ ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: "
+ "buffer %p track %d, channels %d, needs %08x",
in, i, t.channelCount, t.needs);
return;
}
@@ -1423,7 +1438,16 @@ int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
if (AudioBufferProvider::kInvalidPTS == basePTS)
return AudioBufferProvider::kInvalidPTS;
- return basePTS + ((outputFrameIndex * t.localTimeFreq) / t.sampleRate);
+ return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate);
+}
+
+/*static*/ uint64_t AudioMixer::sLocalTimeFreq;
+/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
+
+/*static*/ void AudioMixer::sInitRoutine()
+{
+ LocalClock lc;
+ sLocalTimeFreq = lc.getLocalFreq();
}
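
The hunk above replaces a per-track copy of the local-time frequency with a single static value initialized exactly once through pthread_once. The idiom in isolation, with placeholder names and a stand-in constant instead of LocalClock:

#include <pthread.h>
#include <stdint.h>

static uint64_t gLocalTimeFreqSketch;                 // written once, then only read
static pthread_once_t gOnceSketch = PTHREAD_ONCE_INIT;

static void initLocalTimeFreqSketch() {
    gLocalTimeFreqSketch = 1000000000ULL;             // stand-in for LocalClock().getLocalFreq()
}

uint64_t localTimeFreqSketch() {
    pthread_once(&gOnceSketch, initLocalTimeFreqSketch);  // runs the init routine at most once
    return gLocalTimeFreqSketch;
}
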
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 6333357..fd21fda 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -41,8 +41,15 @@ public:
/*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed
+
+ // This mixer has a hard-coded upper limit of 32 active track inputs.
+ // Adding support for > 32 tracks would require more than simply changing this value.
static const uint32_t MAX_NUM_TRACKS = 32;
// maximum number of channels supported by the mixer
+
+ // This mixer has a hard-coded upper limit of 2 channels for output.
+ // There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect.
+ // Adding support for > 2 channel output would require more than simply changing this value.
static const uint32_t MAX_NUM_CHANNELS = 2;
// maximum number of channels supported for the content
static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = 8;
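
One reason the 32-track limit is more than a constant to bump: a mixer of this shape typically keeps its enabled inputs in a 32-bit mask (the constructor hunk above zeroes mState.enabledTracks), so every loop over that mask assumes one bit per track. An illustrative sketch of that iteration style, assuming such a mask; it is not the actual mixer code:

#include <stdint.h>

// Visit the index of every set bit, highest first; with a 32-bit mask the
// track count cannot exceed 32 without widening the representation.
static void forEachEnabledTrackSketch(uint32_t enabledTracks, void (*visit)(int trackIndex)) {
    uint32_t en = enabledTracks;
    while (en != 0) {
        const int i = 31 - __builtin_clz(en);   // index of the highest set bit
        en &= ~(1u << i);                       // clear it so the loop terminates
        visit(i);
    }
}
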
@@ -139,7 +146,8 @@ private:
struct track_t;
class DownmixerBufferProvider;
- typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
+ typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
+ int32_t* aux);
static const int BLOCKSIZE = 16; // 4 cache lines
struct track_t {
@@ -188,12 +196,12 @@ private:
// 16-byte boundary
- uint64_t localTimeFreq;
-
DownmixerBufferProvider* downmixerBufferProvider; // 4 bytes
int32_t sessionId;
+ int32_t padding[2];
+
// 16-byte boundary
bool setResampler(uint32_t sampleRate, uint32_t devSampleRate);
@@ -254,12 +262,17 @@ private:
static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
- static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
- static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+ static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux);
+ static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux);
static void process__validate(state_t* state, int64_t pts);
static void process__nop(state_t* state, int64_t pts);
@@ -274,6 +287,10 @@ private:
static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
int outputFrameIndex);
+
+ static uint64_t sLocalTimeFreq;
+ static pthread_once_t sOnceControl;
+ static void sInitRoutine();
};
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 8b99bd2..b86d3ae 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -145,7 +145,7 @@ status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
return BAD_VALUE;
}
- ALOGV("setDeviceConnectionState() tid %d", gettid());
+ ALOGV("setDeviceConnectionState()");
Mutex::Autolock _l(mLock);
return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
state, device_address);
@@ -174,7 +174,7 @@ status_t AudioPolicyService::setPhoneState(audio_mode_t state)
return BAD_VALUE;
}
- ALOGV("setPhoneState() tid %d", gettid());
+ ALOGV("setPhoneState()");
// TODO: check if it is more appropriate to do it in platform specific policy manager
AudioSystem::setMode(state);
@@ -199,7 +199,7 @@ status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
return BAD_VALUE;
}
- ALOGV("setForceUse() tid %d", gettid());
+ ALOGV("setForceUse()");
Mutex::Autolock _l(mLock);
mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
return NO_ERROR;
@@ -225,9 +225,10 @@ audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
if (mpAudioPolicy == NULL) {
return 0;
}
- ALOGV("getOutput() tid %d", gettid());
+ ALOGV("getOutput()");
Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask, flags);
+ return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask,
+ flags);
}
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -237,7 +238,7 @@ status_t AudioPolicyService::startOutput(audio_io_handle_t output,
if (mpAudioPolicy == NULL) {
return NO_INIT;
}
- ALOGV("startOutput() tid %d", gettid());
+ ALOGV("startOutput()");
Mutex::Autolock _l(mLock);
return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
}
@@ -249,7 +250,7 @@ status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
if (mpAudioPolicy == NULL) {
return NO_INIT;
}
- ALOGV("stopOutput() tid %d", gettid());
+ ALOGV("stopOutput()");
Mutex::Autolock _l(mLock);
return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
}
@@ -259,7 +260,7 @@ void AudioPolicyService::releaseOutput(audio_io_handle_t output)
if (mpAudioPolicy == NULL) {
return;
}
- ALOGV("releaseOutput() tid %d", gettid());
+ ALOGV("releaseOutput()");
Mutex::Autolock _l(mLock);
mpAudioPolicy->release_output(mpAudioPolicy, output);
}
@@ -280,7 +281,7 @@ audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
Mutex::Autolock _l(mLock);
// the audio_in_acoustics_t parameter is ignored by get_input()
audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
- format, channelMask, (audio_in_acoustics_t) 0);
+ format, channelMask, (audio_in_acoustics_t) 0);
if (input == 0) {
return input;
@@ -533,7 +534,7 @@ status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
}
void AudioPolicyService::binderDied(const wp<IBinder>& who) {
- ALOGW("binderDied() %p, tid %d, calling pid %d", who.unsafe_get(), gettid(),
+ ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(),
IPCThreadState::self()->getCallingPid());
}
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 63f9549..92653c1 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -142,11 +142,11 @@ private:
status_t dumpInternals(int fd);
// Thread used for tone playback and to send audio config commands to audio flinger
- // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because startTone()
- // and stopTone() are normally called with mLock locked and requesting a tone start or stop will cause
- // calls to AudioPolicyService and an attempt to lock mLock.
- // For audio config commands, it is necessary because audio flinger requires that the calling process (user)
- // has permission to modify audio settings.
+ // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
+ // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
+ // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+ // For audio config commands, it is necessary because audio flinger requires that the calling
+ // process (user) has permission to modify audio settings.
class AudioCommandThread : public Thread {
class AudioCommand;
public:
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
new file mode 100644
index 0000000..74ba59e
--- /dev/null
+++ b/services/audioflinger/Effects.cpp
@@ -0,0 +1,1684 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <audio_effects/effect_visualizer.h>
+#include <audio_utils/primitives.h>
+#include <private/media/AudioEffectShared.h>
+#include <media/EffectsFactoryApi.h>
+
+#include "AudioFlinger.h"
+#include "ServiceUtilities.h"
+
+// ----------------------------------------------------------------------------
+
+// Note: the following macro is used for extremely verbose logging messages. In
+// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
+// 0; but one side effect of this is to turn on all LOGVs as well. Some messages
+// are so verbose that we want to suppress them even when we have ALOG_ASSERT
+// turned on. Do not uncomment the #def below unless you really know what you
+// are doing and want to see all of the extremely verbose messages.
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
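A small illustration of the macro's intent (the call below is hypothetical, not part of the patch): with VERY_VERY_VERBOSE_LOGGING left commented out, ALOGVV() expands to the empty do/while and generates no code; defining it turns every such call into a regular ALOGV().

    // Hypothetical per-buffer trace; compiled out unless VERY_VERY_VERBOSE_LOGGING is defined.
    ALOGVV("process() session %d, %d frames", sessionId, frameCount);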
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+// EffectModule implementation
+// ----------------------------------------------------------------------------
+
+#undef LOG_TAG
+#define LOG_TAG "AudioFlinger::EffectModule"
+
+AudioFlinger::EffectModule::EffectModule(ThreadBase *thread,
+ const wp<AudioFlinger::EffectChain>& chain,
+ effect_descriptor_t *desc,
+ int id,
+ int sessionId)
+ : mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX),
+ mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
+ mDescriptor(*desc),
+ // mConfig is set by configure() and not used before then
+ mEffectInterface(NULL),
+ mStatus(NO_INIT), mState(IDLE),
+ // mMaxDisableWaitCnt is set by configure() and not used before then
+ // mDisableWaitCnt is set by process() and updateState() and not used before then
+ mSuspended(false)
+{
+ ALOGV("Constructor %p", this);
+ int lStatus;
+
+ // create effect engine from effect factory
+ mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface);
+
+ if (mStatus != NO_ERROR) {
+ return;
+ }
+ lStatus = init();
+ if (lStatus < 0) {
+ mStatus = lStatus;
+ goto Error;
+ }
+
+ ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface);
+ return;
+Error:
+ EffectRelease(mEffectInterface);
+ mEffectInterface = NULL;
+ ALOGV("Constructor Error %d", mStatus);
+}
+
+AudioFlinger::EffectModule::~EffectModule()
+{
+ ALOGV("Destructor %p", this);
+ if (mEffectInterface != NULL) {
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ audio_stream_t *stream = thread->stream();
+ if (stream != NULL) {
+ stream->remove_audio_effect(stream, mEffectInterface);
+ }
+ }
+ }
+ // release effect engine
+ EffectRelease(mEffectInterface);
+ }
+}
+
+status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle)
+{
+ status_t status;
+
+ Mutex::Autolock _l(mLock);
+ int priority = handle->priority();
+ size_t size = mHandles.size();
+ EffectHandle *controlHandle = NULL;
+ size_t i;
+ for (i = 0; i < size; i++) {
+ EffectHandle *h = mHandles[i];
+ if (h == NULL || h->destroyed_l()) {
+ continue;
+ }
+ // first non destroyed handle is considered in control
+ if (controlHandle == NULL)
+ controlHandle = h;
+ if (h->priority() <= priority) {
+ break;
+ }
+ }
+ // if inserted in first place, move effect control from previous owner to this handle
+ if (i == 0) {
+ bool enabled = false;
+ if (controlHandle != NULL) {
+ enabled = controlHandle->enabled();
+ controlHandle->setControl(false/*hasControl*/, true /*signal*/, enabled /*enabled*/);
+ }
+ handle->setControl(true /*hasControl*/, false /*signal*/, enabled /*enabled*/);
+ status = NO_ERROR;
+ } else {
+ status = ALREADY_EXISTS;
+ }
+ ALOGV("addHandle() %p added handle %p in position %d", this, handle, i);
+ mHandles.insertAt(handle, i);
+ return status;
+}
+
+size_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle)
+{
+ Mutex::Autolock _l(mLock);
+ size_t size = mHandles.size();
+ size_t i;
+ for (i = 0; i < size; i++) {
+ if (mHandles[i] == handle) {
+ break;
+ }
+ }
+ if (i == size) {
+ return size;
+ }
+ ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i);
+
+ mHandles.removeAt(i);
+ // if removed from first place, move effect control from this handle to next in line
+ if (i == 0) {
+ EffectHandle *h = controlHandle_l();
+ if (h != NULL) {
+ h->setControl(true /*hasControl*/, true /*signal*/ , handle->enabled() /*enabled*/);
+ }
+ }
+
+ // Prevent calls to process() and other functions on effect interface from now on.
+ // The effect engine will be released by the destructor when the last strong reference on
+ // this object is released which can happen after next process is called.
+ if (mHandles.size() == 0 && !mPinned) {
+ mState = DESTROYED;
+ }
+
+ return mHandles.size();
+}
+
+// must be called with EffectModule::mLock held
+AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l()
+{
+ // the first valid handle in the list has control over the module
+ for (size_t i = 0; i < mHandles.size(); i++) {
+ EffectHandle *h = mHandles[i];
+ if (h != NULL && !h->destroyed_l()) {
+ return h;
+ }
+ }
+
+ return NULL;
+}
+
+size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast)
+{
+ ALOGV("disconnect() %p handle %p", this, handle);
+ // keep a strong reference on this EffectModule to avoid calling the
+ // destructor before we exit
+ sp<EffectModule> keep(this);
+ {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ thread->disconnectEffect(keep, handle, unpinIfLast);
+ }
+ }
+ return mHandles.size();
+}
+
+void AudioFlinger::EffectModule::updateState() {
+ Mutex::Autolock _l(mLock);
+
+ switch (mState) {
+ case RESTART:
+ reset_l();
+ // FALL THROUGH
+
+ case STARTING:
+ // clear auxiliary effect input buffer for next accumulation
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ memset(mConfig.inputCfg.buffer.raw,
+ 0,
+ mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
+ }
+ start_l();
+ mState = ACTIVE;
+ break;
+ case STOPPING:
+ stop_l();
+ mDisableWaitCnt = mMaxDisableWaitCnt;
+ mState = STOPPED;
+ break;
+ case STOPPED:
+ // mDisableWaitCnt is forced to 1 by process() when the engine indicates the end of the
+ // turn off sequence.
+ if (--mDisableWaitCnt == 0) {
+ reset_l();
+ mState = IDLE;
+ }
+ break;
+ default: //IDLE , ACTIVE, DESTROYED
+ break;
+ }
+}
+
+void AudioFlinger::EffectModule::process()
+{
+ Mutex::Autolock _l(mLock);
+
+ if (mState == DESTROYED || mEffectInterface == NULL ||
+ mConfig.inputCfg.buffer.raw == NULL ||
+ mConfig.outputCfg.buffer.raw == NULL) {
+ return;
+ }
+
+ if (isProcessEnabled()) {
+ // do 32 bit to 16 bit conversion for auxiliary effect input buffer
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ ditherAndClamp(mConfig.inputCfg.buffer.s32,
+ mConfig.inputCfg.buffer.s32,
+ mConfig.inputCfg.buffer.frameCount/2);
+ }
+
+ // do the actual processing in the effect engine
+ int ret = (*mEffectInterface)->process(mEffectInterface,
+ &mConfig.inputCfg.buffer,
+ &mConfig.outputCfg.buffer);
+
+ // force transition to IDLE state when engine is ready
+ if (mState == STOPPED && ret == -ENODATA) {
+ mDisableWaitCnt = 1;
+ }
+
+ // clear auxiliary effect input buffer for next accumulation
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ memset(mConfig.inputCfg.buffer.raw, 0,
+ mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
+ }
+ } else if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_INSERT &&
+ mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
+ // If an insert effect is idle and input buffer is different from output buffer,
+ // accumulate input onto output
+ sp<EffectChain> chain = mChain.promote();
+ if (chain != 0 && chain->activeTrackCnt() != 0) {
+ size_t frameCnt = mConfig.inputCfg.buffer.frameCount * 2; //always stereo here
+ int16_t *in = mConfig.inputCfg.buffer.s16;
+ int16_t *out = mConfig.outputCfg.buffer.s16;
+ for (size_t i = 0; i < frameCnt; i++) {
+ out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]);
+ }
+ }
+ }
+}
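For illustration, with example sample values (not taken from the patch), the accumulate-and-clamp step above behaves like this:

    int16_t out = 20000;
    int16_t in  = 20000;
    out = clamp16((int32_t)out + (int32_t)in);   // 40000 saturates to 32767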
+
+void AudioFlinger::EffectModule::reset_l()
+{
+ if (mEffectInterface == NULL) {
+ return;
+ }
+ (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
+}
+
+status_t AudioFlinger::EffectModule::configure()
+{
+ if (mEffectInterface == NULL) {
+ return NO_INIT;
+ }
+
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread == 0) {
+ return DEAD_OBJECT;
+ }
+
+ // TODO: handle configuration of effects replacing track process
+ audio_channel_mask_t channelMask = thread->channelMask();
+
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
+ } else {
+ mConfig.inputCfg.channels = channelMask;
+ }
+ mConfig.outputCfg.channels = channelMask;
+ mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ mConfig.inputCfg.samplingRate = thread->sampleRate();
+ mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
+ mConfig.inputCfg.bufferProvider.cookie = NULL;
+ mConfig.inputCfg.bufferProvider.getBuffer = NULL;
+ mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
+ mConfig.outputCfg.bufferProvider.cookie = NULL;
+ mConfig.outputCfg.bufferProvider.getBuffer = NULL;
+ mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
+ mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+ // Insert effect:
+ // - in session AUDIO_SESSION_OUTPUT_MIX or AUDIO_SESSION_OUTPUT_STAGE,
+ // always overwrites output buffer: input buffer == output buffer
+ // - in other sessions:
+ // last effect in the chain accumulates in output buffer: input buffer != output buffer
+ // other effect: overwrites output buffer: input buffer == output buffer
+ // Auxiliary effect:
+ // accumulates in output buffer: input buffer != output buffer
+ // Therefore: accumulate <=> input buffer != output buffer
+ if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
+ mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+ } else {
+ mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+ }
+ mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
+ mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
+ mConfig.inputCfg.buffer.frameCount = thread->frameCount();
+ mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
+
+ ALOGV("configure() %p thread %p buffer %p framecount %d",
+ this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
+
+ status_t cmdStatus;
+ uint32_t size = sizeof(int);
+ status_t status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_CONFIG,
+ sizeof(effect_config_t),
+ &mConfig,
+ &size,
+ &cmdStatus);
+ if (status == 0) {
+ status = cmdStatus;
+ }
+
+ if (status == 0 &&
+ (memcmp(&mDescriptor.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0)) {
+ uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
+ effect_param_t *p = (effect_param_t *)buf32;
+
+ p->psize = sizeof(uint32_t);
+ p->vsize = sizeof(uint32_t);
+ size = sizeof(int);
+ *(int32_t *)p->data = VISUALIZER_PARAM_LATENCY;
+
+ uint32_t latency = 0;
+ PlaybackThread *pbt = thread->mAudioFlinger->checkPlaybackThread_l(thread->mId);
+ if (pbt != NULL) {
+ latency = pbt->latency_l();
+ }
+
+ *((int32_t *)p->data + 1) = latency;
+ (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_PARAM,
+ sizeof(effect_param_t) + 8,
+ &buf32,
+ &size,
+ &cmdStatus);
+ }
+
+ mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
+ (1000 * mConfig.outputCfg.buffer.frameCount);
+
+ return status;
+}
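As a worked example of the mMaxDisableWaitCnt formula just above (the sample rate and frame count are assumed values, not taken from the patch):

    // MAX_DISABLE_TIME_MS = 10000 (see Effects.h below)
    // samplingRate = 48000, frameCount = 1024 (example values)
    // mMaxDisableWaitCnt = (10000 * 48000) / (1000 * 1024) = 468 process() calls
    //                      before updateState() stops waiting and resets the engine to IDLE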
+
+status_t AudioFlinger::EffectModule::init()
+{
+ Mutex::Autolock _l(mLock);
+ if (mEffectInterface == NULL) {
+ return NO_INIT;
+ }
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+ status_t status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_INIT,
+ 0,
+ NULL,
+ &size,
+ &cmdStatus);
+ if (status == 0) {
+ status = cmdStatus;
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::start()
+{
+ Mutex::Autolock _l(mLock);
+ return start_l();
+}
+
+status_t AudioFlinger::EffectModule::start_l()
+{
+ if (mEffectInterface == NULL) {
+ return NO_INIT;
+ }
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+ status_t status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_ENABLE,
+ 0,
+ NULL,
+ &size,
+ &cmdStatus);
+ if (status == 0) {
+ status = cmdStatus;
+ }
+ if (status == 0 &&
+ ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ audio_stream_t *stream = thread->stream();
+ if (stream != NULL) {
+ stream->add_audio_effect(stream, mEffectInterface);
+ }
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::stop()
+{
+ Mutex::Autolock _l(mLock);
+ return stop_l();
+}
+
+status_t AudioFlinger::EffectModule::stop_l()
+{
+ if (mEffectInterface == NULL) {
+ return NO_INIT;
+ }
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+ status_t status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_DISABLE,
+ 0,
+ NULL,
+ &size,
+ &cmdStatus);
+ if (status == 0) {
+ status = cmdStatus;
+ }
+ if (status == 0 &&
+ ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ audio_stream_t *stream = thread->stream();
+ if (stream != NULL) {
+ stream->remove_audio_effect(stream, mEffectInterface);
+ }
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::command(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t *replySize,
+ void *pReplyData)
+{
+ Mutex::Autolock _l(mLock);
+ ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface);
+
+ if (mState == DESTROYED || mEffectInterface == NULL) {
+ return NO_INIT;
+ }
+ status_t status = (*mEffectInterface)->command(mEffectInterface,
+ cmdCode,
+ cmdSize,
+ pCmdData,
+ replySize,
+ pReplyData);
+ if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) {
+ uint32_t size = (replySize == NULL) ? 0 : *replySize;
+ for (size_t i = 1; i < mHandles.size(); i++) {
+ EffectHandle *h = mHandles[i];
+ if (h != NULL && !h->destroyed_l()) {
+ h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData);
+ }
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::setEnabled(bool enabled)
+{
+ Mutex::Autolock _l(mLock);
+ return setEnabled_l(enabled);
+}
+
+// must be called with EffectModule::mLock held
+status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
+{
+
+ ALOGV("setEnabled %p enabled %d", this, enabled);
+
+ if (enabled != isEnabled()) {
+ status_t status = AudioSystem::setEffectEnabled(mId, enabled);
+ if (enabled && status != NO_ERROR) {
+ return status;
+ }
+
+ switch (mState) {
+ // going from disabled to enabled
+ case IDLE:
+ mState = STARTING;
+ break;
+ case STOPPED:
+ mState = RESTART;
+ break;
+ case STOPPING:
+ mState = ACTIVE;
+ break;
+
+ // going from enabled to disabled
+ case RESTART:
+ mState = STOPPED;
+ break;
+ case STARTING:
+ mState = IDLE;
+ break;
+ case ACTIVE:
+ mState = STOPPING;
+ break;
+ case DESTROYED:
+ return NO_ERROR; // simply ignore as we are being destroyed
+ }
+ for (size_t i = 1; i < mHandles.size(); i++) {
+ EffectHandle *h = mHandles[i];
+ if (h != NULL && !h->destroyed_l()) {
+ h->setEnabled(enabled);
+ }
+ }
+ }
+ return NO_ERROR;
+}
+
+bool AudioFlinger::EffectModule::isEnabled() const
+{
+ switch (mState) {
+ case RESTART:
+ case STARTING:
+ case ACTIVE:
+ return true;
+ case IDLE:
+ case STOPPING:
+ case STOPPED:
+ case DESTROYED:
+ default:
+ return false;
+ }
+}
+
+bool AudioFlinger::EffectModule::isProcessEnabled() const
+{
+ switch (mState) {
+ case RESTART:
+ case ACTIVE:
+ case STOPPING:
+ case STOPPED:
+ return true;
+ case IDLE:
+ case STARTING:
+ case DESTROYED:
+ default:
+ return false;
+ }
+}
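Taken together, the two predicates above can be summarized as follows (derived only from the switch statements, shown here as a comment table for reference):

    // state      isEnabled()  isProcessEnabled()
    // IDLE       false        false
    // STARTING   true         false   // enable requested, engine not started yet
    // RESTART    true         true
    // ACTIVE     true         true
    // STOPPING   false        true    // keep processing while the engine winds down
    // STOPPED    false        true    // render the disable tail
    // DESTROYED  false        false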
+
+status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
+{
+ Mutex::Autolock _l(mLock);
+ status_t status = NO_ERROR;
+
+ // Send volume indication if EFFECT_FLAG_VOLUME_IND is set and read back altered volume
+ // if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set)
+ if (isProcessEnabled() &&
+ ((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
+ (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
+ status_t cmdStatus;
+ uint32_t volume[2];
+ uint32_t *pVolume = NULL;
+ uint32_t size = sizeof(volume);
+ volume[0] = *left;
+ volume[1] = *right;
+ if (controller) {
+ pVolume = volume;
+ }
+ status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_VOLUME,
+ size,
+ volume,
+ &size,
+ pVolume);
+ if (controller && status == NO_ERROR && size == sizeof(volume)) {
+ *left = volume[0];
+ *right = volume[1];
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device)
+{
+ if (device == AUDIO_DEVICE_NONE) {
+ return NO_ERROR;
+ }
+
+ Mutex::Autolock _l(mLock);
+ status_t status = NO_ERROR;
+ if (device && (mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+ uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
+ EFFECT_CMD_SET_INPUT_DEVICE;
+ status = (*mEffectInterface)->command(mEffectInterface,
+ cmd,
+ sizeof(uint32_t),
+ &device,
+ &size,
+ &cmdStatus);
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::setMode(audio_mode_t mode)
+{
+ Mutex::Autolock _l(mLock);
+ status_t status = NO_ERROR;
+ if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) {
+ status_t cmdStatus;
+ uint32_t size = sizeof(status_t);
+ status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_AUDIO_MODE,
+ sizeof(audio_mode_t),
+ &mode,
+ &size,
+ &cmdStatus);
+ if (status == NO_ERROR) {
+ status = cmdStatus;
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectModule::setAudioSource(audio_source_t source)
+{
+ Mutex::Autolock _l(mLock);
+ status_t status = NO_ERROR;
+ if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) {
+ uint32_t size = 0;
+ status = (*mEffectInterface)->command(mEffectInterface,
+ EFFECT_CMD_SET_AUDIO_SOURCE,
+ sizeof(audio_source_t),
+ &source,
+ &size,
+ NULL);
+ }
+ return status;
+}
+
+void AudioFlinger::EffectModule::setSuspended(bool suspended)
+{
+ Mutex::Autolock _l(mLock);
+ mSuspended = suspended;
+}
+
+bool AudioFlinger::EffectModule::suspended() const
+{
+ Mutex::Autolock _l(mLock);
+ return mSuspended;
+}
+
+bool AudioFlinger::EffectModule::purgeHandles()
+{
+ bool enabled = false;
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mHandles.size(); i++) {
+ EffectHandle *handle = mHandles[i];
+ if (handle != NULL && !handle->destroyed_l()) {
+ handle->effect().clear();
+ if (handle->hasControl()) {
+ enabled = handle->enabled();
+ }
+ }
+ }
+ return enabled;
+}
+
+void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "\tEffect ID %d:\n", mId);
+ result.append(buffer);
+
+ bool locked = AudioFlinger::dumpTryLock(mLock);
+ // failed to lock - AudioFlinger is probably deadlocked
+ if (!locked) {
+ result.append("\t\tCould not lock Fx mutex:\n");
+ }
+
+ result.append("\t\tSession Status State Engine:\n");
+ snprintf(buffer, SIZE, "\t\t%05d %03d %03d 0x%08x\n",
+ mSessionId, mStatus, mState, (uint32_t)mEffectInterface);
+ result.append(buffer);
+
+ result.append("\t\tDescriptor:\n");
+ snprintf(buffer, SIZE, "\t\t- UUID: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n",
+ mDescriptor.uuid.timeLow, mDescriptor.uuid.timeMid, mDescriptor.uuid.timeHiAndVersion,
+ mDescriptor.uuid.clockSeq, mDescriptor.uuid.node[0], mDescriptor.uuid.node[1],
+ mDescriptor.uuid.node[2],
+ mDescriptor.uuid.node[3],mDescriptor.uuid.node[4],mDescriptor.uuid.node[5]);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "\t\t- TYPE: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n",
+ mDescriptor.type.timeLow, mDescriptor.type.timeMid,
+ mDescriptor.type.timeHiAndVersion,
+ mDescriptor.type.clockSeq, mDescriptor.type.node[0], mDescriptor.type.node[1],
+ mDescriptor.type.node[2],
+ mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n",
+ mDescriptor.apiVersion,
+ mDescriptor.flags);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "\t\t- name: %s\n",
+ mDescriptor.name);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "\t\t- implementor: %s\n",
+ mDescriptor.implementor);
+ result.append(buffer);
+
+ result.append("\t\t- Input configuration:\n");
+ result.append("\t\t\tBuffer Frames Smp rate Channels Format\n");
+ snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n",
+ (uint32_t)mConfig.inputCfg.buffer.raw,
+ mConfig.inputCfg.buffer.frameCount,
+ mConfig.inputCfg.samplingRate,
+ mConfig.inputCfg.channels,
+ mConfig.inputCfg.format);
+ result.append(buffer);
+
+ result.append("\t\t- Output configuration:\n");
+ result.append("\t\t\tBuffer Frames Smp rate Channels Format\n");
+ snprintf(buffer, SIZE, "\t\t\t0x%08x %05d %05d %08x %d\n",
+ (uint32_t)mConfig.outputCfg.buffer.raw,
+ mConfig.outputCfg.buffer.frameCount,
+ mConfig.outputCfg.samplingRate,
+ mConfig.outputCfg.channels,
+ mConfig.outputCfg.format);
+ result.append(buffer);
+
+ snprintf(buffer, SIZE, "\t\t%d Clients:\n", mHandles.size());
+ result.append(buffer);
+ result.append("\t\t\tPid Priority Ctrl Locked client server\n");
+ for (size_t i = 0; i < mHandles.size(); ++i) {
+ EffectHandle *handle = mHandles[i];
+ if (handle != NULL && !handle->destroyed_l()) {
+ handle->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ }
+
+ result.append("\n");
+
+ write(fd, result.string(), result.length());
+
+ if (locked) {
+ mLock.unlock();
+ }
+}
+
+// ----------------------------------------------------------------------------
+// EffectHandle implementation
+// ----------------------------------------------------------------------------
+
+#undef LOG_TAG
+#define LOG_TAG "AudioFlinger::EffectHandle"
+
+AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect,
+ const sp<AudioFlinger::Client>& client,
+ const sp<IEffectClient>& effectClient,
+ int32_t priority)
+ : BnEffect(),
+ mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
+ mPriority(priority), mHasControl(false), mEnabled(false), mDestroyed(false)
+{
+ ALOGV("constructor %p", this);
+
+ if (client == 0) {
+ return;
+ }
+ int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
+ mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
+ if (mCblkMemory != 0) {
+ mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer());
+
+ if (mCblk != NULL) {
+ new(mCblk) effect_param_cblk_t();
+ mBuffer = (uint8_t *)mCblk + bufOffset;
+ }
+ } else {
+ ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE +
+ sizeof(effect_param_cblk_t));
+ return;
+ }
+}
+
+AudioFlinger::EffectHandle::~EffectHandle()
+{
+ ALOGV("Destructor %p", this);
+
+ if (mEffect == 0) {
+ mDestroyed = true;
+ return;
+ }
+ mEffect->lock();
+ mDestroyed = true;
+ mEffect->unlock();
+ disconnect(false);
+}
+
+status_t AudioFlinger::EffectHandle::enable()
+{
+ ALOGV("enable %p", this);
+ if (!mHasControl) {
+ return INVALID_OPERATION;
+ }
+ if (mEffect == 0) {
+ return DEAD_OBJECT;
+ }
+
+ if (mEnabled) {
+ return NO_ERROR;
+ }
+
+ mEnabled = true;
+
+ sp<ThreadBase> thread = mEffect->thread().promote();
+ if (thread != 0) {
+ thread->checkSuspendOnEffectEnabled(mEffect, true, mEffect->sessionId());
+ }
+
+ // checkSuspendOnEffectEnabled() can suspend this same effect when enabled
+ if (mEffect->suspended()) {
+ return NO_ERROR;
+ }
+
+ status_t status = mEffect->setEnabled(true);
+ if (status != NO_ERROR) {
+ if (thread != 0) {
+ thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
+ }
+ mEnabled = false;
+ }
+ return status;
+}
+
+status_t AudioFlinger::EffectHandle::disable()
+{
+ ALOGV("disable %p", this);
+ if (!mHasControl) {
+ return INVALID_OPERATION;
+ }
+ if (mEffect == 0) {
+ return DEAD_OBJECT;
+ }
+
+ if (!mEnabled) {
+ return NO_ERROR;
+ }
+ mEnabled = false;
+
+ if (mEffect->suspended()) {
+ return NO_ERROR;
+ }
+
+ status_t status = mEffect->setEnabled(false);
+
+ sp<ThreadBase> thread = mEffect->thread().promote();
+ if (thread != 0) {
+ thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
+ }
+
+ return status;
+}
+
+void AudioFlinger::EffectHandle::disconnect()
+{
+ disconnect(true);
+}
+
+void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast)
+{
+ ALOGV("disconnect(%s)", unpinIfLast ? "true" : "false");
+ if (mEffect == 0) {
+ return;
+ }
+ // restore suspended effects if the disconnected handle was enabled and the last one.
+ if ((mEffect->disconnect(this, unpinIfLast) == 0) && mEnabled) {
+ sp<ThreadBase> thread = mEffect->thread().promote();
+ if (thread != 0) {
+ thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
+ }
+ }
+
+ // release sp on module => module destructor can be called now
+ mEffect.clear();
+ if (mClient != 0) {
+ if (mCblk != NULL) {
+ // unlike ~TrackBase(), mCblk is never a local new, so don't delete
+ mCblk->~effect_param_cblk_t(); // destroy our shared-structure.
+ }
+ mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
+ // Client destructor must run with AudioFlinger mutex locked
+ Mutex::Autolock _l(mClient->audioFlinger()->mLock);
+ mClient.clear();
+ }
+}
+
+status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t *replySize,
+ void *pReplyData)
+{
+ ALOGVV("command(), cmdCode: %d, mHasControl: %d, mEffect: %p",
+ cmdCode, mHasControl, (mEffect == 0) ? 0 : mEffect.get());
+
+ // only get parameter command is permitted for applications not controlling the effect
+ if (!mHasControl && cmdCode != EFFECT_CMD_GET_PARAM) {
+ return INVALID_OPERATION;
+ }
+ if (mEffect == 0) {
+ return DEAD_OBJECT;
+ }
+ if (mClient == 0) {
+ return INVALID_OPERATION;
+ }
+
+ // handle commands that are not forwarded transparently to effect engine
+ if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) {
+ // No need to trylock() here as this function is executed in the binder thread serving a
+ // particular client process: no risk to block the whole media server process or mixer
+ // threads if we are stuck here
+ Mutex::Autolock _l(mCblk->lock);
+ if (mCblk->clientIndex > EFFECT_PARAM_BUFFER_SIZE ||
+ mCblk->serverIndex > EFFECT_PARAM_BUFFER_SIZE) {
+ mCblk->serverIndex = 0;
+ mCblk->clientIndex = 0;
+ return BAD_VALUE;
+ }
+ status_t status = NO_ERROR;
+ while (mCblk->serverIndex < mCblk->clientIndex) {
+ int reply;
+ uint32_t rsize = sizeof(int);
+ int *p = (int *)(mBuffer + mCblk->serverIndex);
+ int size = *p++;
+ if (((uint8_t *)p + size) > mBuffer + mCblk->clientIndex) {
+ ALOGW("command(): invalid parameter block size");
+ break;
+ }
+ effect_param_t *param = (effect_param_t *)p;
+ if (param->psize == 0 || param->vsize == 0) {
+ ALOGW("command(): null parameter or value size");
+ mCblk->serverIndex += size;
+ continue;
+ }
+ uint32_t psize = sizeof(effect_param_t) +
+ ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) +
+ param->vsize;
+ status_t ret = mEffect->command(EFFECT_CMD_SET_PARAM,
+ psize,
+ p,
+ &rsize,
+ &reply);
+ // stop at first error encountered
+ if (ret != NO_ERROR) {
+ status = ret;
+ *(int *)pReplyData = reply;
+ break;
+ } else if (reply != NO_ERROR) {
+ *(int *)pReplyData = reply;
+ break;
+ }
+ mCblk->serverIndex += size;
+ }
+ mCblk->serverIndex = 0;
+ mCblk->clientIndex = 0;
+ return status;
+ } else if (cmdCode == EFFECT_CMD_ENABLE) {
+ *(int *)pReplyData = NO_ERROR;
+ return enable();
+ } else if (cmdCode == EFFECT_CMD_DISABLE) {
+ *(int *)pReplyData = NO_ERROR;
+ return disable();
+ }
+
+ return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+}
+
+void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled)
+{
+ ALOGV("setControl %p control %d", this, hasControl);
+
+ mHasControl = hasControl;
+ mEnabled = enabled;
+
+ if (signal && mEffectClient != 0) {
+ mEffectClient->controlStatusChanged(hasControl);
+ }
+}
+
+void AudioFlinger::EffectHandle::commandExecuted(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t replySize,
+ void *pReplyData)
+{
+ if (mEffectClient != 0) {
+ mEffectClient->commandExecuted(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ }
+}
+
+
+
+void AudioFlinger::EffectHandle::setEnabled(bool enabled)
+{
+ if (mEffectClient != 0) {
+ mEffectClient->enableStatusChanged(enabled);
+ }
+}
+
+status_t AudioFlinger::EffectHandle::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnEffect::onTransact(code, data, reply, flags);
+}
+
+
+void AudioFlinger::EffectHandle::dump(char* buffer, size_t size)
+{
+ bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
+
+ snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n",
+ (mClient == 0) ? getpid_cached : mClient->pid(),
+ mPriority,
+ mHasControl,
+ !locked,
+ mCblk ? mCblk->clientIndex : 0,
+ mCblk ? mCblk->serverIndex : 0
+ );
+
+ if (locked) {
+ mCblk->lock.unlock();
+ }
+}
+
+#undef LOG_TAG
+#define LOG_TAG "AudioFlinger::EffectChain"
+
+AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
+ int sessionId)
+ : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
+ mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
+ mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
+{
+ mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ if (thread == NULL) {
+ return;
+ }
+ mMaxTailBuffers = ((kProcessTailDurationMs * thread->sampleRate()) / 1000) /
+ thread->frameCount();
+}
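A worked example of the tail computation above (thread parameters are assumed for illustration): with kProcessTailDurationMs = 1000 (see Effects.h below), a 48 kHz thread and a 960-frame mix buffer,

    // mMaxTailBuffers = ((1000 * 48000) / 1000) / 960 = 50
    // i.e. the chain keeps processing for 50 more mix buffers (about one second)
    // after the last active track on the session stops, so effect tails can decay.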
+
+AudioFlinger::EffectChain::~EffectChain()
+{
+ if (mOwnInBuffer) {
+ delete mInBuffer;
+ }
+
+}
+
+// getEffectFromDesc_l() must be called with ThreadBase::mLock held
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l(
+ effect_descriptor_t *descriptor)
+{
+ size_t size = mEffects.size();
+
+ for (size_t i = 0; i < size; i++) {
+ if (memcmp(&mEffects[i]->desc().uuid, &descriptor->uuid, sizeof(effect_uuid_t)) == 0) {
+ return mEffects[i];
+ }
+ }
+ return 0;
+}
+
+// getEffectFromId_l() must be called with ThreadBase::mLock held
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int id)
+{
+ size_t size = mEffects.size();
+
+ for (size_t i = 0; i < size; i++) {
+ // by convention, return first effect if id provided is 0 (0 is never a valid id)
+ if (id == 0 || mEffects[i]->id() == id) {
+ return mEffects[i];
+ }
+ }
+ return 0;
+}
+
+// getEffectFromType_l() must be called with ThreadBase::mLock held
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l(
+ const effect_uuid_t *type)
+{
+ size_t size = mEffects.size();
+
+ for (size_t i = 0; i < size; i++) {
+ if (memcmp(&mEffects[i]->desc().type, type, sizeof(effect_uuid_t)) == 0) {
+ return mEffects[i];
+ }
+ }
+ return 0;
+}
+
+void AudioFlinger::EffectChain::clearInputBuffer()
+{
+ Mutex::Autolock _l(mLock);
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread == 0) {
+ ALOGW("clearInputBuffer(): cannot promote mixer thread");
+ return;
+ }
+ clearInputBuffer_l(thread);
+}
+
+// Must be called with EffectChain::mLock locked
+void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
+{
+ size_t numSamples = thread->frameCount() * thread->channelCount();
+ memset(mInBuffer, 0, numSamples * sizeof(int16_t));
+
+}
+
+// Must be called with EffectChain::mLock locked
+void AudioFlinger::EffectChain::process_l()
+{
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread == 0) {
+ ALOGW("process_l(): cannot promote mixer thread");
+ return;
+ }
+ bool isGlobalSession = (mSessionId == AUDIO_SESSION_OUTPUT_MIX) ||
+ (mSessionId == AUDIO_SESSION_OUTPUT_STAGE);
+ // always process effects unless no more tracks are on the session and the effect tail
+ // has been rendered
+ bool doProcess = true;
+ if (!isGlobalSession) {
+ bool tracksOnSession = (trackCnt() != 0);
+
+ if (!tracksOnSession && mTailBufferCount == 0) {
+ doProcess = false;
+ }
+
+ if (activeTrackCnt() == 0) {
+ // if no track is active and the effect tail has not been rendered,
+ // the input buffer must be cleared here as the mixer process will not do it
+ if (tracksOnSession || mTailBufferCount > 0) {
+ clearInputBuffer_l(thread);
+ if (mTailBufferCount > 0) {
+ mTailBufferCount--;
+ }
+ }
+ }
+ }
+
+ size_t size = mEffects.size();
+ if (doProcess) {
+ for (size_t i = 0; i < size; i++) {
+ mEffects[i]->process();
+ }
+ }
+ for (size_t i = 0; i < size; i++) {
+ mEffects[i]->updateState();
+ }
+}
+
+// addEffect_l() must be called with PlaybackThread::mLock held
+status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect)
+{
+ effect_descriptor_t desc = effect->desc();
+ uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
+
+ Mutex::Autolock _l(mLock);
+ effect->setChain(this);
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread == 0) {
+ return NO_INIT;
+ }
+ effect->setThread(thread);
+
+ if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ // Auxiliary effects are inserted at the beginning of mEffects vector as
+ // they are processed first and accumulated in chain input buffer
+ mEffects.insertAt(effect, 0);
+
+ // the input buffer for auxiliary effect contains mono samples in
+ // 32 bit format. This is to avoid saturation in AudioMixer
+ // accumulation stage. Saturation is done in EffectModule::process() before
+ // calling the process in effect engine
+ size_t numSamples = thread->frameCount();
+ int32_t *buffer = new int32_t[numSamples];
+ memset(buffer, 0, numSamples * sizeof(int32_t));
+ effect->setInBuffer((int16_t *)buffer);
+ // auxiliary effects output samples to chain input buffer for further processing
+ // by insert effects
+ effect->setOutBuffer(mInBuffer);
+ } else {
+ // Insert effects are inserted at the end of mEffects vector as they are processed
+ // after track and auxiliary effects.
+ // Insert effect order as a function of indicated preference:
+ // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
+ // another effect is present
+ // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
+ // last effect claiming first position
+ // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
+ // first effect claiming last position
+ // else if EFFECT_FLAG_INSERT_ANY insert after first or before last
+ // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
+ // already present
+
+ size_t size = mEffects.size();
+ size_t idx_insert = size;
+ ssize_t idx_insert_first = -1;
+ ssize_t idx_insert_last = -1;
+
+ for (size_t i = 0; i < size; i++) {
+ effect_descriptor_t d = mEffects[i]->desc();
+ uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
+ uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
+ if (iMode == EFFECT_FLAG_TYPE_INSERT) {
+ // check invalid effect chaining combinations
+ if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
+ iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
+ ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s",
+ desc.name, d.name);
+ return INVALID_OPERATION;
+ }
+ // remember position of first insert effect and by default
+ // select this as insert position for new effect
+ if (idx_insert == size) {
+ idx_insert = i;
+ }
+ // remember position of last insert effect claiming
+ // first position
+ if (iPref == EFFECT_FLAG_INSERT_FIRST) {
+ idx_insert_first = i;
+ }
+ // remember position of first insert effect claiming
+ // last position
+ if (iPref == EFFECT_FLAG_INSERT_LAST &&
+ idx_insert_last == -1) {
+ idx_insert_last = i;
+ }
+ }
+ }
+
+ // modify idx_insert from first position if needed
+ if (insertPref == EFFECT_FLAG_INSERT_LAST) {
+ if (idx_insert_last != -1) {
+ idx_insert = idx_insert_last;
+ } else {
+ idx_insert = size;
+ }
+ } else {
+ if (idx_insert_first != -1) {
+ idx_insert = idx_insert_first + 1;
+ }
+ }
+
+ // always read samples from chain input buffer
+ effect->setInBuffer(mInBuffer);
+
+ // if last effect in the chain, output samples to chain
+ // output buffer, otherwise to chain input buffer
+ if (idx_insert == size) {
+ if (idx_insert != 0) {
+ mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
+ mEffects[idx_insert-1]->configure();
+ }
+ effect->setOutBuffer(mOutBuffer);
+ } else {
+ effect->setOutBuffer(mInBuffer);
+ }
+ mEffects.insertAt(effect, idx_insert);
+
+ ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this,
+ idx_insert);
+ }
+ effect->configure();
+ return NO_ERROR;
+}
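To make the ordering rules above concrete, a hedged example with a hypothetical existing chain [Aux, EQ (INSERT_FIRST), Reverb (INSERT_ANY), Virtualizer (INSERT_LAST)] at indices 0..3; the outcome for a new insert effect depends on its preference flag:

    // EFFECT_FLAG_INSERT_FIRST      -> index 2, right after EQ (last effect claiming first)
    // EFFECT_FLAG_INSERT_ANY        -> index 2 as well (after first, before last)
    // EFFECT_FLAG_INSERT_LAST       -> index 3, just before Virtualizer (first claiming last)
    // EFFECT_FLAG_INSERT_EXCLUSIVE  -> rejected with INVALID_OPERATION, other inserts present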
+
+// removeEffect_l() must be called with PlaybackThread::mLock held
+size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect)
+{
+ Mutex::Autolock _l(mLock);
+ size_t size = mEffects.size();
+ uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK;
+
+ for (size_t i = 0; i < size; i++) {
+ if (effect == mEffects[i]) {
+ // calling stop here will remove pre-processing effect from the audio HAL.
+ // This is safe as we hold the EffectChain mutex which guarantees that we are not in
+ // the middle of a read from audio HAL
+ if (mEffects[i]->state() == EffectModule::ACTIVE ||
+ mEffects[i]->state() == EffectModule::STOPPING) {
+ mEffects[i]->stop();
+ }
+ if (type == EFFECT_FLAG_TYPE_AUXILIARY) {
+ delete[] effect->inBuffer();
+ } else {
+ if (i == size - 1 && i != 0) {
+ mEffects[i - 1]->setOutBuffer(mOutBuffer);
+ mEffects[i - 1]->configure();
+ }
+ }
+ mEffects.removeAt(i);
+ ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(),
+ this, i);
+ break;
+ }
+ }
+
+ return mEffects.size();
+}
+
+// setDevice_l() must be called with PlaybackThread::mLock held
+void AudioFlinger::EffectChain::setDevice_l(audio_devices_t device)
+{
+ size_t size = mEffects.size();
+ for (size_t i = 0; i < size; i++) {
+ mEffects[i]->setDevice(device);
+ }
+}
+
+// setMode_l() must be called with PlaybackThread::mLock held
+void AudioFlinger::EffectChain::setMode_l(audio_mode_t mode)
+{
+ size_t size = mEffects.size();
+ for (size_t i = 0; i < size; i++) {
+ mEffects[i]->setMode(mode);
+ }
+}
+
+// setAudioSource_l() must be called with PlaybackThread::mLock held
+void AudioFlinger::EffectChain::setAudioSource_l(audio_source_t source)
+{
+ size_t size = mEffects.size();
+ for (size_t i = 0; i < size; i++) {
+ mEffects[i]->setAudioSource(source);
+ }
+}
+
+// setVolume_l() must be called with PlaybackThread::mLock held
+bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right)
+{
+ uint32_t newLeft = *left;
+ uint32_t newRight = *right;
+ bool hasControl = false;
+ int ctrlIdx = -1;
+ size_t size = mEffects.size();
+
+ // first update volume controller
+ for (size_t i = size; i > 0; i--) {
+ if (mEffects[i - 1]->isProcessEnabled() &&
+ (mEffects[i - 1]->desc().flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL) {
+ ctrlIdx = i - 1;
+ hasControl = true;
+ break;
+ }
+ }
+
+ if (ctrlIdx == mVolumeCtrlIdx && *left == mLeftVolume && *right == mRightVolume) {
+ if (hasControl) {
+ *left = mNewLeftVolume;
+ *right = mNewRightVolume;
+ }
+ return hasControl;
+ }
+
+ mVolumeCtrlIdx = ctrlIdx;
+ mLeftVolume = newLeft;
+ mRightVolume = newRight;
+
+ // second get volume update from volume controller
+ if (ctrlIdx >= 0) {
+ mEffects[ctrlIdx]->setVolume(&newLeft, &newRight, true);
+ mNewLeftVolume = newLeft;
+ mNewRightVolume = newRight;
+ }
+ // then indicate volume to all other effects in chain.
+ // Pass altered volume to effects before volume controller
+ // and requested volume to effects after controller
+ uint32_t lVol = newLeft;
+ uint32_t rVol = newRight;
+
+ for (size_t i = 0; i < size; i++) {
+ if ((int)i == ctrlIdx) {
+ continue;
+ }
+ // this also works for ctrlIdx == -1 when there is no volume controller
+ if ((int)i > ctrlIdx) {
+ lVol = *left;
+ rVol = *right;
+ }
+ mEffects[i]->setVolume(&lVol, &rVol, false);
+ }
+ *left = newLeft;
+ *right = newRight;
+
+ return hasControl;
+}
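A hedged walk-through of the routing above with a hypothetical chain (effect names are made up; only the controller logic comes from the function): suppose the chain is [EQ (VOLUME_IND), Volume (VOLUME_CTRL), Reverb] and the thread requests volume V.

    // Volume, the last enabled VOLUME_CTRL effect, is the controller: it receives V via
    // setVolume(..., controller = true) and may return an altered value V'.
    // EQ, before the controller, is then told V' (the altered volume).
    // Reverb, after the controller, is told V (the requested volume).
    // *left / *right are rewritten with V' so the caller knows what remains to be applied.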
+
+void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId);
+ result.append(buffer);
+
+ bool locked = AudioFlinger::dumpTryLock(mLock);
+ // failed to lock - AudioFlinger is probably deadlocked
+ if (!locked) {
+ result.append("\tCould not lock mutex:\n");
+ }
+
+ result.append("\tNum fx In buffer Out buffer Active tracks:\n");
+ snprintf(buffer, SIZE, "\t%02d 0x%08x 0x%08x %d\n",
+ mEffects.size(),
+ (uint32_t)mInBuffer,
+ (uint32_t)mOutBuffer,
+ mActiveTrackCnt);
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ for (size_t i = 0; i < mEffects.size(); ++i) {
+ sp<EffectModule> effect = mEffects[i];
+ if (effect != 0) {
+ effect->dump(fd, args);
+ }
+ }
+
+ if (locked) {
+ mLock.unlock();
+ }
+}
+
+// must be called with ThreadBase::mLock held
+void AudioFlinger::EffectChain::setEffectSuspended_l(
+ const effect_uuid_t *type, bool suspend)
+{
+ sp<SuspendedEffectDesc> desc;
+ // use effect type UUID timeLow as key as there is no real risk of identical
+ // timeLow fields among effect type UUIDs.
+ ssize_t index = mSuspendedEffects.indexOfKey(type->timeLow);
+ if (suspend) {
+ if (index >= 0) {
+ desc = mSuspendedEffects.valueAt(index);
+ } else {
+ desc = new SuspendedEffectDesc();
+ desc->mType = *type;
+ mSuspendedEffects.add(type->timeLow, desc);
+ ALOGV("setEffectSuspended_l() add entry for %08x", type->timeLow);
+ }
+ if (desc->mRefCount++ == 0) {
+ sp<EffectModule> effect = getEffectIfEnabled(type);
+ if (effect != 0) {
+ desc->mEffect = effect;
+ effect->setSuspended(true);
+ effect->setEnabled(false);
+ }
+ }
+ } else {
+ if (index < 0) {
+ return;
+ }
+ desc = mSuspendedEffects.valueAt(index);
+ if (desc->mRefCount <= 0) {
+ ALOGW("setEffectSuspended_l() restore refcount should not be 0 %d", desc->mRefCount);
+ desc->mRefCount = 1;
+ }
+ if (--desc->mRefCount == 0) {
+ ALOGV("setEffectSuspended_l() remove entry for %08x", mSuspendedEffects.keyAt(index));
+ if (desc->mEffect != 0) {
+ sp<EffectModule> effect = desc->mEffect.promote();
+ if (effect != 0) {
+ effect->setSuspended(false);
+ effect->lock();
+ EffectHandle *handle = effect->controlHandle_l();
+ if (handle != NULL && !handle->destroyed_l()) {
+ effect->setEnabled_l(handle->enabled());
+ }
+ effect->unlock();
+ }
+ desc->mEffect.clear();
+ }
+ mSuspendedEffects.removeItemsAt(index);
+ }
+ }
+}
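As a usage sketch of the reference counting above (the call site is hypothetical; only the counting behaviour comes from the function, and ThreadBase::mLock must be held as noted):

    chain->setEffectSuspended_l(SL_IID_VISUALIZATION, true);   // refcount 0 -> 1: disable fx
    chain->setEffectSuspended_l(SL_IID_VISUALIZATION, true);   // refcount 1 -> 2: no extra work
    chain->setEffectSuspended_l(SL_IID_VISUALIZATION, false);  // refcount 2 -> 1: still suspended
    chain->setEffectSuspended_l(SL_IID_VISUALIZATION, false);  // refcount 1 -> 0: restore fx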
+
+// must be called with ThreadBase::mLock held
+void AudioFlinger::EffectChain::setEffectSuspendedAll_l(bool suspend)
+{
+ sp<SuspendedEffectDesc> desc;
+
+ ssize_t index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
+ if (suspend) {
+ if (index >= 0) {
+ desc = mSuspendedEffects.valueAt(index);
+ } else {
+ desc = new SuspendedEffectDesc();
+ mSuspendedEffects.add((int)kKeyForSuspendAll, desc);
+ ALOGV("setEffectSuspendedAll_l() add entry for 0");
+ }
+ if (desc->mRefCount++ == 0) {
+ Vector< sp<EffectModule> > effects;
+ getSuspendEligibleEffects(effects);
+ for (size_t i = 0; i < effects.size(); i++) {
+ setEffectSuspended_l(&effects[i]->desc().type, true);
+ }
+ }
+ } else {
+ if (index < 0) {
+ return;
+ }
+ desc = mSuspendedEffects.valueAt(index);
+ if (desc->mRefCount <= 0) {
+ ALOGW("setEffectSuspendedAll_l() restore refcount should not be 0 %d", desc->mRefCount);
+ desc->mRefCount = 1;
+ }
+ if (--desc->mRefCount == 0) {
+ Vector<const effect_uuid_t *> types;
+ for (size_t i = 0; i < mSuspendedEffects.size(); i++) {
+ if (mSuspendedEffects.keyAt(i) == (int)kKeyForSuspendAll) {
+ continue;
+ }
+ types.add(&mSuspendedEffects.valueAt(i)->mType);
+ }
+ for (size_t i = 0; i < types.size(); i++) {
+ setEffectSuspended_l(types[i], false);
+ }
+ ALOGV("setEffectSuspendedAll_l() remove entry for %08x",
+ mSuspendedEffects.keyAt(index));
+ mSuspendedEffects.removeItem((int)kKeyForSuspendAll);
+ }
+ }
+}
+
+
+// The volume effect is used for automated tests only
+#ifndef OPENSL_ES_H_
+static const effect_uuid_t SL_IID_VOLUME_ = { 0x09e8ede0, 0xddde, 0x11db, 0xb4f6,
+ { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } };
+const effect_uuid_t * const SL_IID_VOLUME = &SL_IID_VOLUME_;
+#endif //OPENSL_ES_H_
+
+bool AudioFlinger::EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc)
+{
+ // auxiliary effects and visualizer are never suspended on output mix
+ if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
+ (((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) ||
+ (memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) ||
+ (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) {
+ return false;
+ }
+ return true;
+}
+
+void AudioFlinger::EffectChain::getSuspendEligibleEffects(
+ Vector< sp<AudioFlinger::EffectModule> > &effects)
+{
+ effects.clear();
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
+ effects.add(mEffects[i]);
+ }
+ }
+}
+
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectIfEnabled(
+ const effect_uuid_t *type)
+{
+ sp<EffectModule> effect = getEffectFromType_l(type);
+ return effect != 0 && effect->isEnabled() ? effect : 0;
+}
+
+void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+ bool enabled)
+{
+ ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
+ if (enabled) {
+ if (index < 0) {
+ // if the effect is not suspended, check if all effects are suspended
+ index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
+ if (index < 0) {
+ return;
+ }
+ if (!isEffectEligibleForSuspend(effect->desc())) {
+ return;
+ }
+ setEffectSuspended_l(&effect->desc().type, enabled);
+ index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
+ if (index < 0) {
+ ALOGW("checkSuspendOnEffectEnabled() Fx should be suspended here!");
+ return;
+ }
+ }
+ ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x",
+ effect->desc().type.timeLow);
+ sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
+ // if the effect is requested to be suspended but was not yet enabled, suspend it now.
+ if (desc->mEffect == 0) {
+ desc->mEffect = effect;
+ effect->setEnabled(false);
+ effect->setSuspended(true);
+ }
+ } else {
+ if (index < 0) {
+ return;
+ }
+ ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x",
+ effect->desc().type.timeLow);
+ sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
+ desc->mEffect.clear();
+ effect->setSuspended(false);
+ }
+}
+
+}; // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
new file mode 100644
index 0000000..91303ee
--- /dev/null
+++ b/services/audioflinger/Effects.h
@@ -0,0 +1,359 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+//--- Audio Effect Management
+
+// EffectModule and EffectChain classes both have their own mutex to protect
+// state changes or resource modifications. Always respect the following order
+// if multiple mutexes must be acquired to avoid cross deadlock:
+// AudioFlinger -> ThreadBase -> EffectChain -> EffectModule
+
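A minimal illustration of that rule using the mutexes defined in this patch (AudioFlinger::mLock, ThreadBase::mLock, EffectChain::mLock, EffectModule::mLock):

    // Correct nesting: AudioFlinger::mLock -> ThreadBase::mLock -> EffectChain::mLock
    //                  -> EffectModule::mLock.
    // Example from Effects.cpp above: EffectChain::setEffectSuspended_l() runs with
    // ThreadBase::mLock held and only then takes EffectModule::mLock via effect->lock(),
    // never the other way around.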
+// The EffectModule class is a wrapper object controlling the effect engine implementation
+// in the effect library. It prevents concurrent calls to process() and command() functions
+// from different client threads. It keeps a list of EffectHandle objects corresponding
+// to all client applications using this effect and notifies applications of effect state,
+// control or parameter changes. It manages the activation state machine to send appropriate
+// reset, enable and disable commands to the effect engine and provides volume
+// ramping when effects are activated/deactivated.
+// When controlling an auxiliary effect, the EffectModule also provides an input buffer used by
+// the attached track(s) to accumulate their auxiliary channel.
+class EffectModule : public RefBase {
+public:
+ EffectModule(ThreadBase *thread,
+ const wp<AudioFlinger::EffectChain>& chain,
+ effect_descriptor_t *desc,
+ int id,
+ int sessionId);
+ virtual ~EffectModule();
+
+ enum effect_state {
+ IDLE,
+ RESTART,
+ STARTING,
+ ACTIVE,
+ STOPPING,
+ STOPPED,
+ DESTROYED
+ };
+
+ int id() const { return mId; }
+ void process();
+ void updateState();
+ status_t command(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t *replySize,
+ void *pReplyData);
+
+ void reset_l();
+ status_t configure();
+ status_t init();
+ effect_state state() const {
+ return mState;
+ }
+ uint32_t status() {
+ return mStatus;
+ }
+ int sessionId() const {
+ return mSessionId;
+ }
+ status_t setEnabled(bool enabled);
+ status_t setEnabled_l(bool enabled);
+ bool isEnabled() const;
+ bool isProcessEnabled() const;
+
+ void setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; }
+ int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; }
+ void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; }
+ int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; }
+ void setChain(const wp<EffectChain>& chain) { mChain = chain; }
+ void setThread(const wp<ThreadBase>& thread) { mThread = thread; }
+ const wp<ThreadBase>& thread() { return mThread; }
+
+ status_t addHandle(EffectHandle *handle);
+ size_t disconnect(EffectHandle *handle, bool unpinIfLast);
+ size_t removeHandle(EffectHandle *handle);
+
+ const effect_descriptor_t& desc() const { return mDescriptor; }
+ wp<EffectChain>& chain() { return mChain; }
+
+ status_t setDevice(audio_devices_t device);
+ status_t setVolume(uint32_t *left, uint32_t *right, bool controller);
+ status_t setMode(audio_mode_t mode);
+ status_t setAudioSource(audio_source_t source);
+ status_t start();
+ status_t stop();
+ void setSuspended(bool suspended);
+ bool suspended() const;
+
+ EffectHandle* controlHandle_l();
+
+ bool isPinned() const { return mPinned; }
+ void unPin() { mPinned = false; }
+ bool purgeHandles();
+ void lock() { mLock.lock(); }
+ void unlock() { mLock.unlock(); }
+
+ void dump(int fd, const Vector<String16>& args);
+
+protected:
+ friend class AudioFlinger; // for mHandles
+ bool mPinned;
+
+ // Maximum time allocated to effect engines to complete the turn off sequence
+ static const uint32_t MAX_DISABLE_TIME_MS = 10000;
+
+ EffectModule(const EffectModule&);
+ EffectModule& operator = (const EffectModule&);
+
+ status_t start_l();
+ status_t stop_l();
+
+ mutable Mutex mLock; // mutex for process, commands and handles list protection
+ wp<ThreadBase> mThread; // parent thread
+ wp<EffectChain> mChain; // parent effect chain
+ const int mId; // this instance unique ID
+ const int mSessionId; // audio session ID
+ const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
+ effect_config_t mConfig; // input and output audio configuration
+ effect_handle_t mEffectInterface; // Effect module C API
+ status_t mStatus; // initialization status
+ effect_state mState; // current activation state
+ Vector<EffectHandle *> mHandles; // list of client handles
+ // First handle in mHandles has highest priority and controls the effect module
+ uint32_t mMaxDisableWaitCnt; // maximum grace period before forcing an effect off after
+ // sending disable command.
+ uint32_t mDisableWaitCnt; // current process() calls count during disable period.
+ bool mSuspended; // effect is suspended: temporarily disabled by framework
+};
+
+// The EffectHandle class implements the IEffect interface. It provides resources
+// to receive parameter updates, keeps track of effect control
+// ownership and state and has a pointer to the EffectModule object it is controlling.
+// There is one EffectHandle object for each application controlling (or using)
+// an effect module.
+// The EffectHandle is obtained by calling AudioFlinger::createEffect().
+class EffectHandle: public android::BnEffect {
+public:
+
+ EffectHandle(const sp<EffectModule>& effect,
+ const sp<AudioFlinger::Client>& client,
+ const sp<IEffectClient>& effectClient,
+ int32_t priority);
+ virtual ~EffectHandle();
+
+ // IEffect
+ virtual status_t enable();
+ virtual status_t disable();
+ virtual status_t command(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t *replySize,
+ void *pReplyData);
+ virtual void disconnect();
+private:
+ void disconnect(bool unpinIfLast);
+public:
+ virtual sp<IMemory> getCblk() const { return mCblkMemory; }
+ virtual status_t onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags);
+
+
+ // Give or take control of effect module
+ // - hasControl: true if control is given, false if removed
+ // - signal: true if the client app should be signaled of the change, false otherwise
+ // - enabled: state of the effect when control is passed
+ void setControl(bool hasControl, bool signal, bool enabled);
+ void commandExecuted(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t replySize,
+ void *pReplyData);
+ void setEnabled(bool enabled);
+ bool enabled() const { return mEnabled; }
+
+ // Getters
+ int id() const { return mEffect->id(); }
+ int priority() const { return mPriority; }
+ bool hasControl() const { return mHasControl; }
+ sp<EffectModule> effect() const { return mEffect; }
+ // destroyed_l() must be called with the associated EffectModule mLock held
+ bool destroyed_l() const { return mDestroyed; }
+
+ void dump(char* buffer, size_t size);
+
+protected:
+ friend class AudioFlinger; // for mEffect, mHasControl, mEnabled
+ EffectHandle(const EffectHandle&);
+ EffectHandle& operator =(const EffectHandle&);
+
+ sp<EffectModule> mEffect; // pointer to controlled EffectModule
+ sp<IEffectClient> mEffectClient; // callback interface for client notifications
+ /*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect()
+ sp<IMemory> mCblkMemory; // shared memory for control block
+ effect_param_cblk_t* mCblk; // control block for deferred parameter setting via
+ // shared memory
+ uint8_t* mBuffer; // pointer to parameter area in shared memory
+ int mPriority; // client application priority to control the effect
+ bool mHasControl; // true if this handle is controlling the effect
+ bool mEnabled; // cached enable state: needed when the effect is
+ // restored after being suspended
+ bool mDestroyed; // Set to true by destructor. Access with EffectModule
+ // mLock held
+};
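
For orientation, here is a minimal sketch of how a client could drive these IEffect methods, assuming an sp<IEffect> already obtained from AudioFlinger::createEffect() (not shown in this diff); the helper name is invented and error handling is omitted:

    #include <binder/IMemory.h>
    #include <media/IEffect.h>
    #include <utils/Errors.h>

    // Hypothetical helper, for illustration only.
    void demoEffectHandleUsage(const android::sp<android::IEffect>& effect) {
        // Shared memory control block used for deferred parameter setting.
        android::sp<android::IMemory> cblk = effect->getCblk();

        if (effect->enable() != android::NO_ERROR) {   // AudioFlinger arbitrates control
            return;
        }
        // ... audio is processed with the effect active ...
        effect->disable();      // turn the effect off, keep the handle
        effect->disconnect();   // drop the handle; the module may be released
    }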
+
+// The EffectChain class represents a group of effects associated with one audio session.
+// There can be any number of EffectChain objects per output mixer thread (PlaybackThread).
+// The EffectChain with session ID 0 contains global effects applied to the output mix.
+// Effects in this chain can be insert or auxiliary. Effects in other chains (attached to
+// tracks) are insert only. The EffectChain maintains an ordered list of effect modules,
+// the order corresponding to the effect processing order. When attached to a track
+// (session ID != 0), it also provides its own input buffer used by the track as an
+// accumulation buffer.
+class EffectChain : public RefBase {
+public:
+ EffectChain(const wp<ThreadBase>& wThread, int sessionId);
+ EffectChain(ThreadBase *thread, int sessionId);
+ virtual ~EffectChain();
+
+ // special key used for an entry in mSuspendedEffects keyed vector
+ // corresponding to a suspend all request.
+ static const int kKeyForSuspendAll = 0;
+
+    // minimum duration during which we keep calling the effect process() method after the
+    // last track on a session is stopped or removed, to allow the effect tail to be rendered
+ static const int kProcessTailDurationMs = 1000;
+
+ void process_l();
+
+ void lock() {
+ mLock.lock();
+ }
+ void unlock() {
+ mLock.unlock();
+ }
+
+ status_t addEffect_l(const sp<EffectModule>& handle);
+ size_t removeEffect_l(const sp<EffectModule>& handle);
+
+ int sessionId() const { return mSessionId; }
+ void setSessionId(int sessionId) { mSessionId = sessionId; }
+
+ sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
+ sp<EffectModule> getEffectFromId_l(int id);
+ sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type);
+ bool setVolume_l(uint32_t *left, uint32_t *right);
+ void setDevice_l(audio_devices_t device);
+ void setMode_l(audio_mode_t mode);
+ void setAudioSource_l(audio_source_t source);
+
+ void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
+ mInBuffer = buffer;
+ mOwnInBuffer = ownsBuffer;
+ }
+ int16_t *inBuffer() const {
+ return mInBuffer;
+ }
+ void setOutBuffer(int16_t *buffer) {
+ mOutBuffer = buffer;
+ }
+ int16_t *outBuffer() const {
+ return mOutBuffer;
+ }
+
+ void incTrackCnt() { android_atomic_inc(&mTrackCnt); }
+ void decTrackCnt() { android_atomic_dec(&mTrackCnt); }
+ int32_t trackCnt() const { return android_atomic_acquire_load(&mTrackCnt); }
+
+ void incActiveTrackCnt() { android_atomic_inc(&mActiveTrackCnt);
+ mTailBufferCount = mMaxTailBuffers; }
+ void decActiveTrackCnt() { android_atomic_dec(&mActiveTrackCnt); }
+ int32_t activeTrackCnt() const { return android_atomic_acquire_load(&mActiveTrackCnt); }
+
+ uint32_t strategy() const { return mStrategy; }
+ void setStrategy(uint32_t strategy)
+ { mStrategy = strategy; }
+
+ // suspend effect of the given type
+ void setEffectSuspended_l(const effect_uuid_t *type,
+ bool suspend);
+ // suspend all eligible effects
+ void setEffectSuspendedAll_l(bool suspend);
+    // check if effects should be suspended or restored when a given effect is enabled or disabled
+ void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+ bool enabled);
+
+ void clearInputBuffer();
+
+ void dump(int fd, const Vector<String16>& args);
+
+protected:
+ friend class AudioFlinger; // for mThread, mEffects
+ EffectChain(const EffectChain&);
+ EffectChain& operator =(const EffectChain&);
+
+ class SuspendedEffectDesc : public RefBase {
+ public:
+ SuspendedEffectDesc() : mRefCount(0) {}
+
+ int mRefCount;
+ effect_uuid_t mType;
+ wp<EffectModule> mEffect;
+ };
+
+ // get a list of effect modules to suspend when an effect of the type
+ // passed is enabled.
+ void getSuspendEligibleEffects(Vector< sp<EffectModule> > &effects);
+
+    // get an effect module if it is currently enabled
+ sp<EffectModule> getEffectIfEnabled(const effect_uuid_t *type);
+ // true if the effect whose descriptor is passed can be suspended
+ // OEMs can modify the rules implemented in this method to exclude specific effect
+ // types or implementations from the suspend/restore mechanism.
+ bool isEffectEligibleForSuspend(const effect_descriptor_t& desc);
+
+ void clearInputBuffer_l(sp<ThreadBase> thread);
+
+ wp<ThreadBase> mThread; // parent mixer thread
+ Mutex mLock; // mutex protecting effect list
+ Vector< sp<EffectModule> > mEffects; // list of effect modules
+ int mSessionId; // audio session ID
+ int16_t *mInBuffer; // chain input buffer
+ int16_t *mOutBuffer; // chain output buffer
+
+    // 'volatile' here means these are accessed with atomic operations instead of a mutex
+ volatile int32_t mActiveTrackCnt; // number of active tracks connected
+ volatile int32_t mTrackCnt; // number of tracks connected
+
+ int32_t mTailBufferCount; // current effect tail buffer count
+ int32_t mMaxTailBuffers; // maximum effect tail buffers
+ bool mOwnInBuffer; // true if the chain owns its input buffer
+ int mVolumeCtrlIdx; // index of insert effect having control over volume
+ uint32_t mLeftVolume; // previous volume on left channel
+ uint32_t mRightVolume; // previous volume on right channel
+ uint32_t mNewLeftVolume; // new volume on left channel
+ uint32_t mNewRightVolume; // new volume on right channel
+ uint32_t mStrategy; // strategy for this effect chain
+ // mSuspendedEffects lists all effects currently suspended in the chain.
+    // Use the effect type UUID timeLow field as the key. There is no real risk of identical
+ // timeLow fields among effect type UUIDs.
+ // Updated by updateSuspendedSessions_l() only.
+ KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
+};
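
To make the flow above concrete, here is a rough sketch of one processing cycle using only the methods declared in this class; the helper function, the buffers and the module are placeholder assumptions, not code from this patch:

    // Hypothetical sketch: wire an EffectChain to its buffers and run one cycle.
    void demoEffectChainCycle(const android::sp<AudioFlinger::EffectChain>& chain,
                              const android::sp<AudioFlinger::EffectModule>& module,
                              int16_t* sessionBuffer,   // per-session accumulation buffer
                              int16_t* mixBuffer)       // thread mix buffer
    {
        chain->setInBuffer(sessionBuffer, false /*ownsBuffer*/);
        chain->setOutBuffer(mixBuffer);

        chain->lock();                  // _l methods require the chain lock
        chain->addEffect_l(module);
        chain->incTrackCnt();
        chain->incActiveTrackCnt();     // also re-arms the effect tail buffer count
        chain->process_l();             // run the effects in processing order
        chain->decActiveTrackCnt();
        chain->unlock();
    }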
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
new file mode 100644
index 0000000..b898924
--- /dev/null
+++ b/services/audioflinger/PlaybackTracks.h
@@ -0,0 +1,285 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+// playback track
+class Track : public TrackBase, public VolumeProvider {
+public:
+ Track( PlaybackThread *thread,
+ const sp<Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId,
+ IAudioFlinger::track_flags_t flags);
+ virtual ~Track();
+
+ static void appendDumpHeader(String8& result);
+ void dump(char* buffer, size_t size);
+ virtual status_t start(AudioSystem::sync_event_t event =
+ AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
+ virtual void stop();
+ void pause();
+
+ void flush();
+ void destroy();
+ void mute(bool);
+ int name() const { return mName; }
+
+ audio_stream_type_t streamType() const {
+ return mStreamType;
+ }
+ status_t attachAuxEffect(int EffectId);
+ void setAuxBuffer(int EffectId, int32_t *buffer);
+ int32_t *auxBuffer() const { return mAuxBuffer; }
+ void setMainBuffer(int16_t *buffer) { mMainBuffer = buffer; }
+ int16_t *mainBuffer() const { return mMainBuffer; }
+ int auxEffectId() const { return mAuxEffectId; }
+
+// implement FastMixerState::VolumeProvider interface
+ virtual uint32_t getVolumeLR();
+
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+
+protected:
+    // friend declarations for access to numerous private members
+ friend class PlaybackThread;
+ friend class MixerThread;
+ friend class DirectOutputThread;
+
+ Track(const Track&);
+ Track& operator = (const Track&);
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts = kInvalidPTS);
+ // releaseBuffer() not overridden
+
+ virtual size_t framesReady() const;
+
+ bool isMuted() const { return mMute; }
+ bool isPausing() const {
+ return mState == PAUSING;
+ }
+ bool isPaused() const {
+ return mState == PAUSED;
+ }
+ bool isResuming() const {
+ return mState == RESUMING;
+ }
+ bool isReady() const;
+ void setPaused() { mState = PAUSED; }
+ void reset();
+
+ bool isOutputTrack() const {
+ return (mStreamType == AUDIO_STREAM_CNT);
+ }
+
+ sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
+
+    // framesWritten is cumulative, never reset, and is shared by all tracks
+ // audioHalFrames is derived from output latency
+ // FIXME parameters not needed, could get them from the thread
+ bool presentationComplete(size_t framesWritten, size_t audioHalFrames);
+
+public:
+ void triggerEvents(AudioSystem::sync_event_t type);
+ virtual bool isTimedTrack() const { return false; }
+ bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
+ virtual bool isOut() const;
+
+protected:
+
+ // written by Track::mute() called by binder thread(s), without a mutex or barrier.
+ // read by Track::isMuted() called by playback thread, also without a mutex or barrier.
+ // The lack of mutex or barrier is safe because the mute status is only used by itself.
+ bool mMute;
+
+    // FILLED state is used to suppress the volume ramp at the beginning of playback
+ enum {FS_INVALID, FS_FILLING, FS_FILLED, FS_ACTIVE};
+ mutable uint8_t mFillingUpStatus;
+ int8_t mRetryCount;
+ const sp<IMemory> mSharedBuffer;
+ bool mResetDone;
+ const audio_stream_type_t mStreamType;
+ int mName; // track name on the normal mixer,
+ // allocated statically at track creation time,
+ // and is even allocated (though unused) for fast tracks
+ // FIXME don't allocate track name for fast tracks
+ int16_t *mMainBuffer;
+ int32_t *mAuxBuffer;
+ int mAuxEffectId;
+ bool mHasVolumeController;
+ size_t mPresentationCompleteFrames; // number of frames written to the
+ // audio HAL when this track will be fully rendered
+ // zero means not monitoring
+private:
+ IAudioFlinger::track_flags_t mFlags;
+
+ // The following fields are only for fast tracks, and should be in a subclass
+ int mFastIndex; // index within FastMixerState::mFastTracks[];
+ // either mFastIndex == -1 if not isFastTrack()
+                                    // or 0 < mFastIndex < FastMixerState::kMaxFastTracks because
+ // index 0 is reserved for normal mixer's submix;
+ // index is allocated statically at track creation time
+ // but the slot is only used if track is active
+ FastTrackUnderruns mObservedUnderruns; // Most recently observed value of
+ // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns
+ uint32_t mUnderrunCount; // Counter of total number of underruns, never reset
+ volatile float mCachedVolume; // combined master volume and stream type volume;
+ // 'volatile' means accessed without lock or
+ // barrier, but is read/written atomically
+}; // end of Track
+
+class TimedTrack : public Track {
+ public:
+ static sp<TimedTrack> create(PlaybackThread *thread,
+ const sp<Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId);
+ virtual ~TimedTrack();
+
+ class TimedBuffer {
+ public:
+ TimedBuffer();
+ TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+ const sp<IMemory>& buffer() const { return mBuffer; }
+ int64_t pts() const { return mPTS; }
+ uint32_t position() const { return mPosition; }
+ void setPosition(uint32_t pos) { mPosition = pos; }
+ private:
+ sp<IMemory> mBuffer;
+ int64_t mPTS;
+ uint32_t mPosition;
+ };
+
+ // Mixer facing methods.
+ virtual bool isTimedTrack() const { return true; }
+ virtual size_t framesReady() const;
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts);
+ virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+ // Client/App facing methods.
+ status_t allocateTimedBuffer(size_t size,
+ sp<IMemory>* buffer);
+ status_t queueTimedBuffer(const sp<IMemory>& buffer,
+ int64_t pts);
+ status_t setMediaTimeTransform(const LinearTransform& xform,
+ TimedAudioTrack::TargetTimeline target);
+
+ private:
+ TimedTrack(PlaybackThread *thread,
+ const sp<Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId);
+
+ void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
+ void timedYieldSilence_l(uint32_t numFrames,
+ AudioBufferProvider::Buffer* buffer);
+ void trimTimedBufferQueue_l();
+ void trimTimedBufferQueueHead_l(const char* logTag);
+ void updateFramesPendingAfterTrim_l(const TimedBuffer& buf,
+ const char* logTag);
+
+ uint64_t mLocalTimeFreq;
+ LinearTransform mLocalTimeToSampleTransform;
+ LinearTransform mMediaTimeToSampleTransform;
+ sp<MemoryDealer> mTimedMemoryDealer;
+
+ Vector<TimedBuffer> mTimedBufferQueue;
+ bool mQueueHeadInFlight;
+ bool mTrimQueueHeadOnRelease;
+ uint32_t mFramesPendingInQueue;
+
+ uint8_t* mTimedSilenceBuffer;
+ uint32_t mTimedSilenceBufferSize;
+ mutable Mutex mTimedBufferQueueLock;
+ bool mTimedAudioOutputOnTime;
+ CCHelper mCCHelper;
+
+ Mutex mMediaTimeTransformLock;
+ LinearTransform mMediaTimeTransform;
+ bool mMediaTimeTransformValid;
+ TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
+};
+
+
+// playback track, used by DuplicatingThread
+class OutputTrack : public Track {
+public:
+
+ class Buffer : public AudioBufferProvider::Buffer {
+ public:
+ int16_t *mBuffer;
+ };
+
+ OutputTrack(PlaybackThread *thread,
+ DuplicatingThread *sourceThread,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount);
+ virtual ~OutputTrack();
+
+ virtual status_t start(AudioSystem::sync_event_t event =
+ AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
+ virtual void stop();
+ bool write(int16_t* data, uint32_t frames);
+ bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
+ bool isActive() const { return mActive; }
+ const wp<ThreadBase>& thread() const { return mThread; }
+
+private:
+
+ enum {
+        NO_MORE_BUFFERS = 0x80000001,   // same name as in AudioTrack.h; the value is allowed to differ
+ };
+
+ status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
+ uint32_t waitTimeMs);
+ void clearBufferQueue();
+
+ // Maximum number of pending buffers allocated by OutputTrack::write()
+ static const uint8_t kMaxOverFlowBuffers = 10;
+
+ Vector < Buffer* > mBufferQueue;
+ AudioBufferProvider::Buffer mOutBuffer;
+ bool mActive;
+ DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
+ void* mBuffers; // starting address of buffers in plain memory
+}; // end of OutputTrack
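
As a reading aid, a hypothetical sketch of how a duplicating thread could feed one OutputTrack through the interface above; the unqualified OutputTrack type, the helper name and the source of the mixed frames are assumptions:

    // Hypothetical sketch: push one buffer of mixed frames to an OutputTrack.
    // Data the destination thread cannot absorb is queued internally, bounded by
    // kMaxOverFlowBuffers.
    void demoDuplicate(const android::sp<OutputTrack>& out,
                       int16_t* mixedFrames, uint32_t frameCount)
    {
        if (!out->isActive()) {
            out->start();               // defaults to SYNC_EVENT_NONE
        }
        bool ok = out->write(mixedFrames, frameCount);
        (void) ok;  // the return value's exact meaning is defined in the implementation,
                    // not in this header, so the sketch does not act on it
    }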
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
new file mode 100644
index 0000000..fe681d7
--- /dev/null
+++ b/services/audioflinger/RecordTracks.h
@@ -0,0 +1,62 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+// record track
+class RecordTrack : public TrackBase {
+public:
+ RecordTrack(RecordThread *thread,
+ const sp<Client>& client,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ int sessionId);
+ virtual ~RecordTrack();
+
+ virtual status_t start(AudioSystem::sync_event_t event, int triggerSession);
+ virtual void stop();
+
+ void destroy();
+
+ // clear the buffer overflow flag
+ void clearOverflow() { mOverflow = false; }
+ // set the buffer overflow flag and return previous value
+ bool setOverflow() { bool tmp = mOverflow; mOverflow = true;
+ return tmp; }
+
+ static void appendDumpHeader(String8& result);
+ void dump(char* buffer, size_t size);
+
+ virtual bool isOut() const;
+
+private:
+ friend class AudioFlinger; // for mState
+
+ RecordTrack(const RecordTrack&);
+ RecordTrack& operator = (const RecordTrack&);
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts = kInvalidPTS);
+ // releaseBuffer() not overridden
+
+ bool mOverflow; // overflow on most recent attempt to fill client buffer
+};
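
A small sketch of how the overflow flag above could be used from the record loop; the helper function and the unqualified RecordTrack type are assumptions for illustration:

    // Hypothetical sketch: mark/clear the client-buffer overflow flag per read cycle.
    void demoRecordOverflow(const android::sp<RecordTrack>& track, bool clientBufferWasFull)
    {
        if (clientBufferWasFull) {
            // setOverflow() returns the previous value, so the first overflow of a
            // burst can be reported exactly once (reporting itself is omitted here).
            bool alreadyFlagged = track->setOverflow();
            (void) alreadyFlagged;
        } else {
            track->clearOverflow();
        }
    }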
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index eba190c..e33b3c6 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -17,6 +17,72 @@
#ifndef ANDROID_AUDIO_STATE_QUEUE_H
#define ANDROID_AUDIO_STATE_QUEUE_H
+// The state queue template class was originally driven by this use case / requirements:
+// There are two threads: a fast mixer, and a normal mixer, and they share state.
+// The interesting part of the shared state is a set of active fast tracks,
+// and the output HAL configuration (buffer size in frames, sample rate, etc.).
+// Fast mixer thread:
+// periodic with typical period < 10 ms
+// FIFO/RR scheduling policy and a low fixed priority
+// ok to block for bounded time using nanosleep() to achieve desired period
+// must not block on condition wait, mutex lock, atomic operation spin, I/O, etc.
+// under typical operations of mixing, writing, or adding/removing tracks
+// ok to block for unbounded time when the output HAL configuration changes,
+// and this may result in an audible artifact
+// needs read-only access to a recent stable state,
+// but not necessarily the most current one
+// Normal mixer thread:
+// periodic with typical period ~40 ms
+// SCHED_OTHER scheduling policy and nice priority == urgent audio
+// ok to block, but prefer to avoid as much as possible
+// needs read/write access to state
+// The normal mixer may need to temporarily suspend the fast mixer thread during mode changes.
+// It will do this using the state -- one of the fields tells the fast mixer to idle.
+
+// Additional requirements:
+// - observer must always be able to poll for and view the latest pushed state; it must never be
+// blocked from seeing that state
+// - observer does not need to see every state in sequence; it is OK for it to skip states
+// [see below for more on this]
+// - mutator must always be able to read/modify a state, it must never be blocked from reading or
+// modifying state
+// - reduce memcpy where possible
+// - work well if the observer runs more frequently than the mutator,
+// as is the case with fast mixer/normal mixer.
+// It is not a requirement to work well if the roles were reversed,
+// and the mutator were to run more frequently than the observer.
+// In this case, the mutator could get blocked waiting for a slot to fill up for
+// it to work with. This could be solved somewhat by increasing the depth of the queue, but it would
+// still limit the mutator to a finite number of changes before it would block. A future
+// possibility, not implemented here, would be to allow the mutator to safely overwrite an already
+// pushed state. This could be done by the mutator overwriting mNext, but then being prepared to
+// read an mAck which is actually for the earlier mNext (since there is a race).
+
+// Solution:
+// Let's call the fast mixer thread the "observer" and normal mixer thread the "mutator".
+// We assume there is only a single observer and a single mutator; this is critical.
+// Each state is of type <T>, and should contain only POD (Plain Old Data) and raw pointers, as
+// memcpy() may be used to copy state, and the destructors are run in unpredictable order.
+// The states in chronological order are: previous, current, next, and mutating:
+// previous read-only, observer can compare vs. current to see the subset that changed
+// current read-only, this is the primary state for observer
+// next read-only, when observer is ready to accept a new state it will shift it in:
+// previous = current
+// current = next
+// and the slot formerly used by previous is now available to the mutator.
+// mutating invisible to observer, read/write to mutator
+// Initialization is tricky, especially for the observer. If the observer starts execution
+// before the mutator, there are no previous, current, or next states. And even if the observer
+// starts execution after the mutator, there is a next state but no previous or current states.
+// To solve this, we'll have the observer idle until there is a next state,
+// and it will have to deal with the case where there is no previous state.
+// The states are stored in a shared FIFO queue represented using a circular array.
+// The observer polls for mutations, and receives a new state pointer after a
+// mutation is pushed onto the queue. To the observer, the state pointers are
+// effectively in random order; that is, the observer should not do address
+// arithmetic on the state pointers. However, to the mutator, the state pointers
+// are in a definite circular order.
+
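
A compact illustration of the single-mutator / single-observer hand-off described above. This is not the StateQueue<T> API defined below; in particular it omits the acknowledgement the real code requires before the mutator may reuse a slot, and all names are invented for the sketch:

    #include <atomic>

    // Hypothetical sketch of the previous/current/next hand-off described above.
    // Single mutator, single observer; states are copied, so they should be POD-like.
    template <typename T>
    class SketchStateQueue {
    public:
        // Mutator side: fill a spare slot, then publish it with a release store.
        void push(const T& state) {
            unsigned slot = mMutatorSlot;
            mStates[slot] = state;
            mNext.store(&mStates[slot], std::memory_order_release);
            mMutatorSlot = (slot + 1) % kN;
            // The real implementation waits for an acknowledgement before reusing a
            // slot the observer may still reference; that handshake is omitted here.
        }

        // Observer side: never blocks. Shifts previous <- current <- next when a new
        // state has been published; returns the current state, or nullptr early on.
        const T* poll() {
            const T* next = mNext.load(std::memory_order_acquire);
            if (next != nullptr && next != mCurrent) {
                mPrevious = mCurrent;       // observer can diff previous vs. current
                mCurrent = next;
            }
            return mCurrent;
        }

    private:
        static const unsigned kN = 4;           // queue depth, as in the real code
        T mStates[kN];
        std::atomic<const T*> mNext{nullptr};   // written by mutator, read by observer
        const T* mCurrent = nullptr;            // observer-private
        const T* mPrevious = nullptr;           // observer-private
        unsigned mMutatorSlot = 0;              // mutator-private
    };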
namespace android {
#ifdef STATE_QUEUE_DUMP
@@ -108,7 +174,7 @@ public:
#endif
private:
- static const unsigned kN = 4; // values != 4 are not supported by this code
+ static const unsigned kN = 4; // values < 4 are not supported by this code
T mStates[kN]; // written by mutator, read by observer
// "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
new file mode 100644
index 0000000..1ceb850
--- /dev/null
+++ b/services/audioflinger/Threads.cpp
@@ -0,0 +1,4426 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+
+#include <math.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <cutils/properties.h>
+#include <cutils/compiler.h>
+#include <utils/Log.h>
+
+#include <private/media/AudioTrackShared.h>
+#include <hardware/audio.h>
+#include <audio_effects/effect_ns.h>
+#include <audio_effects/effect_aec.h>
+#include <audio_utils/primitives.h>
+
+// NBAIO implementations
+#include <media/nbaio/AudioStreamOutSink.h>
+#include <media/nbaio/MonoPipe.h>
+#include <media/nbaio/MonoPipeReader.h>
+#include <media/nbaio/Pipe.h>
+#include <media/nbaio/PipeReader.h>
+#include <media/nbaio/SourceAudioBufferProvider.h>
+
+#include <powermanager/PowerManager.h>
+
+#include <common_time/cc_helper.h>
+#include <common_time/local_clock.h>
+
+#include "AudioFlinger.h"
+#include "AudioMixer.h"
+#include "FastMixer.h"
+#include "ServiceUtilities.h"
+#include "SchedulingPolicyService.h"
+
+#undef ADD_BATTERY_DATA
+
+#ifdef ADD_BATTERY_DATA
+#include <media/IMediaPlayerService.h>
+#include <media/IMediaDeathNotifier.h>
+#endif
+
+// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds
+#ifdef DEBUG_CPU_USAGE
+#include <cpustats/CentralTendencyStatistics.h>
+#include <cpustats/ThreadCpuUsage.h>
+#endif
+
+// ----------------------------------------------------------------------------
+
+// Note: the following macro is used for extremely verbose logging messages. In
+// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
+// 0; but one side effect of this is to turn on all LOGVs as well. Some messages
+// are so verbose that we want to suppress them even when we have ALOG_ASSERT
+// turned on. Do not uncomment the #define below unless you really know what you
+// are doing and want to see all of the extremely verbose messages.
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+namespace android {
+
+// retry counts for buffer fill timeout
+// 50 * ~20msecs = 1 second
+static const int8_t kMaxTrackRetries = 50;
+static const int8_t kMaxTrackStartupRetries = 50;
+// allow less retry attempts on direct output thread.
+// direct outputs can be a scarce resource in audio hardware and should
+// be released as quickly as possible.
+static const int8_t kMaxTrackRetriesDirect = 2;
+
+// don't warn about blocked writes or record buffer overflows more often than this
+static const nsecs_t kWarningThrottleNs = seconds(5);
+
+// RecordThread loop sleep time upon application overrun or audio HAL read error
+static const int kRecordThreadSleepUs = 5000;
+
+// maximum time to wait for setParameters to complete
+static const nsecs_t kSetParametersTimeoutNs = seconds(2);
+
+// minimum sleep time for the mixer thread loop when tracks are active but in underrun
+static const uint32_t kMinThreadSleepTimeUs = 5000;
+// maximum divider applied to the active sleep time in the mixer thread loop
+static const uint32_t kMaxThreadSleepTimeShift = 2;
+
+// minimum normal mix buffer size, expressed in milliseconds rather than frames
+static const uint32_t kMinNormalMixBufferSizeMs = 20;
+// maximum normal mix buffer size
+static const uint32_t kMaxNormalMixBufferSizeMs = 24;
+
+// Whether to use fast mixer
+static const enum {
+ FastMixer_Never, // never initialize or use: for debugging only
+ FastMixer_Always, // always initialize and use, even if not needed: for debugging only
+ // normal mixer multiplier is 1
+ FastMixer_Static, // initialize if needed, then use all the time if initialized,
+ // multiplier is calculated based on min & max normal mixer buffer size
+ FastMixer_Dynamic, // initialize if needed, then use dynamically depending on track load,
+ // multiplier is calculated based on min & max normal mixer buffer size
+ // FIXME for FastMixer_Dynamic:
+ // Supporting this option will require fixing HALs that can't handle large writes.
+ // For example, one HAL implementation returns an error from a large write,
+ // and another HAL implementation corrupts memory, possibly in the sample rate converter.
+ // We could either fix the HAL implementations, or provide a wrapper that breaks
+// up large writes into smaller ones, and the wrapper would need to deal with the scheduler.
+} kUseFastMixer = FastMixer_Static;
+
+// Priorities for requestPriority
+static const int kPriorityAudioApp = 2;
+static const int kPriorityFastMixer = 3;
+
+// IAudioFlinger::createTrack() reports back to client the total size of shared memory area
+// for the track. The client then sub-divides this into smaller buffers for its use.
+// Currently the client uses double-buffering by default, but doesn't tell us about that.
+// So for now we just assume that the client is double-buffered.
+// FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
+// N-buffering, so AudioFlinger could allocate the right amount of memory.
+// See the client's minBufCount and mNotificationFramesAct calculations for details.
+static const int kFastTrackMultiplier = 2;
+
+// ----------------------------------------------------------------------------
+
+#ifdef ADD_BATTERY_DATA
+// To collect the amplifier usage
+static void addBatteryData(uint32_t params) {
+ sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
+ if (service == NULL) {
+        // the failure has already been logged
+ return;
+ }
+
+ service->addBatteryData(params);
+}
+#endif
+
+
+// ----------------------------------------------------------------------------
+// CPU Stats
+// ----------------------------------------------------------------------------
+
+class CpuStats {
+public:
+ CpuStats();
+ void sample(const String8 &title);
+#ifdef DEBUG_CPU_USAGE
+private:
+ ThreadCpuUsage mCpuUsage; // instantaneous thread CPU usage in wall clock ns
+ CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
+
+ CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
+
+ int mCpuNum; // thread's current CPU number
+ int mCpukHz; // frequency of thread's current CPU in kHz
+#endif
+};
+
+CpuStats::CpuStats()
+#ifdef DEBUG_CPU_USAGE
+ : mCpuNum(-1), mCpukHz(-1)
+#endif
+{
+}
+
+void CpuStats::sample(const String8 &title) {
+#ifdef DEBUG_CPU_USAGE
+ // get current thread's delta CPU time in wall clock ns
+ double wcNs;
+ bool valid = mCpuUsage.sampleAndEnable(wcNs);
+
+ // record sample for wall clock statistics
+ if (valid) {
+ mWcStats.sample(wcNs);
+ }
+
+ // get the current CPU number
+ int cpuNum = sched_getcpu();
+
+ // get the current CPU frequency in kHz
+ int cpukHz = mCpuUsage.getCpukHz(cpuNum);
+
+ // check if either CPU number or frequency changed
+ if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
+ mCpuNum = cpuNum;
+ mCpukHz = cpukHz;
+ // ignore sample for purposes of cycles
+ valid = false;
+ }
+
+ // if no change in CPU number or frequency, then record sample for cycle statistics
+ if (valid && mCpukHz > 0) {
+ double cycles = wcNs * cpukHz * 0.000001;
+ mHzStats.sample(cycles);
+ }
+
+ unsigned n = mWcStats.n();
+ // mCpuUsage.elapsed() is expensive, so don't call it every loop
+ if ((n & 127) == 1) {
+ long long elapsed = mCpuUsage.elapsed();
+ if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
+ double perLoop = elapsed / (double) n;
+ double perLoop100 = perLoop * 0.01;
+ double perLoop1k = perLoop * 0.001;
+ double mean = mWcStats.mean();
+ double stddev = mWcStats.stddev();
+ double minimum = mWcStats.minimum();
+ double maximum = mWcStats.maximum();
+ double meanCycles = mHzStats.mean();
+ double stddevCycles = mHzStats.stddev();
+ double minCycles = mHzStats.minimum();
+ double maxCycles = mHzStats.maximum();
+ mCpuUsage.resetElapsed();
+ mWcStats.reset();
+ mHzStats.reset();
+ ALOGD("CPU usage for %s over past %.1f secs\n"
+ " (%u mixer loops at %.1f mean ms per loop):\n"
+ " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
+ " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
+ " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
+ title.string(),
+ elapsed * .000000001, n, perLoop * .000001,
+ mean * .001,
+ stddev * .001,
+ minimum * .001,
+ maximum * .001,
+ mean / perLoop100,
+ stddev / perLoop100,
+ minimum / perLoop100,
+ maximum / perLoop100,
+ meanCycles / perLoop1k,
+ stddevCycles / perLoop1k,
+ minCycles / perLoop1k,
+ maxCycles / perLoop1k);
+
+ }
+ }
+#endif
+}
+
+// ----------------------------------------------------------------------------
+// ThreadBase
+// ----------------------------------------------------------------------------
+
+AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
+ : Thread(false /*canCallJava*/),
+ mType(type),
+ mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0),
+ // mChannelMask
+ mChannelCount(0),
+ mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
+ mParamStatus(NO_ERROR),
+ mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
+ mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
+ // mName will be set by concrete (non-virtual) subclass
+ mDeathRecipient(new PMDeathRecipient(this))
+{
+}
+
+AudioFlinger::ThreadBase::~ThreadBase()
+{
+ mParamCond.broadcast();
+ // do not lock the mutex in destructor
+ releaseWakeLock_l();
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = mPowerManager->asBinder();
+ binder->unlinkToDeath(mDeathRecipient);
+ }
+}
+
+void AudioFlinger::ThreadBase::exit()
+{
+ ALOGV("ThreadBase::exit");
+ // do any cleanup required for exit to succeed
+ preExit();
+ {
+ // This lock prevents the following race in thread (uniprocessor for illustration):
+ // if (!exitPending()) {
+ // // context switch from here to exit()
+        //      // exit() calls requestExit(), which exitPending() observes
+ // // exit() calls signal(), which is dropped since no waiters
+ // // context switch back from exit() to here
+ // mWaitWorkCV.wait(...);
+ // // now thread is hung
+ // }
+ AutoMutex lock(mLock);
+ requestExit();
+ mWaitWorkCV.broadcast();
+ }
+ // When Thread::requestExitAndWait is made virtual and this method is renamed to
+ // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
+ requestExitAndWait();
+}
+
+status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
+{
+ status_t status;
+
+ ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
+ Mutex::Autolock _l(mLock);
+
+ mNewParameters.add(keyValuePairs);
+ mWaitWorkCV.signal();
+ // wait condition with timeout in case the thread loop has exited
+ // before the request could be processed
+ if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) {
+ status = mParamStatus;
+ mWaitWorkCV.signal();
+ } else {
+ status = TIMED_OUT;
+ }
+ return status;
+}
+
+void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
+{
+ Mutex::Autolock _l(mLock);
+ sendIoConfigEvent_l(event, param);
+}
+
+// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param)
+{
+ IoConfigEvent *ioEvent = new IoConfigEvent(event, param);
+ mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent));
+ ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event,
+ param);
+ mWaitWorkCV.signal();
+}
+
+// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
+void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
+{
+ PrioConfigEvent *prioEvent = new PrioConfigEvent(pid, tid, prio);
+ mConfigEvents.add(static_cast<ConfigEvent *>(prioEvent));
+ ALOGV("sendPrioConfigEvent_l() num events %d pid %d, tid %d prio %d",
+ mConfigEvents.size(), pid, tid, prio);
+ mWaitWorkCV.signal();
+}
+
+void AudioFlinger::ThreadBase::processConfigEvents()
+{
+ mLock.lock();
+ while (!mConfigEvents.isEmpty()) {
+ ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
+ ConfigEvent *event = mConfigEvents[0];
+ mConfigEvents.removeAt(0);
+ // release mLock before locking AudioFlinger mLock: lock order is always
+ // AudioFlinger then ThreadBase to avoid cross deadlock
+ mLock.unlock();
+ switch(event->type()) {
+ case CFG_EVENT_PRIO: {
+ PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
+ int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio());
+ if (err != 0) {
+ ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
+ "error %d",
+ prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
+ }
+ } break;
+ case CFG_EVENT_IO: {
+ IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
+ mAudioFlinger->mLock.lock();
+ audioConfigChanged_l(ioEvent->event(), ioEvent->param());
+ mAudioFlinger->mLock.unlock();
+ } break;
+ default:
+ ALOGE("processConfigEvents() unknown event type %d", event->type());
+ break;
+ }
+ delete event;
+ mLock.lock();
+ }
+ mLock.unlock();
+}
+
+void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ bool locked = AudioFlinger::dumpTryLock(mLock);
+ if (!locked) {
+        snprintf(buffer, SIZE, "thread %p may be deadlocked\n", this);
+ write(fd, buffer, strlen(buffer));
+ }
+
+ snprintf(buffer, SIZE, "io handle: %d\n", mId);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "TID: %d\n", getTid());
+ result.append(buffer);
+ snprintf(buffer, SIZE, "standby: %d\n", mStandby);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Sample rate: %u\n", mSampleRate);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Channel Count: %d\n", mChannelCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Format: %d\n", mFormat);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Frame size: %u\n", mFrameSize);
+ result.append(buffer);
+
+ snprintf(buffer, SIZE, "\nPending setParameters commands: \n");
+ result.append(buffer);
+ result.append(" Index Command");
+ for (size_t i = 0; i < mNewParameters.size(); ++i) {
+ snprintf(buffer, SIZE, "\n %02d ", i);
+ result.append(buffer);
+ result.append(mNewParameters[i]);
+ }
+
+ snprintf(buffer, SIZE, "\n\nPending config events: \n");
+ result.append(buffer);
+ for (size_t i = 0; i < mConfigEvents.size(); i++) {
+ mConfigEvents[i]->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ result.append("\n");
+
+ write(fd, result.string(), result.size());
+
+ if (locked) {
+ mLock.unlock();
+ }
+}
+
+void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size());
+ write(fd, buffer, strlen(buffer));
+
+ for (size_t i = 0; i < mEffectChains.size(); ++i) {
+ sp<EffectChain> chain = mEffectChains[i];
+ if (chain != 0) {
+ chain->dump(fd, args);
+ }
+ }
+}
+
+void AudioFlinger::ThreadBase::acquireWakeLock()
+{
+ Mutex::Autolock _l(mLock);
+ acquireWakeLock_l();
+}
+
+void AudioFlinger::ThreadBase::acquireWakeLock_l()
+{
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("Thread %s cannot connect to the power manager service", mName);
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = new BBinder();
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder,
+ String16(mName));
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ }
+ ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+ }
+}
+
+void AudioFlinger::ThreadBase::releaseWakeLock()
+{
+ Mutex::Autolock _l(mLock);
+ releaseWakeLock_l();
+}
+
+void AudioFlinger::ThreadBase::releaseWakeLock_l()
+{
+ if (mWakeLockToken != 0) {
+ ALOGV("releaseWakeLock_l() %s", mName);
+ if (mPowerManager != 0) {
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ }
+ mWakeLockToken.clear();
+ }
+}
+
+void AudioFlinger::ThreadBase::clearPowerManager()
+{
+ Mutex::Autolock _l(mLock);
+ releaseWakeLock_l();
+ mPowerManager.clear();
+}
+
+void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who)
+{
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ thread->clearPowerManager();
+ }
+ ALOGW("power manager service died !!!");
+}
+
+void AudioFlinger::ThreadBase::setEffectSuspended(
+ const effect_uuid_t *type, bool suspend, int sessionId)
+{
+ Mutex::Autolock _l(mLock);
+ setEffectSuspended_l(type, suspend, sessionId);
+}
+
+void AudioFlinger::ThreadBase::setEffectSuspended_l(
+ const effect_uuid_t *type, bool suspend, int sessionId)
+{
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ if (type != NULL) {
+ chain->setEffectSuspended_l(type, suspend);
+ } else {
+ chain->setEffectSuspendedAll_l(suspend);
+ }
+ }
+
+ updateSuspendedSessions_l(type, suspend, sessionId);
+}
+
+void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
+{
+ ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
+ if (index < 0) {
+ return;
+ }
+
+ const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
+ mSuspendedSessions.valueAt(index);
+
+ for (size_t i = 0; i < sessionEffects.size(); i++) {
+ sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
+ for (int j = 0; j < desc->mRefCount; j++) {
+ if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
+ chain->setEffectSuspendedAll_l(true);
+ } else {
+ ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
+ desc->mType.timeLow);
+ chain->setEffectSuspended_l(&desc->mType, true);
+ }
+ }
+ }
+}
+
+void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
+ bool suspend,
+ int sessionId)
+{
+ ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
+
+ KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
+
+ if (suspend) {
+ if (index >= 0) {
+ sessionEffects = mSuspendedSessions.valueAt(index);
+ } else {
+ mSuspendedSessions.add(sessionId, sessionEffects);
+ }
+ } else {
+ if (index < 0) {
+ return;
+ }
+ sessionEffects = mSuspendedSessions.valueAt(index);
+ }
+
+
+ int key = EffectChain::kKeyForSuspendAll;
+ if (type != NULL) {
+ key = type->timeLow;
+ }
+ index = sessionEffects.indexOfKey(key);
+
+ sp<SuspendedSessionDesc> desc;
+ if (suspend) {
+ if (index >= 0) {
+ desc = sessionEffects.valueAt(index);
+ } else {
+ desc = new SuspendedSessionDesc();
+ if (type != NULL) {
+ desc->mType = *type;
+ }
+ sessionEffects.add(key, desc);
+ ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
+ }
+ desc->mRefCount++;
+ } else {
+ if (index < 0) {
+ return;
+ }
+ desc = sessionEffects.valueAt(index);
+ if (--desc->mRefCount == 0) {
+ ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
+ sessionEffects.removeItemsAt(index);
+ if (sessionEffects.isEmpty()) {
+ ALOGV("updateSuspendedSessions_l() restore removing session %d",
+ sessionId);
+ mSuspendedSessions.removeItem(sessionId);
+ }
+ }
+ }
+ if (!sessionEffects.isEmpty()) {
+ mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
+ }
+}
+
+void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+ bool enabled,
+ int sessionId)
+{
+ Mutex::Autolock _l(mLock);
+ checkSuspendOnEffectEnabled_l(effect, enabled, sessionId);
+}
+
+void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
+ bool enabled,
+ int sessionId)
+{
+ if (mType != RECORD) {
+ // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
+        // another session. This gives priority to well-behaved effect control panels
+ // and applications not using global effects.
+ // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect
+ // global effects
+ if ((sessionId != AUDIO_SESSION_OUTPUT_MIX) && (sessionId != AUDIO_SESSION_OUTPUT_STAGE)) {
+ setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
+ }
+ }
+
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ chain->checkSuspendOnEffectEnabled(effect, enabled);
+ }
+}
+
+// ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
+sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
+ const sp<AudioFlinger::Client>& client,
+ const sp<IEffectClient>& effectClient,
+ int32_t priority,
+ int sessionId,
+ effect_descriptor_t *desc,
+ int *enabled,
+ status_t *status
+ )
+{
+ sp<EffectModule> effect;
+ sp<EffectHandle> handle;
+ status_t lStatus;
+ sp<EffectChain> chain;
+ bool chainCreated = false;
+ bool effectCreated = false;
+ bool effectRegistered = false;
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ ALOGW("createEffect_l() Audio driver not initialized.");
+ goto Exit;
+ }
+
+ // Do not allow effects with session ID 0 on direct output or duplicating threads
+    // TODO: add rule for hw accelerated effects on direct outputs with non-PCM format
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX && mType != MIXER) {
+ ALOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
+ desc->name, sessionId);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+    // Only pre-processing effects are allowed on input threads, and they are allowed only on input threads
+ if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+ ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
+ desc->name, desc->flags, mType);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
+ ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+
+ // check for existing effect chain with the requested audio session
+ chain = getEffectChain_l(sessionId);
+ if (chain == 0) {
+ // create a new chain for this session
+ ALOGV("createEffect_l() new effect chain for session %d", sessionId);
+ chain = new EffectChain(this, sessionId);
+ addEffectChain_l(chain);
+ chain->setStrategy(getStrategyForSession_l(sessionId));
+ chainCreated = true;
+ } else {
+ effect = chain->getEffectFromDesc_l(desc);
+ }
+
+ ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
+
+ if (effect == 0) {
+ int id = mAudioFlinger->nextUniqueId();
+ // Check CPU and memory usage
+ lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
+ if (lStatus != NO_ERROR) {
+ goto Exit;
+ }
+ effectRegistered = true;
+ // create a new effect module if none present in the chain
+ effect = new EffectModule(this, chain, desc, id, sessionId);
+ lStatus = effect->status();
+ if (lStatus != NO_ERROR) {
+ goto Exit;
+ }
+ lStatus = chain->addEffect_l(effect);
+ if (lStatus != NO_ERROR) {
+ goto Exit;
+ }
+ effectCreated = true;
+
+ effect->setDevice(mOutDevice);
+ effect->setDevice(mInDevice);
+ effect->setMode(mAudioFlinger->getMode());
+ effect->setAudioSource(mAudioSource);
+ }
+ // create effect handle and connect it to effect module
+ handle = new EffectHandle(effect, client, effectClient, priority);
+ lStatus = effect->addHandle(handle.get());
+ if (enabled != NULL) {
+ *enabled = (int)effect->isEnabled();
+ }
+ }
+
+Exit:
+ if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
+ Mutex::Autolock _l(mLock);
+ if (effectCreated) {
+ chain->removeEffect_l(effect);
+ }
+ if (effectRegistered) {
+ AudioSystem::unregisterEffect(effect->id());
+ }
+ if (chainCreated) {
+ removeEffectChain_l(chain);
+ }
+ handle.clear();
+ }
+
+ if (status != NULL) {
+ *status = lStatus;
+ }
+ return handle;
+}
+
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId)
+{
+ Mutex::Autolock _l(mLock);
+ return getEffect_l(sessionId, effectId);
+}
+
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
+{
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
+}
+
+// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
+// PlaybackThread::mLock held
+status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
+{
+ // check for existing effect chain with the requested audio session
+ int sessionId = effect->sessionId();
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ bool chainCreated = false;
+
+ if (chain == 0) {
+ // create a new chain for this session
+ ALOGV("addEffect_l() new effect chain for session %d", sessionId);
+ chain = new EffectChain(this, sessionId);
+ addEffectChain_l(chain);
+ chain->setStrategy(getStrategyForSession_l(sessionId));
+ chainCreated = true;
+ }
+ ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());
+
+ if (chain->getEffectFromId_l(effect->id()) != 0) {
+ ALOGW("addEffect_l() %p effect %s already present in chain %p",
+ this, effect->desc().name, chain.get());
+ return BAD_VALUE;
+ }
+
+ status_t status = chain->addEffect_l(effect);
+ if (status != NO_ERROR) {
+ if (chainCreated) {
+ removeEffectChain_l(chain);
+ }
+ return status;
+ }
+
+ effect->setDevice(mOutDevice);
+ effect->setDevice(mInDevice);
+ effect->setMode(mAudioFlinger->getMode());
+ effect->setAudioSource(mAudioSource);
+ return NO_ERROR;
+}
+
+void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
+
+ ALOGV("removeEffect_l() %p effect %p", this, effect.get());
+ effect_descriptor_t desc = effect->desc();
+ if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ detachAuxEffect_l(effect->id());
+ }
+
+ sp<EffectChain> chain = effect->chain().promote();
+ if (chain != 0) {
+ // remove effect chain if removing last effect
+ if (chain->removeEffect_l(effect) == 0) {
+ removeEffectChain_l(chain);
+ }
+ } else {
+ ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
+ }
+}
+
+void AudioFlinger::ThreadBase::lockEffectChains_l(
+ Vector< sp<AudioFlinger::EffectChain> >& effectChains)
+{
+ effectChains = mEffectChains;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->lock();
+ }
+}
+
+void AudioFlinger::ThreadBase::unlockEffectChains(
+ const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
+{
+ for (size_t i = 0; i < effectChains.size(); i++) {
+ effectChains[i]->unlock();
+ }
+}
+
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
+{
+ Mutex::Autolock _l(mLock);
+ return getEffectChain_l(sessionId);
+}
+
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const
+{
+ size_t size = mEffectChains.size();
+ for (size_t i = 0; i < size; i++) {
+ if (mEffectChains[i]->sessionId() == sessionId) {
+ return mEffectChains[i];
+ }
+ }
+ return 0;
+}
+
+void AudioFlinger::ThreadBase::setMode(audio_mode_t mode)
+{
+ Mutex::Autolock _l(mLock);
+ size_t size = mEffectChains.size();
+ for (size_t i = 0; i < size; i++) {
+ mEffectChains[i]->setMode_l(mode);
+ }
+}
+
+void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect,
+ EffectHandle *handle,
+ bool unpinIfLast) {
+
+ Mutex::Autolock _l(mLock);
+ ALOGV("disconnectEffect() %p effect %p", this, effect.get());
+ // delete the effect module if removing last handle on it
+ if (effect->removeHandle(handle) == 0) {
+ if (!effect->isPinned() || unpinIfLast) {
+ removeEffect_l(effect);
+ AudioSystem::unregisterEffect(effect->id());
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Playback
+// ----------------------------------------------------------------------------
+
+AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ audio_devices_t device,
+ type_t type)
+ : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
+ mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+ // mStreamTypes[] initialized in constructor body
+ mOutput(output),
+ mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+ mMixerStatus(MIXER_IDLE),
+ mMixerStatusIgnoringFastTracks(MIXER_IDLE),
+ standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+ mScreenState(AudioFlinger::mScreenState),
+ // index 0 is reserved for normal mixer's submix
+ mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
+{
+ snprintf(mName, kNameLength, "AudioOut_%X", id);
+
+    // Assumes constructor is called by AudioFlinger with its mLock held, but
+    // it would be safer to explicitly pass initial masterVolume/masterMute as
+    // parameters.
+ //
+ // If the HAL we are using has support for master volume or master mute,
+ // then do not attenuate or mute during mixing (just leave the volume at 1.0
+ // and the mute set to false).
+ mMasterVolume = audioFlinger->masterVolume_l();
+ mMasterMute = audioFlinger->masterMute_l();
+ if (mOutput && mOutput->audioHwDev) {
+ if (mOutput->audioHwDev->canSetMasterVolume()) {
+ mMasterVolume = 1.0;
+ }
+
+ if (mOutput->audioHwDev->canSetMasterMute()) {
+ mMasterMute = false;
+ }
+ }
+
+ readOutputParameters();
+
+ // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
+ // There is no AUDIO_STREAM_MIN, and ++ operator does not compile
+ for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
+ stream = (audio_stream_type_t) (stream + 1)) {
+ mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
+ mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
+ }
+ // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
+ // because mAudioFlinger doesn't have one to copy from
+}
+
+AudioFlinger::PlaybackThread::~PlaybackThread()
+{
+ delete [] mMixBuffer;
+}
+
+void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
+{
+ dumpInternals(fd, args);
+ dumpTracks(fd, args);
+ dumpEffectChains(fd, args);
+}
+
+void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ result.appendFormat("Output thread %p stream volumes in dB:\n ", this);
+ for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
+ const stream_type_t *st = &mStreamTypes[i];
+ if (i > 0) {
+ result.appendFormat(", ");
+ }
+ result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
+ if (st->mute) {
+ result.append("M");
+ }
+ }
+ result.append("\n");
+ write(fd, result.string(), result.length());
+ result.clear();
+
+ snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
+ result.append(buffer);
+ Track::appendDumpHeader(result);
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (track != 0) {
+ track->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ }
+
+ snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
+ result.append(buffer);
+ Track::appendDumpHeader(result);
+ for (size_t i = 0; i < mActiveTracks.size(); ++i) {
+ sp<Track> track = mActiveTracks[i].promote();
+ if (track != 0) {
+ track->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ }
+ write(fd, result.string(), result.size());
+
+ // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way.
+ FastTrackUnderruns underruns = getFastTrackUnderruns(0);
+ fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n",
+ underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
+}
+
+void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
+ ns2ms(systemTime() - mLastWriteTime));
+ result.append(buffer);
+ snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer);
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+ fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask);
+
+ dumpBase(fd, args);
+}
+
+// Thread virtuals
+status_t AudioFlinger::PlaybackThread::readyToRun()
+{
+ status_t status = initCheck();
+ if (status == NO_ERROR) {
+ ALOGI("AudioFlinger's thread %p ready to run", this);
+ } else {
+ ALOGE("No working audio driver found.");
+ }
+ return status;
+}
+
+void AudioFlinger::PlaybackThread::onFirstRef()
+{
+ run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+// ThreadBase virtuals
+void AudioFlinger::PlaybackThread::preExit()
+{
+ ALOGV(" preExit()");
+ // FIXME this is using hard-coded strings but in the future, this functionality will be
+ // converted to use audio HAL extensions required to support tunneling
+ mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
+}
+
+// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
+sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId,
+ IAudioFlinger::track_flags_t *flags,
+ pid_t tid,
+ status_t *status)
+{
+ sp<Track> track;
+ status_t lStatus;
+
+ bool isTimed = (*flags & IAudioFlinger::TRACK_TIMED) != 0;
+
+ // client expresses a preference for FAST, but we get the final say
+ if (*flags & IAudioFlinger::TRACK_FAST) {
+ if (
+ // not timed
+ (!isTimed) &&
+ // either of these use cases:
+ (
+ // use case 1: shared buffer with any frame count
+ (
+ (sharedBuffer != 0)
+ ) ||
+ // use case 2: callback handler and frame count is default or at least as large as HAL
+ (
+ (tid != -1) &&
+ ((frameCount == 0) ||
+ (frameCount >= (mFrameCount * kFastTrackMultiplier)))
+ )
+ ) &&
+ // PCM data
+ audio_is_linear_pcm(format) &&
+ // mono or stereo
+ ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
+ (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
+#ifndef FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
+ // hardware sample rate
+ (sampleRate == mSampleRate) &&
+#endif
+ // normal mixer has an associated fast mixer
+ hasFastMixer() &&
+ // there are sufficient fast track slots available
+ (mFastTrackAvailMask != 0)
+ // FIXME test that MixerThread for this fast track has a capable output HAL
+ // FIXME add a permission test also?
+ ) {
+ // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
+ if (frameCount == 0) {
+ frameCount = mFrameCount * kFastTrackMultiplier;
+ }
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+ frameCount, mFrameCount);
+ } else {
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
+ "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
+ "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
+ isTimed, sharedBuffer.get(), frameCount, mFrameCount, format,
+ audio_is_linear_pcm(format),
+ channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
+ *flags &= ~IAudioFlinger::TRACK_FAST;
+ // For compatibility with AudioTrack calculation, buffer depth is forced
+ // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+ // This is probably too conservative, but legacy application code may depend on it.
+ // If you change this calculation, also review the start threshold which is related.
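+ // For example, with a 10 ms normal mix buffer and 40 ms of reported HAL latency,
+ // minBufCount works out to 4 and frameCount is raised to at least 4 normal mix buffers.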
+ uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
+ uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
+ if (minBufCount < 2) {
+ minBufCount = 2;
+ }
+ size_t minFrameCount = mNormalFrameCount * minBufCount;
+ if (frameCount < minFrameCount) {
+ frameCount = minFrameCount;
+ }
+ }
+ }
+
+ if (mType == DIRECT) {
+ if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
+ if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
+ ALOGE("createTrack_l() Bad parameter: sampleRate %u format %d, channelMask 0x%08x "
+ "for output %p with format %d",
+ sampleRate, format, channelMask, mOutput, mFormat);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
+ } else {
+ // Resampler implementation limits input sampling rate to 2 x output sampling rate.
+ if (sampleRate > mSampleRate*2) {
+ ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ ALOGE("Audio driver not initialized.");
+ goto Exit;
+ }
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+
+ // all tracks in the same audio session must share the same routing strategy, otherwise
+ // conflicts will happen when tracks are moved from one output to another by the audio
+ // policy manager
+ uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> t = mTracks[i];
+ if (t != 0 && !t->isOutputTrack()) {
+ uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
+ if (sessionId == t->sessionId() && strategy != actual) {
+ ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
+ strategy, actual);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
+ }
+
+ if (!isTimed) {
+ track = new Track(this, client, streamType, sampleRate, format,
+ channelMask, frameCount, sharedBuffer, sessionId, *flags);
+ } else {
+ track = TimedTrack::create(this, client, streamType, sampleRate, format,
+ channelMask, frameCount, sharedBuffer, sessionId);
+ }
+ if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
+ lStatus = NO_MEMORY;
+ goto Exit;
+ }
+ mTracks.add(track);
+
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
+ track->setMainBuffer(chain->inBuffer());
+ chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
+ chain->incTrackCnt();
+ }
+
+ if ((*flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
+ pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
+ // so ask activity manager to do this on our behalf
+ sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+ }
+ }
+
+ lStatus = NO_ERROR;
+
+Exit:
+ if (status) {
+ *status = lStatus;
+ }
+ return track;
+}
+
+uint32_t AudioFlinger::PlaybackThread::correctLatency_l(uint32_t latency) const
+{
+ return latency;
+}
+
+uint32_t AudioFlinger::PlaybackThread::latency() const
+{
+ Mutex::Autolock _l(mLock);
+ return latency_l();
+}
+uint32_t AudioFlinger::PlaybackThread::latency_l() const
+{
+ if (initCheck() == NO_ERROR) {
+ return correctLatency_l(mOutput->stream->get_latency(mOutput->stream));
+ } else {
+ return 0;
+ }
+}
+
+void AudioFlinger::PlaybackThread::setMasterVolume(float value)
+{
+ Mutex::Autolock _l(mLock);
+ // Don't apply master volume in SW if our HAL can do it for us.
+ if (mOutput && mOutput->audioHwDev &&
+ mOutput->audioHwDev->canSetMasterVolume()) {
+ mMasterVolume = 1.0;
+ } else {
+ mMasterVolume = value;
+ }
+}
+
+void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
+{
+ Mutex::Autolock _l(mLock);
+ // Don't apply master mute in SW if our HAL can do it for us.
+ if (mOutput && mOutput->audioHwDev &&
+ mOutput->audioHwDev->canSetMasterMute()) {
+ mMasterMute = false;
+ } else {
+ mMasterMute = muted;
+ }
+}
+
+void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
+{
+ Mutex::Autolock _l(mLock);
+ mStreamTypes[stream].volume = value;
+}
+
+void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
+{
+ Mutex::Autolock _l(mLock);
+ mStreamTypes[stream].mute = muted;
+}
+
+float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
+{
+ Mutex::Autolock _l(mLock);
+ return mStreamTypes[stream].volume;
+}
+
+// addTrack_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
+{
+ status_t status = ALREADY_EXISTS;
+
+ // set retry count for buffer fill
+ track->mRetryCount = kMaxTrackStartupRetries;
+ if (mActiveTracks.indexOf(track) < 0) {
+ // the track is newly added, make sure it fills up all its
+ // buffers before playing. This is to ensure the client will
+ // effectively get the latency it requested.
+ track->mFillingUpStatus = Track::FS_FILLING;
+ track->mResetDone = false;
+ track->mPresentationCompleteFrames = 0;
+ mActiveTracks.add(track);
+ if (track->mainBuffer() != mMixBuffer) {
+ sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+ if (chain != 0) {
+ ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
+ track->sessionId());
+ chain->incActiveTrackCnt();
+ }
+ }
+
+ status = NO_ERROR;
+ }
+
+ ALOGV("mWaitWorkCV.broadcast");
+ mWaitWorkCV.broadcast();
+
+ return status;
+}
+
+// destroyTrack_l() must be called with ThreadBase::mLock held
+void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
+{
+ track->mState = TrackBase::TERMINATED;
+ // active tracks are removed by threadLoop()
+ if (mActiveTracks.indexOf(track) < 0) {
+ removeTrack_l(track);
+ }
+}
+
+void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
+{
+ track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+ mTracks.remove(track);
+ deleteTrackName_l(track->name());
+ // redundant as track is about to be destroyed, for dumpsys only
+ track->mName = -1;
+ if (track->isFastTrack()) {
+ int index = track->mFastIndex;
+ ALOG_ASSERT(0 < index && index < (int)FastMixerState::kMaxFastTracks);
+ ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
+ mFastTrackAvailMask |= 1 << index;
+ // redundant as track is about to be destroyed, for dumpsys only
+ track->mFastIndex = -1;
+ }
+ sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+ if (chain != 0) {
+ chain->decTrackCnt();
+ }
+}
+
+String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
+{
+ String8 out_s8 = String8("");
+ char *s;
+
+ Mutex::Autolock _l(mLock);
+ if (initCheck() != NO_ERROR) {
+ return out_s8;
+ }
+
+ s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
+ out_s8 = String8(s);
+ free(s);
+ return out_s8;
+}
+
+// audioConfigChanged_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) {
+ AudioSystem::OutputDescriptor desc;
+ void *param2 = NULL;
+
+ ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event,
+ param);
+
+ switch (event) {
+ case AudioSystem::OUTPUT_OPENED:
+ case AudioSystem::OUTPUT_CONFIG_CHANGED:
+ desc.channels = mChannelMask;
+ desc.samplingRate = mSampleRate;
+ desc.format = mFormat;
+ desc.frameCount = mNormalFrameCount; // FIXME see
+ // AudioFlinger::frameCount(audio_io_handle_t)
+ desc.latency = latency();
+ param2 = &desc;
+ break;
+
+ case AudioSystem::STREAM_CONFIG_CHANGED:
+ param2 = &param;
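+ // deliberate fall-through: OUTPUT_CLOSED and the default case carry no payload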
+ case AudioSystem::OUTPUT_CLOSED:
+ default:
+ break;
+ }
+ mAudioFlinger->audioConfigChanged_l(event, mId, param2);
+}
+
+void AudioFlinger::PlaybackThread::readOutputParameters()
+{
+ mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
+ mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
+ mChannelCount = (uint16_t)popcount(mChannelMask);
+ mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+ mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
+ mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
+ if (mFrameCount & 15) {
+ ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
+ mFrameCount);
+ }
+
+ // Calculate size of normal mix buffer relative to the HAL output buffer size
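+ // For example, a 5 ms HAL buffer with a 20 ms minimum normal mix size yields a
+ // multiplier of 4, so the normal mix buffer covers four HAL buffers.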
+ double multiplier = 1.0;
+ if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
+ kUseFastMixer == FastMixer_Dynamic)) {
+ size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000;
+ size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000;
+ // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
+ minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
+ maxNormalFrameCount = maxNormalFrameCount & ~15;
+ if (maxNormalFrameCount < minNormalFrameCount) {
+ maxNormalFrameCount = minNormalFrameCount;
+ }
+ multiplier = (double) minNormalFrameCount / (double) mFrameCount;
+ if (multiplier <= 1.0) {
+ multiplier = 1.0;
+ } else if (multiplier <= 2.0) {
+ if (2 * mFrameCount <= maxNormalFrameCount) {
+ multiplier = 2.0;
+ } else {
+ multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
+ }
+ } else {
+ // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL
+ // SRC (it would be unusual for the normal mix buffer size not to be a multiple of the
+ // fast track frame count, but we sometimes have to accept that to satisfy the maximum
+ // frame count constraint)
+ // FIXME this rounding up should not be done if no HAL SRC
+ uint32_t truncMult = (uint32_t) multiplier;
+ if ((truncMult & 1)) {
+ if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) {
+ ++truncMult;
+ }
+ }
+ multiplier = (double) truncMult;
+ }
+ }
+ mNormalFrameCount = multiplier * mFrameCount;
+ // round up to nearest 16 frames to satisfy AudioMixer
+ mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
+ ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
+ mNormalFrameCount);
+
+ delete[] mMixBuffer;
+ mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount];
+ memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+
+ // force reconfiguration of effect chains and engines to take new buffer size and audio
+ // parameters into account
+ // Note that mLock is not held when readOutputParameters() is called from the constructor
+ // but in this case nothing is done below as no audio sessions have effect yet so it doesn't
+ // matter.
+ // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
+ Vector< sp<EffectChain> > effectChains = mEffectChains;
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this, false);
+ }
+}
+
+
+status_t AudioFlinger::PlaybackThread::getRenderPosition(size_t *halFrames, size_t *dspFrames)
+{
+ if (halFrames == NULL || dspFrames == NULL) {
+ return BAD_VALUE;
+ }
+ Mutex::Autolock _l(mLock);
+ if (initCheck() != NO_ERROR) {
+ return INVALID_OPERATION;
+ }
+ size_t framesWritten = mBytesWritten / mFrameSize;
+ *halFrames = framesWritten;
+
+ if (isSuspended()) {
+ // return an estimation of rendered frames when the output is suspended
+ size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
+ *dspFrames = framesWritten >= latencyFrames ? framesWritten - latencyFrames : 0;
+ return NO_ERROR;
+ } else {
+ return mOutput->stream->get_render_position(mOutput->stream, dspFrames);
+ }
+}
+
+uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const
+{
+ Mutex::Autolock _l(mLock);
+ uint32_t result = 0;
+ if (getEffectChain_l(sessionId) != 0) {
+ result = EFFECT_SESSION;
+ }
+
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (sessionId == track->sessionId() &&
+ !(track->mCblk->flags & CBLK_INVALID)) {
+ result |= TRACK_SESSION;
+ break;
+ }
+ }
+
+ return result;
+}
+
+uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
+{
+ // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
+ // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+ return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ }
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ sp<Track> track = mTracks[i];
+ if (sessionId == track->sessionId() &&
+ !(track->mCblk->flags & CBLK_INVALID)) {
+ return AudioSystem::getStrategyForStream(track->streamType());
+ }
+ }
+ return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+}
+
+
+AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
+{
+ Mutex::Autolock _l(mLock);
+ return mOutput;
+}
+
+AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
+{
+ Mutex::Autolock _l(mLock);
+ AudioStreamOut *output = mOutput;
+ mOutput = NULL;
+ // FIXME FastMixer might also have a raw ptr to mOutputSink;
+ // must push a NULL and wait for ack
+ mOutputSink.clear();
+ mPipeSink.clear();
+ mNormalSink.clear();
+ return output;
+}
+
+// this method must always be called either with ThreadBase mLock held or inside the thread loop
+audio_stream_t* AudioFlinger::PlaybackThread::stream() const
+{
+ if (mOutput == NULL) {
+ return NULL;
+ }
+ return &mOutput->stream->common;
+}
+
+uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
+{
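+ // returns roughly the duration of one normal mix buffer, converted frames -> ms -> us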
+ return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000);
+}
+
+status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
+{
+ if (!isValidSyncEvent(event)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (event->triggerSession() == track->sessionId()) {
+ (void) track->setSyncEvent(event);
+ return NO_ERROR;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
+bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const
+{
+ return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_removeTracks(
+ const Vector< sp<Track> >& tracksToRemove)
+{
+ size_t count = tracksToRemove.size();
+ if (CC_UNLIKELY(count)) {
+ for (size_t i = 0 ; i < count ; i++) {
+ const sp<Track>& track = tracksToRemove.itemAt(i);
+ if ((track->sharedBuffer() != 0) &&
+ (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) {
+ AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+ }
+ }
+ }
+
+}
+
+void AudioFlinger::PlaybackThread::checkSilentMode_l()
+{
+ if (!mMasterMute) {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("ro.audio.silent", value, "0") > 0) {
+ char *endptr;
+ unsigned long ul = strtoul(value, &endptr, 0);
+ if (*endptr == '\0' && ul != 0) {
+ ALOGD("Silence is golden");
+ // The setprop command will not allow a property to be changed after
+ // the first time it is set, so we don't have to worry about un-muting.
+ setMasterMute_l(true);
+ }
+ }
+ }
+}
+
+// shared by MIXER and DIRECT, overridden by DUPLICATING
+void AudioFlinger::PlaybackThread::threadLoop_write()
+{
+ // FIXME rewrite to reduce number of system calls
+ mLastWriteTime = systemTime();
+ mInWrite = true;
+ int bytesWritten;
+
+ // If an NBAIO sink is present, use it to write the normal mixer's submix
+ if (mNormalSink != 0) {
+#define mBitShift 2 // FIXME
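+ // the hard-coded shift assumes 16-bit stereo frames (4 bytes each), so count is in frames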
+ size_t count = mixBufferSize >> mBitShift;
+#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
+ Tracer::traceBegin(ATRACE_TAG, "write");
+#endif
+ // update the setpoint when AudioFlinger::mScreenState changes
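+ // (a larger setpoint allows deeper buffering at the cost of latency; a smaller one keeps
+ // latency low)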
+ uint32_t screenState = AudioFlinger::mScreenState;
+ if (screenState != mScreenState) {
+ mScreenState = screenState;
+ MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
+ if (pipe != NULL) {
+ pipe->setAvgFrames((mScreenState & 1) ?
+ (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
+ }
+ }
+ ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
+#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
+ Tracer::traceEnd(ATRACE_TAG);
+#endif
+ if (framesWritten > 0) {
+ bytesWritten = framesWritten << mBitShift;
+ } else {
+ bytesWritten = framesWritten;
+ }
+ // otherwise use the HAL / AudioStreamOut directly
+ } else {
+ // Direct output thread.
+ bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
+ }
+
+ if (bytesWritten > 0) {
+ mBytesWritten += mixBufferSize;
+ }
+ mNumWrites++;
+ mInWrite = false;
+}
+
+/*
+The derived values that are cached:
+ - mixBufferSize from frame count * frame size
+ - activeSleepTime from activeSleepTimeUs()
+ - idleSleepTime from idleSleepTimeUs()
+ - standbyDelay from mActiveSleepTimeUs (DIRECT only)
+ - maxPeriod from frame count and sample rate (MIXER only)
+
+The parameters that affect these derived values are:
+ - frame count
+ - frame size
+ - sample rate
+ - device type: A2DP or not
+ - device latency
+ - format: PCM or not
+ - active sleep time
+ - idle sleep time
+*/
+
+void AudioFlinger::PlaybackThread::cacheParameters_l()
+{
+ mixBufferSize = mNormalFrameCount * mFrameSize;
+ activeSleepTime = activeSleepTimeUs();
+ idleSleepTime = idleSleepTimeUs();
+}
+
+void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+{
+ ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+ this, streamType, mTracks.size());
+ Mutex::Autolock _l(mLock);
+
+ size_t size = mTracks.size();
+ for (size_t i = 0; i < size; i++) {
+ sp<Track> t = mTracks[i];
+ if (t->streamType() == streamType) {
+ android_atomic_or(CBLK_INVALID, &t->mCblk->flags);
+ t->mCblk->cv.signal();
+ }
+ }
+}
+
+status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
+{
+ int session = chain->sessionId();
+ int16_t *buffer = mMixBuffer;
+ bool ownsBuffer = false;
+
+ ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
+ if (session > 0) {
+ // Only one effect chain can be present in direct output thread and it uses
+ // the mix buffer as input
+ if (mType != DIRECT) {
+ size_t numSamples = mNormalFrameCount * mChannelCount;
+ buffer = new int16_t[numSamples];
+ memset(buffer, 0, numSamples * sizeof(int16_t));
+ ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session);
+ ownsBuffer = true;
+ }
+
+ // Attach all tracks with same session ID to this chain.
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (session == track->sessionId()) {
+ ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(),
+ buffer);
+ track->setMainBuffer(buffer);
+ chain->incTrackCnt();
+ }
+ }
+
+ // indicate all active tracks in the chain
+ for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
+ sp<Track> track = mActiveTracks[i].promote();
+ if (track == 0) {
+ continue;
+ }
+ if (session == track->sessionId()) {
+ ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
+ chain->incActiveTrackCnt();
+ }
+ }
+ }
+
+ chain->setInBuffer(buffer, ownsBuffer);
+ chain->setOutBuffer(mMixBuffer);
+ // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
+ // chains list in order to be processed last as it contains output stage effects
+ // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
+ // session AUDIO_SESSION_OUTPUT_STAGE to be processed
+ // after track specific effects and before output stage
+ // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
+ // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX
+ // Effect chain for other sessions are inserted at beginning of effect
+ // chains list to be processed before output mix effects. Relative order between other
+ // sessions is not important
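+ // The insertion loop below keeps mEffectChains sorted by decreasing session id, which
+ // produces exactly that processing order.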
+ size_t size = mEffectChains.size();
+ size_t i = 0;
+ for (i = 0; i < size; i++) {
+ if (mEffectChains[i]->sessionId() < session) {
+ break;
+ }
+ }
+ mEffectChains.insertAt(chain, i);
+ checkSuspendOnAddEffectChain_l(chain);
+
+ return NO_ERROR;
+}
+
+size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain)
+{
+ int session = chain->sessionId();
+
+ ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
+
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ if (chain == mEffectChains[i]) {
+ mEffectChains.removeAt(i);
+ // detach all active tracks from the chain
+ for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
+ sp<Track> track = mActiveTracks[i].promote();
+ if (track == 0) {
+ continue;
+ }
+ if (session == track->sessionId()) {
+ ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
+ chain.get(), session);
+ chain->decActiveTrackCnt();
+ }
+ }
+
+ // detach all tracks with same session ID from this chain
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (session == track->sessionId()) {
+ track->setMainBuffer(mMixBuffer);
+ chain->decTrackCnt();
+ }
+ }
+ break;
+ }
+ }
+ return mEffectChains.size();
+}
+
+status_t AudioFlinger::PlaybackThread::attachAuxEffect(
+ const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
+{
+ Mutex::Autolock _l(mLock);
+ return attachAuxEffect_l(track, EffectId);
+}
+
+status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(
+ const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
+{
+ status_t status = NO_ERROR;
+
+ if (EffectId == 0) {
+ track->setAuxBuffer(0, NULL);
+ } else {
+ // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
+ sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
+ if (effect != 0) {
+ if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
+ } else {
+ status = INVALID_OPERATION;
+ }
+ } else {
+ status = BAD_VALUE;
+ }
+ }
+ return status;
+}
+
+void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId)
+{
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (track->auxEffectId() == effectId) {
+ attachAuxEffect_l(track, 0);
+ }
+ }
+}
+
+bool AudioFlinger::PlaybackThread::threadLoop()
+{
+ Vector< sp<Track> > tracksToRemove;
+
+ standbyTime = systemTime();
+
+ // MIXER
+ nsecs_t lastWarning = 0;
+
+ // DUPLICATING
+ // FIXME could this be made local to while loop?
+ writeFrames = 0;
+
+ cacheParameters_l();
+ sleepTime = idleSleepTime;
+
+ if (mType == MIXER) {
+ sleepTimeShift = 0;
+ }
+
+ CpuStats cpuStats;
+ const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
+
+ acquireWakeLock();
+
+ while (!exitPending())
+ {
+ cpuStats.sample(myName);
+
+ Vector< sp<EffectChain> > effectChains;
+
+ processConfigEvents();
+
+ { // scope for mLock
+
+ Mutex::Autolock _l(mLock);
+
+ if (checkForNewParameters_l()) {
+ cacheParameters_l();
+ }
+
+ saveOutputTracks();
+
+ // put audio hardware into standby after short delay
+ if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
+ isSuspended())) {
+ if (!mStandby) {
+
+ threadLoop_standby();
+
+ mStandby = true;
+ }
+
+ if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
+ // we're about to wait, flush the binder command buffer
+ IPCThreadState::self()->flushCommands();
+
+ clearOutputTracks();
+
+ if (exitPending()) {
+ break;
+ }
+
+ releaseWakeLock_l();
+ // wait until we have something to do...
+ ALOGV("%s going to sleep", myName.string());
+ mWaitWorkCV.wait(mLock);
+ ALOGV("%s waking up", myName.string());
+ acquireWakeLock_l();
+
+ mMixerStatus = MIXER_IDLE;
+ mMixerStatusIgnoringFastTracks = MIXER_IDLE;
+ mBytesWritten = 0;
+
+ checkSilentMode_l();
+
+ standbyTime = systemTime() + standbyDelay;
+ sleepTime = idleSleepTime;
+ if (mType == MIXER) {
+ sleepTimeShift = 0;
+ }
+
+ continue;
+ }
+ }
+
+ // mMixerStatusIgnoringFastTracks is also updated internally
+ mMixerStatus = prepareTracks_l(&tracksToRemove);
+
+ // prevent any changes in effect chain list and in each effect chain
+ // during mixing and effect process as the audio buffers could be deleted
+ // or modified if an effect is created or deleted
+ lockEffectChains_l(effectChains);
+ }
+
+ if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) {
+ threadLoop_mix();
+ } else {
+ threadLoop_sleepTime();
+ }
+
+ if (isSuspended()) {
+ sleepTime = suspendSleepTimeUs();
+ mBytesWritten += mixBufferSize;
+ }
+
+ // only process effects if we're going to write
+ if (sleepTime == 0) {
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ effectChains[i]->process_l();
+ }
+ }
+
+ // enable changes in effect chain
+ unlockEffectChains(effectChains);
+
+ // sleepTime == 0 means we must write to audio hardware
+ if (sleepTime == 0) {
+
+ threadLoop_write();
+
+ if (mType == MIXER) {
+ // write blocked detection
+ nsecs_t now = systemTime();
+ nsecs_t delta = now - mLastWriteTime;
+ if (!mStandby && delta > maxPeriod) {
+ mNumDelayedWrites++;
+ if ((now - lastWarning) > kWarningThrottleNs) {
+#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
+ ScopedTrace st(ATRACE_TAG, "underrun");
+#endif
+ ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
+ ns2ms(delta), mNumDelayedWrites, this);
+ lastWarning = now;
+ }
+ }
+ }
+
+ mStandby = false;
+ } else {
+ usleep(sleepTime);
+ }
+
+ // Finally let go of removed track(s), without the lock held
+ // since we can't guarantee the destructors won't acquire that
+ // same lock. This will also mutate and push a new fast mixer state.
+ threadLoop_removeTracks(tracksToRemove);
+ tracksToRemove.clear();
+
+ // FIXME I don't understand the need for this here;
+ // it was in the original code but maybe the
+ // assignment in saveOutputTracks() makes this unnecessary?
+ clearOutputTracks();
+
+ // Effect chains will be actually deleted here if they were removed from
+ // mEffectChains list during mixing or effects processing
+ effectChains.clear();
+
+ // FIXME Note that the above .clear() is no longer necessary since effectChains
+ // is now local to this block, but will keep it for now (at least until merge done).
+ }
+
+ // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
+ if (mType == MIXER || mType == DIRECT) {
+ // put output stream into standby mode
+ if (!mStandby) {
+ mOutput->stream->common.standby(&mOutput->stream->common);
+ }
+ }
+
+ releaseWakeLock();
+
+ ALOGV("Thread %p type %d exiting", this, mType);
+ return false;
+}
+
+
+// ----------------------------------------------------------------------------
+
+AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+ audio_io_handle_t id, audio_devices_t device, type_t type)
+ : PlaybackThread(audioFlinger, output, id, device, type),
+ // mAudioMixer below
+ // mFastMixer below
+ mFastMixerFutex(0)
+ // mOutputSink below
+ // mPipeSink below
+ // mNormalSink below
+{
+ ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
+ ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%d, mFormat=%d, mFrameSize=%u, "
+ "mFrameCount=%d, mNormalFrameCount=%d",
+ mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
+ mNormalFrameCount);
+ mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
+
+ // FIXME - Current mixer implementation only supports stereo output
+ if (mChannelCount != FCC_2) {
+ ALOGE("Invalid audio hardware channel count %d", mChannelCount);
+ }
+
+ // create an NBAIO sink for the HAL output stream, and negotiate
+ mOutputSink = new AudioStreamOutSink(output->stream);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)};
+ ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+
+ // initialize fast mixer depending on configuration
+ bool initFastMixer;
+ switch (kUseFastMixer) {
+ case FastMixer_Never:
+ initFastMixer = false;
+ break;
+ case FastMixer_Always:
+ initFastMixer = true;
+ break;
+ case FastMixer_Static:
+ case FastMixer_Dynamic:
+ initFastMixer = mFrameCount < mNormalFrameCount;
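+ // (a fast mixer only helps when the HAL period is shorter than the normal mix buffer)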
+ break;
+ }
+ if (initFastMixer) {
+
+ // create a MonoPipe to connect our submix to FastMixer
+ NBAIO_Format format = mOutputSink->format();
+ // This pipe depth compensates for scheduling latency of the normal mixer thread.
+ // When it wakes up after a maximum latency, it runs a few cycles quickly before
+ // finally blocking. Note the pipe implementation rounds up the request to a power of 2.
+ MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
+ const NBAIO_Format offers[1] = {format};
+ size_t numCounterOffers = 0;
+ ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ monoPipe->setAvgFrames((mScreenState & 1) ?
+ (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
+ mPipeSink = monoPipe;
+
+#ifdef TEE_SINK_FRAMES
+ // create a Pipe to archive a copy of FastMixer's output for dumpsys
+ Pipe *teeSink = new Pipe(TEE_SINK_FRAMES, format);
+ numCounterOffers = 0;
+ index = teeSink->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ mTeeSink = teeSink;
+ PipeReader *teeSource = new PipeReader(*teeSink);
+ numCounterOffers = 0;
+ index = teeSource->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ mTeeSource = teeSource;
+#endif
+
+ // create fast mixer and configure it initially with just one fast track for our submix
+ mFastMixer = new FastMixer();
+ FastMixerStateQueue *sq = mFastMixer->sq();
+#ifdef STATE_QUEUE_DUMP
+ sq->setObserverDump(&mStateQueueObserverDump);
+ sq->setMutatorDump(&mStateQueueMutatorDump);
+#endif
+ FastMixerState *state = sq->begin();
+ FastTrack *fastTrack = &state->mFastTracks[0];
+ // wrap the source side of the MonoPipe to make it an AudioBufferProvider
+ fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
+ fastTrack->mVolumeProvider = NULL;
+ fastTrack->mGeneration++;
+ state->mFastTracksGen++;
+ state->mTrackMask = 1;
+ // fast mixer will use the HAL output sink
+ state->mOutputSink = mOutputSink.get();
+ state->mOutputSinkGen++;
+ state->mFrameCount = mFrameCount;
+ state->mCommand = FastMixerState::COLD_IDLE;
+ // already done in constructor initialization list
+ //mFastMixerFutex = 0;
+ state->mColdFutexAddr = &mFastMixerFutex;
+ state->mColdGen++;
+ state->mDumpState = &mFastMixerDumpState;
+ state->mTeeSink = mTeeSink.get();
+ sq->end();
+ sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+
+ // start the fast mixer
+ mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
+ pid_t tid = mFastMixer->getTid();
+ int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
+ if (err != 0) {
+ ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
+ kPriorityFastMixer, getpid_cached, tid, err);
+ }
+
+#ifdef AUDIO_WATCHDOG
+ // create and start the watchdog
+ mAudioWatchdog = new AudioWatchdog();
+ mAudioWatchdog->setDump(&mAudioWatchdogDump);
+ mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
+ tid = mAudioWatchdog->getTid();
+ err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
+ if (err != 0) {
+ ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
+ kPriorityFastMixer, getpid_cached, tid, err);
+ }
+#endif
+
+ } else {
+ mFastMixer = NULL;
+ }
+
+ switch (kUseFastMixer) {
+ case FastMixer_Never:
+ case FastMixer_Dynamic:
+ mNormalSink = mOutputSink;
+ break;
+ case FastMixer_Always:
+ mNormalSink = mPipeSink;
+ break;
+ case FastMixer_Static:
+ mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
+ break;
+ }
+}
+
+AudioFlinger::MixerThread::~MixerThread()
+{
+ if (mFastMixer != NULL) {
+ FastMixerStateQueue *sq = mFastMixer->sq();
+ FastMixerState *state = sq->begin();
+ if (state->mCommand == FastMixerState::COLD_IDLE) {
+ int32_t old = android_atomic_inc(&mFastMixerFutex);
+ if (old == -1) {
+ __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
+ }
+ }
+ state->mCommand = FastMixerState::EXIT;
+ sq->end();
+ sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+ mFastMixer->join();
+ // Though the fast mixer thread has exited, its state queue is still valid.
+ // We'll use it to extract the final state, which contains one remaining fast track
+ // corresponding to our sub-mix.
+ state = sq->begin();
+ ALOG_ASSERT(state->mTrackMask == 1);
+ FastTrack *fastTrack = &state->mFastTracks[0];
+ ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
+ delete fastTrack->mBufferProvider;
+ sq->end(false /*didModify*/);
+ delete mFastMixer;
+#ifdef AUDIO_WATCHDOG
+ if (mAudioWatchdog != 0) {
+ mAudioWatchdog->requestExit();
+ mAudioWatchdog->requestExitAndWait();
+ mAudioWatchdog.clear();
+ }
+#endif
+ }
+ delete mAudioMixer;
+}
+
+
+uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t latency) const
+{
+ if (mFastMixer != NULL) {
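+ // add the latency of the MonoPipe between the normal mixer and the fast mixer
+ // (the setpoint frame count converted to milliseconds)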
+ MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
+ latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
+ }
+ return latency;
+}
+
+
+void AudioFlinger::MixerThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove)
+{
+ PlaybackThread::threadLoop_removeTracks(tracksToRemove);
+}
+
+void AudioFlinger::MixerThread::threadLoop_write()
+{
+ // FIXME we should only do one push per cycle; confirm this is true
+ // Start the fast mixer if it's not already running
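+ // (with FastMixer_Dynamic this waits until at least one client fast track exists in
+ // addition to our submix in slot 0, i.e. state->mTrackMask > 1)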
+ if (mFastMixer != NULL) {
+ FastMixerStateQueue *sq = mFastMixer->sq();
+ FastMixerState *state = sq->begin();
+ if (state->mCommand != FastMixerState::MIX_WRITE &&
+ (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
+ if (state->mCommand == FastMixerState::COLD_IDLE) {
+ int32_t old = android_atomic_inc(&mFastMixerFutex);
+ if (old == -1) {
+ __futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
+ }
+#ifdef AUDIO_WATCHDOG
+ if (mAudioWatchdog != 0) {
+ mAudioWatchdog->resume();
+ }
+#endif
+ }
+ state->mCommand = FastMixerState::MIX_WRITE;
+ sq->end();
+ sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+ if (kUseFastMixer == FastMixer_Dynamic) {
+ mNormalSink = mPipeSink;
+ }
+ } else {
+ sq->end(false /*didModify*/);
+ }
+ }
+ PlaybackThread::threadLoop_write();
+}
+
+void AudioFlinger::MixerThread::threadLoop_standby()
+{
+ // Idle the fast mixer if it's currently running
+ if (mFastMixer != NULL) {
+ FastMixerStateQueue *sq = mFastMixer->sq();
+ FastMixerState *state = sq->begin();
+ if (!(state->mCommand & FastMixerState::IDLE)) {
+ state->mCommand = FastMixerState::COLD_IDLE;
+ state->mColdFutexAddr = &mFastMixerFutex;
+ state->mColdGen++;
+ mFastMixerFutex = 0;
+ sq->end();
+ // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
+ sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
+ if (kUseFastMixer == FastMixer_Dynamic) {
+ mNormalSink = mOutputSink;
+ }
+#ifdef AUDIO_WATCHDOG
+ if (mAudioWatchdog != 0) {
+ mAudioWatchdog->pause();
+ }
+#endif
+ } else {
+ sq->end(false /*didModify*/);
+ }
+ }
+ PlaybackThread::threadLoop_standby();
+}
+
+// shared by MIXER and DIRECT, overridden by DUPLICATING
+void AudioFlinger::PlaybackThread::threadLoop_standby()
+{
+ ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
+ mOutput->stream->common.standby(&mOutput->stream->common);
+}
+
+void AudioFlinger::MixerThread::threadLoop_mix()
+{
+ // obtain the presentation timestamp of the next output buffer
+ int64_t pts;
+ status_t status = INVALID_OPERATION;
+
+ if (mNormalSink != 0) {
+ status = mNormalSink->getNextWriteTimestamp(&pts);
+ } else {
+ status = mOutputSink->getNextWriteTimestamp(&pts);
+ }
+
+ if (status != NO_ERROR) {
+ pts = AudioBufferProvider::kInvalidPTS;
+ }
+
+ // mix buffers...
+ mAudioMixer->process(pts);
+ // increase sleep time progressively when application underrun condition clears.
+ // Only increase the sleep time after the mixer has been ready for two consecutive rounds,
+ // so that a steady state of alternating ready/not ready conditions does not drive the
+ // sleep time up to the point where we would underrun the audio HAL.
+ if ((sleepTime == 0) && (sleepTimeShift > 0)) {
+ sleepTimeShift--;
+ }
+ sleepTime = 0;
+ standbyTime = systemTime() + standbyDelay;
+ //TODO: delay standby when effects have a tail
+}
+
+void AudioFlinger::MixerThread::threadLoop_sleepTime()
+{
+ // If no tracks are ready, sleep once for the duration of an output
+ // buffer size, then write 0s to the output
+ if (sleepTime == 0) {
+ if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+ sleepTime = activeSleepTime >> sleepTimeShift;
+ if (sleepTime < kMinThreadSleepTimeUs) {
+ sleepTime = kMinThreadSleepTimeUs;
+ }
+ // reduce sleep time in case of consecutive application underruns to avoid
+ // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
+ // duration we would end up writing less data than needed by the audio HAL if
+ // the condition persists.
+ if (sleepTimeShift < kMaxThreadSleepTimeShift) {
+ sleepTimeShift++;
+ }
+ } else {
+ sleepTime = idleSleepTime;
+ }
+ } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
+ memset (mMixBuffer, 0, mixBufferSize);
+ sleepTime = 0;
+ ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
+ "anticipated start");
+ }
+ // TODO add standby time extension fct of effect tail
+}
+
+// prepareTracks_l() must be called with ThreadBase::mLock held
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
+ Vector< sp<Track> > *tracksToRemove)
+{
+
+ mixer_state mixerStatus = MIXER_IDLE;
+ // find out which tracks need to be processed
+ size_t count = mActiveTracks.size();
+ size_t mixedTracks = 0;
+ size_t tracksWithEffect = 0;
+ // counts only _active_ fast tracks
+ size_t fastTracks = 0;
+ uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
+
+ float masterVolume = mMasterVolume;
+ bool masterMute = mMasterMute;
+
+ if (masterMute) {
+ masterVolume = 0;
+ }
+ // Delegate master volume control to effect in output mix effect chain if needed
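+ // (EffectChain::setVolume_l() works in 8.24 fixed point, hence the conversions below)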
+ sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
+ if (chain != 0) {
+ uint32_t v = (uint32_t)(masterVolume * (1 << 24));
+ chain->setVolume_l(&v, &v);
+ masterVolume = (float)((v + (1 << 23)) >> 24);
+ chain.clear();
+ }
+
+ // prepare a new state to push
+ FastMixerStateQueue *sq = NULL;
+ FastMixerState *state = NULL;
+ bool didModify = false;
+ FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
+ if (mFastMixer != NULL) {
+ sq = mFastMixer->sq();
+ state = sq->begin();
+ }
+
+ for (size_t i=0 ; i<count ; i++) {
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t == 0) {
+ continue;
+ }
+
+ // this const just means the local variable doesn't change
+ Track* const track = t.get();
+
+ // process fast tracks
+ if (track->isFastTrack()) {
+
+ // It's theoretically possible (though unlikely) for a fast track to be created
+ // and then removed within the same normal mix cycle. This is not a problem, as
+ // the track never becomes active so its fast mixer slot is never touched.
+ // The converse, of removing an (active) track and then creating a new track
+ // at the identical fast mixer slot within the same normal mix cycle,
+ // is impossible because the slot isn't marked available until the end of each cycle.
+ int j = track->mFastIndex;
+ ALOG_ASSERT(0 < j && j < (int)FastMixerState::kMaxFastTracks);
+ ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j)));
+ FastTrack *fastTrack = &state->mFastTracks[j];
+
+ // Determine whether the track is currently in underrun condition,
+ // and whether it had a recent underrun.
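+ // The dump-state counters are free-running bit fields that wrap, so the deltas below are
+ // taken modulo the field width to get the counts since the last observation.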
+ FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j];
+ FastTrackUnderruns underruns = ftDump->mUnderruns;
+ uint32_t recentFull = (underruns.mBitFields.mFull -
+ track->mObservedUnderruns.mBitFields.mFull) & UNDERRUN_MASK;
+ uint32_t recentPartial = (underruns.mBitFields.mPartial -
+ track->mObservedUnderruns.mBitFields.mPartial) & UNDERRUN_MASK;
+ uint32_t recentEmpty = (underruns.mBitFields.mEmpty -
+ track->mObservedUnderruns.mBitFields.mEmpty) & UNDERRUN_MASK;
+ uint32_t recentUnderruns = recentPartial + recentEmpty;
+ track->mObservedUnderruns = underruns;
+ // don't count underruns that occur while stopping or pausing
+ // or stopped which can occur when flush() is called while active
+ if (!(track->isStopping() || track->isPausing() || track->isStopped())) {
+ track->mUnderrunCount += recentUnderruns;
+ }
+
+ // This is similar to the state machine for normal tracks,
+ // with a few modifications for fast tracks.
+ bool isActive = true;
+ switch (track->mState) {
+ case TrackBase::STOPPING_1:
+ // track stays active in STOPPING_1 state until first underrun
+ if (recentUnderruns > 0) {
+ track->mState = TrackBase::STOPPING_2;
+ }
+ break;
+ case TrackBase::PAUSING:
+ // ramp down is not yet implemented
+ track->setPaused();
+ break;
+ case TrackBase::RESUMING:
+ // ramp up is not yet implemented
+ track->mState = TrackBase::ACTIVE;
+ break;
+ case TrackBase::ACTIVE:
+ if (recentFull > 0 || recentPartial > 0) {
+ // track has provided at least some frames recently: reset retry count
+ track->mRetryCount = kMaxTrackRetries;
+ }
+ if (recentUnderruns == 0) {
+ // no recent underruns: stay active
+ break;
+ }
+ // there has recently been an underrun of some kind
+ if (track->sharedBuffer() == 0) {
+ // were any of the recent underruns "empty" (no frames available)?
+ if (recentEmpty == 0) {
+ // no, then ignore the partial underruns as they are allowed indefinitely
+ break;
+ }
+ // there has recently been an "empty" underrun: decrement the retry counter
+ if (--(track->mRetryCount) > 0) {
+ break;
+ }
+ // indicate to client process that the track was disabled because of underrun;
+ // it will then automatically call start() when data is available
+ android_atomic_or(CBLK_DISABLED, &track->mCblk->flags);
+ // remove from active list, but state remains ACTIVE [confusing but true]
+ isActive = false;
+ break;
+ }
+ // fall through
+ case TrackBase::STOPPING_2:
+ case TrackBase::PAUSED:
+ case TrackBase::TERMINATED:
+ case TrackBase::STOPPED:
+ case TrackBase::FLUSHED: // flush() while active
+ // Check for presentation complete if track is inactive
+ // We have consumed all the buffers of this track.
+ // This would be incomplete if we auto-paused on underrun
+ {
+ size_t audioHALFrames =
+ (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+ size_t framesWritten = mBytesWritten / mFrameSize;
+ if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
+ // track stays in active list until presentation is complete
+ break;
+ }
+ }
+ if (track->isStopping_2()) {
+ track->mState = TrackBase::STOPPED;
+ }
+ if (track->isStopped()) {
+ // Can't reset directly, as fast mixer is still polling this track
+ // track->reset();
+ // So instead mark this track as needing to be reset after push with ack
+ resetMask |= 1 << i;
+ }
+ isActive = false;
+ break;
+ case TrackBase::IDLE:
+ default:
+ LOG_FATAL("unexpected track state %d", track->mState);
+ }
+
+ if (isActive) {
+ // was it previously inactive?
+ if (!(state->mTrackMask & (1 << j))) {
+ ExtendedAudioBufferProvider *eabp = track;
+ VolumeProvider *vp = track;
+ fastTrack->mBufferProvider = eabp;
+ fastTrack->mVolumeProvider = vp;
+ fastTrack->mSampleRate = track->mSampleRate;
+ fastTrack->mChannelMask = track->mChannelMask;
+ fastTrack->mGeneration++;
+ state->mTrackMask |= 1 << j;
+ didModify = true;
+ // no acknowledgement required for newly active tracks
+ }
+ // cache the combined master volume and stream type volume for fast mixer; this
+ // lacks any synchronization or barrier so VolumeProvider may read a stale value
+ track->mCachedVolume = track->isMuted() ?
+ 0 : masterVolume * mStreamTypes[track->streamType()].volume;
+ ++fastTracks;
+ } else {
+ // was it previously active?
+ if (state->mTrackMask & (1 << j)) {
+ fastTrack->mBufferProvider = NULL;
+ fastTrack->mGeneration++;
+ state->mTrackMask &= ~(1 << j);
+ didModify = true;
+ // If any fast tracks were removed, we must wait for acknowledgement
+ // because we're about to decrement the last sp<> on those tracks.
+ block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
+ } else {
+ LOG_FATAL("fast track %d should have been active", j);
+ }
+ tracksToRemove->add(track);
+ // Avoids a misleading display in dumpsys
+ track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL;
+ }
+ continue;
+ }
+
+ { // local variable scope to avoid goto warning
+
+ audio_track_cblk_t* cblk = track->cblk();
+
+ // The first time a track is added we wait
+ // for all its buffers to be filled before processing it
+ int name = track->name();
+ // make sure that we have enough frames to mix one full buffer.
+ // enforce this condition only once to enable draining the buffer in case the client
+ // app does not call stop() and relies on underrun to stop:
+ // hence the test on (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY), meaning the
+ // track was mixed during the last round
+ uint32_t minFrames = 1;
+ if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
+ (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
+ if (t->sampleRate() == mSampleRate) {
+ minFrames = mNormalFrameCount;
+ } else {
+ // +1 for rounding and +1 for additional sample needed for interpolation
+ minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
+ // add frames already consumed but not yet released by the resampler
+ // because cblk->framesReady() will include these frames
+ minFrames += mAudioMixer->getUnreleasedFrames(track->name());
+ // the minimum track buffer size is normally twice the number of frames necessary
+ // to fill one buffer and the resampler should not leave more than one buffer worth
+ // of unreleased frames after each pass, but just in case...
+ ALOG_ASSERT(minFrames <= cblk->frameCount);
+ }
+ }
+ if ((track->framesReady() >= minFrames) && track->isReady() &&
+ !track->isPaused() && !track->isTerminated())
+ {
+ ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
+ this);
+
+ mixedTracks++;
+
+ // track->mainBuffer() != mMixBuffer means there is an effect chain
+ // connected to the track
+ chain.clear();
+ if (track->mainBuffer() != mMixBuffer) {
+ chain = getEffectChain_l(track->sessionId());
+ // Delegate volume control to effect in track effect chain if needed
+ if (chain != 0) {
+ tracksWithEffect++;
+ } else {
+ ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on "
+ "session %d",
+ name, track->sessionId());
+ }
+ }
+
+
+ int param = AudioMixer::VOLUME;
+ if (track->mFillingUpStatus == Track::FS_FILLED) {
+ // no ramp for the first volume setting
+ track->mFillingUpStatus = Track::FS_ACTIVE;
+ if (track->mState == TrackBase::RESUMING) {
+ track->mState = TrackBase::ACTIVE;
+ param = AudioMixer::RAMP_VOLUME;
+ }
+ mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
+ } else if (cblk->server != 0) {
+ // If the track is stopped before the first frame was mixed,
+ // do not apply ramp
+ param = AudioMixer::RAMP_VOLUME;
+ }
+
+ // compute volume for this track
+ uint32_t vl, vr, va;
+ if (track->isMuted() || track->isPausing() ||
+ mStreamTypes[track->streamType()].mute) {
+ vl = vr = va = 0;
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ } else {
+
+ // read original volumes with volume control
+ float typeVolume = mStreamTypes[track->streamType()].volume;
+ float v = masterVolume * typeVolume;
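+ // getVolumeLR() packs the left gain in the low 16 bits and the right gain in the high
+ // 16 bits, each as U4.12 fixed point (values above MAX_GAIN_INT are clamped below)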
+ uint32_t vlr = cblk->getVolumeLR();
+ vl = vlr & 0xFFFF;
+ vr = vlr >> 16;
+ // track volumes come from shared memory, so can't be trusted and must be clamped
+ if (vl > MAX_GAIN_INT) {
+ ALOGV("Track left volume out of range: %04X", vl);
+ vl = MAX_GAIN_INT;
+ }
+ if (vr > MAX_GAIN_INT) {
+ ALOGV("Track right volume out of range: %04X", vr);
+ vr = MAX_GAIN_INT;
+ }
+ // now apply the master volume and stream type volume
+ vl = (uint32_t)(v * vl) << 12;
+ vr = (uint32_t)(v * vr) << 12;
+ // assuming master volume and stream type volume each go up to 1.0,
+ // vl and vr are now in 8.24 format
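+ // (e.g. a unity U4.12 gain of 0x1000 with v == 1.0 becomes 1 << 24)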
+
+ uint16_t sendLevel = cblk->getSendLevel_U4_12();
+ // send level comes from shared memory and so may be corrupt
+ if (sendLevel > MAX_GAIN_INT) {
+ ALOGV("Track send level out of range: %04X", sendLevel);
+ sendLevel = MAX_GAIN_INT;
+ }
+ va = (uint32_t)(v * sendLevel);
+ }
+ // Delegate volume control to effect in track effect chain if needed
+ if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
+ // Do not ramp volume if volume is controlled by effect
+ param = AudioMixer::VOLUME;
+ track->mHasVolumeController = true;
+ } else {
+ // force no volume ramp when volume controller was just disabled or removed
+ // from effect chain to avoid volume spike
+ if (track->mHasVolumeController) {
+ param = AudioMixer::VOLUME;
+ }
+ track->mHasVolumeController = false;
+ }
+
+ // Convert volumes from 8.24 to 4.12 format
+ // This additional clamping is needed in case chain->setVolume_l() overshot
+ vl = (vl + (1 << 11)) >> 12;
+ if (vl > MAX_GAIN_INT) {
+ vl = MAX_GAIN_INT;
+ }
+ vr = (vr + (1 << 11)) >> 12;
+ if (vr > MAX_GAIN_INT) {
+ vr = MAX_GAIN_INT;
+ }
+
+ if (va > MAX_GAIN_INT) {
+ va = MAX_GAIN_INT; // va is uint32_t, so no need to check for negative values
+ }
+
+ // XXX: these things DON'T need to be done each time
+ mAudioMixer->setBufferProvider(name, track);
+ mAudioMixer->enable(name);
+
+ mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)vl);
+ mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)vr);
+ mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)va);
+ mAudioMixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::FORMAT, (void *)track->format());
+ mAudioMixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::CHANNEL_MASK, (void *)track->channelMask());
+ mAudioMixer->setParameter(
+ name,
+ AudioMixer::RESAMPLE,
+ AudioMixer::SAMPLE_RATE,
+ (void *)(cblk->sampleRate));
+ mAudioMixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
+ mAudioMixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
+
+ // reset retry count
+ track->mRetryCount = kMaxTrackRetries;
+
+ // If one track is ready, set the mixer ready if:
+ // - the mixer was not ready during previous round OR
+ // - no other track has been found not ready during this round
+ if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
+ mixerStatus != MIXER_TRACKS_ENABLED) {
+ mixerStatus = MIXER_TRACKS_READY;
+ }
+ } else {
+ // clear effect chain input buffer if an active track underruns to avoid sending
+ // previous audio buffer again to effects
+ chain = getEffectChain_l(track->sessionId());
+ if (chain != 0) {
+ chain->clearInputBuffer();
+ }
+
+ ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user,
+ cblk->server, this);
+ if ((track->sharedBuffer() != 0) || track->isTerminated() ||
+ track->isStopped() || track->isPaused()) {
+ // We have consumed all the buffers of this track.
+ // Remove it from the list of active tracks.
+ // TODO: use actual buffer filling status instead of latency when available from
+ // audio HAL
+ size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
+ size_t framesWritten = mBytesWritten / mFrameSize;
+ if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
+ if (track->isStopped()) {
+ track->reset();
+ }
+ tracksToRemove->add(track);
+ }
+ } else {
+ track->mUnderrunCount++;
+ // No buffers for this track. Give it a few chances to
+ // fill a buffer, then remove it from active list.
+ if (--(track->mRetryCount) <= 0) {
+ ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this);
+ tracksToRemove->add(track);
+ // indicate to client process that the track was disabled because of underrun;
+ // it will then automatically call start() when data is available
+ android_atomic_or(CBLK_DISABLED, &cblk->flags);
+ // If one track is not ready, mark the mixer also not ready if:
+ // - the mixer was ready during previous round OR
+ // - no other track is ready
+ } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
+ mixerStatus != MIXER_TRACKS_READY) {
+ mixerStatus = MIXER_TRACKS_ENABLED;
+ }
+ }
+ mAudioMixer->disable(name);
+ }
+
+ } // local variable scope to avoid goto warning
+track_is_ready: ;
+
+ }
+
+ // Push the new FastMixer state if necessary
+ bool pauseAudioWatchdog = false;
+ if (didModify) {
+ state->mFastTracksGen++;
+ // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
+ if (kUseFastMixer == FastMixer_Dynamic &&
+ state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
+ state->mCommand = FastMixerState::COLD_IDLE;
+ state->mColdFutexAddr = &mFastMixerFutex;
+ state->mColdGen++;
+ mFastMixerFutex = 0;
+ if (kUseFastMixer == FastMixer_Dynamic) {
+ mNormalSink = mOutputSink;
+ }
+ // If we go into cold idle, need to wait for acknowledgement
+ // so that fast mixer stops doing I/O.
+ block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
+ pauseAudioWatchdog = true;
+ }
+ sq->end();
+ }
+ if (sq != NULL) {
+ sq->end(didModify);
+ sq->push(block);
+ }
+#ifdef AUDIO_WATCHDOG
+ if (pauseAudioWatchdog && mAudioWatchdog != 0) {
+ mAudioWatchdog->pause();
+ }
+#endif
+
+ // Now perform the deferred reset on fast tracks that have stopped
+ while (resetMask != 0) {
+ size_t i = __builtin_ctz(resetMask);
+ ALOG_ASSERT(i < count);
+ resetMask &= ~(1 << i);
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t == 0) {
+ continue;
+ }
+ Track* track = t.get();
+ ALOG_ASSERT(track->isFastTrack() && track->isStopped());
+ track->reset();
+ }
+
+ // remove all the tracks that need to be...
+ count = tracksToRemove->size();
+ if (CC_UNLIKELY(count)) {
+ for (size_t i=0 ; i<count ; i++) {
+ const sp<Track>& track = tracksToRemove->itemAt(i);
+ mActiveTracks.remove(track);
+ if (track->mainBuffer() != mMixBuffer) {
+ chain = getEffectChain_l(track->sessionId());
+ if (chain != 0) {
+ ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
+ track->sessionId());
+ chain->decActiveTrackCnt();
+ }
+ }
+ if (track->isTerminated()) {
+ removeTrack_l(track);
+ }
+ }
+ }
+
+ // mix buffer must be cleared if all tracks are connected to an
+ // effect chain as in this case the mixer will not write to
+ // mix buffer and track effects will accumulate into it
+ if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+ (mixedTracks == 0 && fastTracks > 0)) {
+ // FIXME as a performance optimization, should remember previous zero status
+ memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+ }
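+    // Worked example (assuming a typical 1024-frame normal mix in 16-bit stereo):
+    // the memset above clears 1024 * 2 * sizeof(int16_t) = 4096 bytes, i.e. one full
+    // normal mix buffer, so stale mixer output cannot be re-accumulated by the effects.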
+
+ // if any fast tracks, then status is ready
+ mMixerStatusIgnoringFastTracks = mixerStatus;
+ if (fastTracks > 0) {
+ mixerStatus = MIXER_TRACKS_READY;
+ }
+ return mixerStatus;
+}
+
+// getTrackName_l() must be called with ThreadBase::mLock held
+int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask, int sessionId)
+{
+ return mAudioMixer->getTrackName(channelMask, sessionId);
+}
+
+// deleteTrackName_l() must be called with ThreadBase::mLock held
+void AudioFlinger::MixerThread::deleteTrackName_l(int name)
+{
+ ALOGV("remove track (%d) and delete from mixer", name);
+ mAudioMixer->deleteTrackName(name);
+}
+
+// checkForNewParameters_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::MixerThread::checkForNewParameters_l()
+{
+ // if !&IDLE, holds the FastMixer state to restore after new parameters processed
+ FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
+ bool reconfig = false;
+
+ while (!mNewParameters.isEmpty()) {
+
+ if (mFastMixer != NULL) {
+ FastMixerStateQueue *sq = mFastMixer->sq();
+ FastMixerState *state = sq->begin();
+ if (!(state->mCommand & FastMixerState::IDLE)) {
+ previousCommand = state->mCommand;
+ state->mCommand = FastMixerState::HOT_IDLE;
+ sq->end();
+ sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
+ } else {
+ sq->end(false /*didModify*/);
+ }
+ }
+
+ status_t status = NO_ERROR;
+ String8 keyValuePair = mNewParameters[0];
+ AudioParameter param = AudioParameter(keyValuePair);
+ int value;
+
+ if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
+ if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
+ status = BAD_VALUE;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
+ if (value != AUDIO_CHANNEL_OUT_STEREO) {
+ status = BAD_VALUE;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
+ // do not accept frame count changes if tracks are open as the track buffer
+ // size depends on frame count and correct behavior would not be guaranteed
+ // if frame count is changed after track creation
+ if (!mTracks.isEmpty()) {
+ status = INVALID_OPERATION;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+#ifdef ADD_BATTERY_DATA
+ // when changing the audio output device, call addBatteryData to notify
+ // the change
+ if (mOutDevice != value) {
+ uint32_t params = 0;
+ // check whether speaker is on
+ if (value & AUDIO_DEVICE_OUT_SPEAKER) {
+ params |= IMediaPlayerService::kBatteryDataSpeakerOn;
+ }
+
+ audio_devices_t deviceWithoutSpeaker
+ = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
+ // check if any other device (except speaker) is on
+ if (value & deviceWithoutSpeaker ) {
+ params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
+ }
+
+ if (params != 0) {
+ addBatteryData(params);
+ }
+ }
+#endif
+
+ // forward device change to effects that have requested to be
+ // aware of attached audio device.
+ mOutDevice = value;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(mOutDevice);
+ }
+ }
+
+ if (status == NO_ERROR) {
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
+ if (!mStandby && status == INVALID_OPERATION) {
+ mOutput->stream->common.standby(&mOutput->stream->common);
+ mStandby = true;
+ mBytesWritten = 0;
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
+ }
+ if (status == NO_ERROR && reconfig) {
+ delete mAudioMixer;
+ // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't)
+ mAudioMixer = NULL;
+ readOutputParameters();
+ mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
+ for (size_t i = 0; i < mTracks.size() ; i++) {
+ int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId);
+ if (name < 0) {
+ break;
+ }
+ mTracks[i]->mName = name;
+ // limit track sample rate to 2 x new output sample rate
+ if (mTracks[i]->mCblk->sampleRate > 2 * sampleRate()) {
+ mTracks[i]->mCblk->sampleRate = 2 * sampleRate();
+ }
+ }
+ sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
+ }
+ }
+
+ mNewParameters.removeAt(0);
+
+ mParamStatus = status;
+ mParamCond.signal();
+ // wait for condition with time out in case the thread calling ThreadBase::setParameters()
+ // already timed out waiting for the status and will never signal the condition.
+ mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
+ }
+
+ if (!(previousCommand & FastMixerState::IDLE)) {
+ ALOG_ASSERT(mFastMixer != NULL);
+ FastMixerStateQueue *sq = mFastMixer->sq();
+ FastMixerState *state = sq->begin();
+ ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
+ state->mCommand = previousCommand;
+ sq->end();
+ sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+ }
+
+ return reconfig;
+}
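+// A parameter change reaches the loop above as an AudioParameter key/value string that
+// ThreadBase::setParameters() queued on mNewParameters.  A minimal sketch of what a
+// caller might build (illustrative only; names as declared in AudioParameter.h):
+//
+//     AudioParameter param;
+//     param.addInt(String8(AudioParameter::keySamplingRate), 48000);
+//     thread->setParameters(param.toString());    // roughly "sampling_rate=48000"
+//
+// The thread parses it with AudioParameter::getInt() as above and, on success, forwards
+// the same string to the HAL via set_parameters().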
+
+
+void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ PlaybackThread::dumpInternals(fd, args);
+
+ snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+ FastMixerDumpState copy = mFastMixerDumpState;
+ copy.dump(fd);
+
+#ifdef STATE_QUEUE_DUMP
+ // Similar for state queue
+ StateQueueObserverDump observerCopy = mStateQueueObserverDump;
+ observerCopy.dump(fd);
+ StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
+ mutatorCopy.dump(fd);
+#endif
+
+ // Write the tee output to a .wav file
+ dumpTee(fd, mTeeSource, mId);
+
+#ifdef AUDIO_WATCHDOG
+ if (mAudioWatchdog != 0) {
+ // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
+ AudioWatchdogDump wdCopy = mAudioWatchdogDump;
+ wdCopy.dump(fd);
+ }
+#endif
+}
+
+uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
+{
+ return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
+}
+
+uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs() const
+{
+ return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000);
+}
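+// Worked example for the two sleep times above (assuming mNormalFrameCount = 1024 and
+// mSampleRate = 48000): one normal mix buffer lasts (1024 * 1000 / 48000) * 1000 =
+// 21000 us, so suspendSleepTimeUs() returns about 21 ms and idleSleepTimeUs() half of
+// that, about 10.5 ms.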
+
+void AudioFlinger::MixerThread::cacheParameters_l()
+{
+ PlaybackThread::cacheParameters_l();
+
+ // FIXME: Relaxed timing because of a certain device that can't meet latency
+ // Should be reduced to 2x after the vendor fixes the driver issue
+ // increase threshold again due to low power audio mode. The way this warning
+ // threshold is calculated and its usefulness should be reconsidered anyway.
+ maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15;
+}
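+// With the same illustrative figures (1024 frames at 48 kHz), maxPeriod evaluates to
+// about 21.3 ms * 15, i.e. roughly 320 ms, which is the relaxed warning threshold
+// discussed in the comment above.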
+
+// ----------------------------------------------------------------------------
+
+AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device)
+ : PlaybackThread(audioFlinger, output, id, device, DIRECT)
+ // mLeftVolFloat, mRightVolFloat
+{
+}
+
+AudioFlinger::DirectOutputThread::~DirectOutputThread()
+{
+}
+
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
+ Vector< sp<Track> > *tracksToRemove
+)
+{
+ sp<Track> trackToRemove;
+
+ mixer_state mixerStatus = MIXER_IDLE;
+
+ // find out which tracks need to be processed
+ if (mActiveTracks.size() != 0) {
+ sp<Track> t = mActiveTracks[0].promote();
+ // The track died recently
+ if (t == 0) {
+ return MIXER_IDLE;
+ }
+
+ Track* const track = t.get();
+ audio_track_cblk_t* cblk = track->cblk();
+
+ // The first time a track is added we wait
+ // for all its buffers to be filled before processing it
+ uint32_t minFrames;
+ if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) {
+ minFrames = mNormalFrameCount;
+ } else {
+ minFrames = 1;
+ }
+ if ((track->framesReady() >= minFrames) && track->isReady() &&
+ !track->isPaused() && !track->isTerminated())
+ {
+ ALOGVV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
+
+ if (track->mFillingUpStatus == Track::FS_FILLED) {
+ track->mFillingUpStatus = Track::FS_ACTIVE;
+ mLeftVolFloat = mRightVolFloat = 0;
+ if (track->mState == TrackBase::RESUMING) {
+ track->mState = TrackBase::ACTIVE;
+ }
+ }
+
+ // compute volume for this track
+ float left, right;
+ if (track->isMuted() || mMasterMute || track->isPausing() ||
+ mStreamTypes[track->streamType()].mute) {
+ left = right = 0;
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ } else {
+ float typeVolume = mStreamTypes[track->streamType()].volume;
+ float v = mMasterVolume * typeVolume;
+ uint32_t vlr = cblk->getVolumeLR();
+ float v_clamped = v * (vlr & 0xFFFF);
+ if (v_clamped > MAX_GAIN) {
+ v_clamped = MAX_GAIN;
+ }
+ left = v_clamped/MAX_GAIN;
+ v_clamped = v * (vlr >> 16);
+ if (v_clamped > MAX_GAIN) {
+ v_clamped = MAX_GAIN;
+ }
+ right = v_clamped/MAX_GAIN;
+ }
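+            // Illustrative note: getVolumeLR() packs both track gains into one word,
+            // left in the low 16 bits and right in the high 16 bits, in 4.12 fixed
+            // point where unity is 0x1000.  E.g. a track at unity volume with
+            // v == 0.5 gives v_clamped = 0.5 * 0x1000 = 2048, so left == 0.5 after
+            // the division by MAX_GAIN (assuming MAX_GAIN is unity on the same scale).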
+
+ if (left != mLeftVolFloat || right != mRightVolFloat) {
+ mLeftVolFloat = left;
+ mRightVolFloat = right;
+
+ // Convert volumes from float to 8.24
+ uint32_t vl = (uint32_t)(left * (1 << 24));
+ uint32_t vr = (uint32_t)(right * (1 << 24));
+
+ // Delegate volume control to effect in track effect chain if needed
+ // only one effect chain can be present on DirectOutputThread, so if
+ // there is one, the track is connected to it
+ if (!mEffectChains.isEmpty()) {
+ // Do not ramp volume if volume is controlled by effect
+ mEffectChains[0]->setVolume_l(&vl, &vr);
+ left = (float)vl / (1 << 24);
+ right = (float)vr / (1 << 24);
+ }
+ mOutput->stream->set_volume(mOutput->stream, left, right);
+ }
+
+ // reset retry count
+ track->mRetryCount = kMaxTrackRetriesDirect;
+ mActiveTrack = t;
+ mixerStatus = MIXER_TRACKS_READY;
+ } else {
+ // clear effect chain input buffer if an active track underruns to avoid sending
+ // previous audio buffer again to effects
+ if (!mEffectChains.isEmpty()) {
+ mEffectChains[0]->clearInputBuffer();
+ }
+
+ ALOGVV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
+ if ((track->sharedBuffer() != 0) || track->isTerminated() ||
+ track->isStopped() || track->isPaused()) {
+ // We have consumed all the buffers of this track.
+ // Remove it from the list of active tracks.
+ // TODO: implement behavior for compressed audio
+ size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
+ size_t framesWritten = mBytesWritten / mFrameSize;
+ if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
+ if (track->isStopped()) {
+ track->reset();
+ }
+ trackToRemove = track;
+ }
+ } else {
+ // No buffers for this track. Give it a few chances to
+ // fill a buffer, then remove it from active list.
+ if (--(track->mRetryCount) <= 0) {
+ ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
+ trackToRemove = track;
+ } else {
+ mixerStatus = MIXER_TRACKS_ENABLED;
+ }
+ }
+ }
+ }
+
+ // FIXME merge this with similar code for removing multiple tracks
+ // remove all the tracks that need to be...
+ if (CC_UNLIKELY(trackToRemove != 0)) {
+ tracksToRemove->add(trackToRemove);
+ mActiveTracks.remove(trackToRemove);
+ if (!mEffectChains.isEmpty()) {
+ ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(),
+ trackToRemove->sessionId());
+ mEffectChains[0]->decActiveTrackCnt();
+ }
+ if (trackToRemove->isTerminated()) {
+ removeTrack_l(trackToRemove);
+ }
+ }
+
+ return mixerStatus;
+}
+
+void AudioFlinger::DirectOutputThread::threadLoop_mix()
+{
+ AudioBufferProvider::Buffer buffer;
+ size_t frameCount = mFrameCount;
+ int8_t *curBuf = (int8_t *)mMixBuffer;
+ // output audio to hardware
+ while (frameCount) {
+ buffer.frameCount = frameCount;
+ mActiveTrack->getNextBuffer(&buffer);
+ if (CC_UNLIKELY(buffer.raw == NULL)) {
+ memset(curBuf, 0, frameCount * mFrameSize);
+ break;
+ }
+ memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
+ frameCount -= buffer.frameCount;
+ curBuf += buffer.frameCount * mFrameSize;
+ mActiveTrack->releaseBuffer(&buffer);
+ }
+ sleepTime = 0;
+ standbyTime = systemTime() + standbyDelay;
+ mActiveTrack.clear();
+
+}
+
+void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
+{
+ if (sleepTime == 0) {
+ if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+ sleepTime = activeSleepTime;
+ } else {
+ sleepTime = idleSleepTime;
+ }
+ } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
+ memset(mMixBuffer, 0, mFrameCount * mFrameSize);
+ sleepTime = 0;
+ }
+}
+
+// getTrackName_l() must be called with ThreadBase::mLock held
+int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask,
+ int sessionId)
+{
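+    // DirectOutputThread feeds the HAL from a single active track without an
+    // AudioMixer, so mixer track names are not meaningful here and a constant
+    // placeholder is returned.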
+ return 0;
+}
+
+// deleteTrackName_l() must be called with ThreadBase::mLock held
+void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name)
+{
+}
+
+// checkForNewParameters_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::DirectOutputThread::checkForNewParameters_l()
+{
+ bool reconfig = false;
+
+ while (!mNewParameters.isEmpty()) {
+ status_t status = NO_ERROR;
+ String8 keyValuePair = mNewParameters[0];
+ AudioParameter param = AudioParameter(keyValuePair);
+ int value;
+
+ if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
+ // do not accept frame count changes if tracks are open as the track buffer
+            // size depends on frame count and correct behavior would not be guaranteed
+ // if frame count is changed after track creation
+ if (!mTracks.isEmpty()) {
+ status = INVALID_OPERATION;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (status == NO_ERROR) {
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
+ if (!mStandby && status == INVALID_OPERATION) {
+ mOutput->stream->common.standby(&mOutput->stream->common);
+ mStandby = true;
+ mBytesWritten = 0;
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
+ }
+ if (status == NO_ERROR && reconfig) {
+ readOutputParameters();
+ sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
+ }
+ }
+
+ mNewParameters.removeAt(0);
+
+ mParamStatus = status;
+ mParamCond.signal();
+ // wait for condition with time out in case the thread calling ThreadBase::setParameters()
+ // already timed out waiting for the status and will never signal the condition.
+ mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
+ }
+ return reconfig;
+}
+
+uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const
+{
+ uint32_t time;
+ if (audio_is_linear_pcm(mFormat)) {
+ time = PlaybackThread::activeSleepTimeUs();
+ } else {
+ time = 10000;
+ }
+ return time;
+}
+
+uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const
+{
+ uint32_t time;
+ if (audio_is_linear_pcm(mFormat)) {
+ time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
+ } else {
+ time = 10000;
+ }
+ return time;
+}
+
+uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const
+{
+ uint32_t time;
+ if (audio_is_linear_pcm(mFormat)) {
+ time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
+ } else {
+ time = 10000;
+ }
+ return time;
+}
+
+void AudioFlinger::DirectOutputThread::cacheParameters_l()
+{
+ PlaybackThread::cacheParameters_l();
+
+    // use a shorter standby delay than on normal output to release
+ // hardware resources as soon as possible
+ standbyDelay = microseconds(activeSleepTime*2);
+}
+
+// ----------------------------------------------------------------------------
+
+AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
+ AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
+ : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
+ DUPLICATING),
+ mWaitTimeMs(UINT_MAX)
+{
+ addOutputTrack(mainThread);
+}
+
+AudioFlinger::DuplicatingThread::~DuplicatingThread()
+{
+ for (size_t i = 0; i < mOutputTracks.size(); i++) {
+ mOutputTracks[i]->destroy();
+ }
+}
+
+void AudioFlinger::DuplicatingThread::threadLoop_mix()
+{
+ // mix buffers...
+ if (outputsReady(outputTracks)) {
+ mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
+ } else {
+ memset(mMixBuffer, 0, mixBufferSize);
+ }
+ sleepTime = 0;
+ writeFrames = mNormalFrameCount;
+ standbyTime = systemTime() + standbyDelay;
+}
+
+void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
+{
+ if (sleepTime == 0) {
+ if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+ sleepTime = activeSleepTime;
+ } else {
+ sleepTime = idleSleepTime;
+ }
+ } else if (mBytesWritten != 0) {
+ if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+ writeFrames = mNormalFrameCount;
+ memset(mMixBuffer, 0, mixBufferSize);
+ } else {
+ // flush remaining overflow buffers in output tracks
+ writeFrames = 0;
+ }
+ sleepTime = 0;
+ }
+}
+
+void AudioFlinger::DuplicatingThread::threadLoop_write()
+{
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ outputTracks[i]->write(mMixBuffer, writeFrames);
+ }
+ mBytesWritten += mixBufferSize;
+}
+
+void AudioFlinger::DuplicatingThread::threadLoop_standby()
+{
+ // DuplicatingThread implements standby by stopping all tracks
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ outputTracks[i]->stop();
+ }
+}
+
+void AudioFlinger::DuplicatingThread::saveOutputTracks()
+{
+ outputTracks = mOutputTracks;
+}
+
+void AudioFlinger::DuplicatingThread::clearOutputTracks()
+{
+ outputTracks.clear();
+}
+
+void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
+{
+ Mutex::Autolock _l(mLock);
+ // FIXME explain this formula
+ size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
+ OutputTrack *outputTrack = new OutputTrack(thread,
+ this,
+ mSampleRate,
+ mFormat,
+ mChannelMask,
+ frameCount);
+ if (outputTrack->cblk() != NULL) {
+ thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f);
+ mOutputTracks.add(outputTrack);
+ ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
+ updateWaitTime_l();
+ }
+}
+
+void AudioFlinger::DuplicatingThread::removeOutputTrack(MixerThread *thread)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mOutputTracks.size(); i++) {
+ if (mOutputTracks[i]->thread() == thread) {
+ mOutputTracks[i]->destroy();
+ mOutputTracks.removeAt(i);
+ updateWaitTime_l();
+ return;
+ }
+ }
+    ALOGV("removeOutputTrack(): unknown thread: %p", thread);
+}
+
+// caller must hold mLock
+void AudioFlinger::DuplicatingThread::updateWaitTime_l()
+{
+ mWaitTimeMs = UINT_MAX;
+ for (size_t i = 0; i < mOutputTracks.size(); i++) {
+ sp<ThreadBase> strong = mOutputTracks[i]->thread().promote();
+ if (strong != 0) {
+ uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate();
+ if (waitTimeMs < mWaitTimeMs) {
+ mWaitTimeMs = waitTimeMs;
+ }
+ }
+ }
+}
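+// Worked example (assuming one attached MixerThread with a 1024-frame mix buffer at
+// 48 kHz): waitTimeMs = (1024 * 2 * 1000) / 48000 = 42 ms, i.e. roughly two of the
+// destination thread's buffers; the smallest such value across all output tracks
+// becomes mWaitTimeMs.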
+
+
+bool AudioFlinger::DuplicatingThread::outputsReady(
+ const SortedVector< sp<OutputTrack> > &outputTracks)
+{
+ for (size_t i = 0; i < outputTracks.size(); i++) {
+ sp<ThreadBase> thread = outputTracks[i]->thread().promote();
+ if (thread == 0) {
+ ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p",
+ outputTracks[i].get());
+ return false;
+ }
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ // see note at standby() declaration
+ if (playbackThread->standby() && !playbackThread->isSuspended()) {
+ ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(),
+ thread.get());
+ return false;
+ }
+ }
+ return true;
+}
+
+uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
+{
+ return (mWaitTimeMs * 1000) / 2;
+}
+
+void AudioFlinger::DuplicatingThread::cacheParameters_l()
+{
+ // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first
+ updateWaitTime_l();
+
+ MixerThread::cacheParameters_l();
+}
+
+// ----------------------------------------------------------------------------
+// Record
+// ----------------------------------------------------------------------------
+
+AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamIn *input,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t id,
+ audio_devices_t device,
+ const sp<NBAIO_Sink>& teeSink) :
+ ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
+ mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
+ // mRsmpInIndex and mInputBytes set by readInputParameters()
+ mReqChannelCount(popcount(channelMask)),
+ mReqSampleRate(sampleRate),
+ // mBytesRead is only meaningful while active, and so is cleared in start()
+ // (but might be better to also clear here for dump?)
+ mTeeSink(teeSink)
+{
+ snprintf(mName, kNameLength, "AudioIn_%X", id);
+
+ readInputParameters();
+
+}
+
+
+AudioFlinger::RecordThread::~RecordThread()
+{
+ delete[] mRsmpInBuffer;
+ delete mResampler;
+ delete[] mRsmpOutBuffer;
+}
+
+void AudioFlinger::RecordThread::onFirstRef()
+{
+ run(mName, PRIORITY_URGENT_AUDIO);
+}
+
+status_t AudioFlinger::RecordThread::readyToRun()
+{
+ status_t status = initCheck();
+ ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this);
+ return status;
+}
+
+bool AudioFlinger::RecordThread::threadLoop()
+{
+ AudioBufferProvider::Buffer buffer;
+ sp<RecordTrack> activeTrack;
+ Vector< sp<EffectChain> > effectChains;
+
+ nsecs_t lastWarning = 0;
+
+ inputStandBy();
+ acquireWakeLock();
+
+ // used to verify we've read at least once before evaluating how many bytes were read
+ bool readOnce = false;
+
+ // start recording
+ while (!exitPending()) {
+
+ processConfigEvents();
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+ checkForNewParameters_l();
+ if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
+ standby();
+
+ if (exitPending()) {
+ break;
+ }
+
+ releaseWakeLock_l();
+ ALOGV("RecordThread: loop stopping");
+ // go to sleep
+ mWaitWorkCV.wait(mLock);
+ ALOGV("RecordThread: loop starting");
+ acquireWakeLock_l();
+ continue;
+ }
+ if (mActiveTrack != 0) {
+ if (mActiveTrack->mState == TrackBase::PAUSING) {
+ standby();
+ mActiveTrack.clear();
+ mStartStopCond.broadcast();
+ } else if (mActiveTrack->mState == TrackBase::RESUMING) {
+ if (mReqChannelCount != mActiveTrack->channelCount()) {
+ mActiveTrack.clear();
+ mStartStopCond.broadcast();
+ } else if (readOnce) {
+ // record start succeeds only if first read from audio input
+ // succeeds
+ if (mBytesRead >= 0) {
+ mActiveTrack->mState = TrackBase::ACTIVE;
+ } else {
+ mActiveTrack.clear();
+ }
+ mStartStopCond.broadcast();
+ }
+ mStandby = false;
+ } else if (mActiveTrack->mState == TrackBase::TERMINATED) {
+ removeTrack_l(mActiveTrack);
+ mActiveTrack.clear();
+ }
+ }
+ lockEffectChains_l(effectChains);
+ }
+
+ if (mActiveTrack != 0) {
+ if (mActiveTrack->mState != TrackBase::ACTIVE &&
+ mActiveTrack->mState != TrackBase::RESUMING) {
+ unlockEffectChains(effectChains);
+ usleep(kRecordThreadSleepUs);
+ continue;
+ }
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ effectChains[i]->process_l();
+ }
+
+ buffer.frameCount = mFrameCount;
+ if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+ readOnce = true;
+ size_t framesOut = buffer.frameCount;
+ if (mResampler == NULL) {
+ // no resampling
+ while (framesOut) {
+ size_t framesIn = mFrameCount - mRsmpInIndex;
+ if (framesIn) {
+ int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
+ int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
+ mActiveTrack->mFrameSize;
+ if (framesIn > framesOut)
+ framesIn = framesOut;
+ mRsmpInIndex += framesIn;
+ framesOut -= framesIn;
+ if (mChannelCount == mReqChannelCount ||
+ mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+ memcpy(dst, src, framesIn * mFrameSize);
+ } else {
+ if (mChannelCount == 1) {
+ upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
+ (int16_t *)src, framesIn);
+ } else {
+ downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
+ (int16_t *)src, framesIn);
+ }
+ }
+ }
+ if (framesOut && mFrameCount == mRsmpInIndex) {
+ void *readInto;
+ if (framesOut == mFrameCount &&
+ (mChannelCount == mReqChannelCount ||
+ mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
+ readInto = buffer.raw;
+ framesOut = 0;
+ } else {
+ readInto = mRsmpInBuffer;
+ mRsmpInIndex = 0;
+ }
+ mBytesRead = mInput->stream->read(mInput->stream, readInto, mInputBytes);
+ if (mBytesRead <= 0) {
+ if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
+ {
+ ALOGE("Error reading audio input");
+ // Force input into standby so that it tries to
+ // recover at next read attempt
+ inputStandBy();
+ usleep(kRecordThreadSleepUs);
+ }
+ mRsmpInIndex = mFrameCount;
+ framesOut = 0;
+ buffer.frameCount = 0;
+ } else if (mTeeSink != 0) {
+ (void) mTeeSink->write(readInto,
+ mBytesRead >> Format_frameBitShift(mTeeSink->format()));
+ }
+ }
+ }
+ } else {
+ // resampling
+
+ memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t));
+ // alter output frame count as if we were expecting stereo samples
+ if (mChannelCount == 1 && mReqChannelCount == 1) {
+ framesOut >>= 1;
+ }
+ mResampler->resample(mRsmpOutBuffer, framesOut,
+ this /* AudioBufferProvider* */);
+ // ditherAndClamp() works as long as all buffers returned by
+ // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
+ if (mChannelCount == 2 && mReqChannelCount == 1) {
+ ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
+ // the resampler always outputs stereo samples:
+ // do post stereo to mono conversion
+ downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
+ framesOut);
+ } else {
+ ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
+ }
+
+ }
+ if (mFramestoDrop == 0) {
+ mActiveTrack->releaseBuffer(&buffer);
+ } else {
+ if (mFramestoDrop > 0) {
+ mFramestoDrop -= buffer.frameCount;
+ if (mFramestoDrop <= 0) {
+ clearSyncStartEvent();
+ }
+ } else {
+ mFramestoDrop += buffer.frameCount;
+ if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
+ mSyncStartEvent->isCancelled()) {
+ ALOGW("Synced record %s, session %d, trigger session %d",
+ (mFramestoDrop >= 0) ? "timed out" : "cancelled",
+ mActiveTrack->sessionId(),
+ (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
+ clearSyncStartEvent();
+ }
+ }
+ }
+ mActiveTrack->clearOverflow();
+ }
+ // client isn't retrieving buffers fast enough
+ else {
+ if (!mActiveTrack->setOverflow()) {
+ nsecs_t now = systemTime();
+ if ((now - lastWarning) > kWarningThrottleNs) {
+ ALOGW("RecordThread: buffer overflow");
+ lastWarning = now;
+ }
+ }
+ // Release the processor for a while before asking for a new buffer.
+ // This will give the application more chance to read from the buffer and
+ // clear the overflow.
+ usleep(kRecordThreadSleepUs);
+ }
+ }
+ // enable changes in effect chain
+ unlockEffectChains(effectChains);
+ effectChains.clear();
+ }
+
+ standby();
+
+ {
+ Mutex::Autolock _l(mLock);
+ mActiveTrack.clear();
+ mStartStopCond.broadcast();
+ }
+
+ releaseWakeLock();
+
+ ALOGV("RecordThread %p exiting", this);
+ return false;
+}
+
+void AudioFlinger::RecordThread::standby()
+{
+ if (!mStandby) {
+ inputStandBy();
+ mStandby = true;
+ }
+}
+
+void AudioFlinger::RecordThread::inputStandBy()
+{
+ mInput->stream->common.standby(&mInput->stream->common);
+}
+
+sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ int sessionId,
+ IAudioFlinger::track_flags_t flags,
+ pid_t tid,
+ status_t *status)
+{
+ sp<RecordTrack> track;
+ status_t lStatus;
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ ALOGE("Audio driver not initialized.");
+ goto Exit;
+ }
+
+ // FIXME use flags and tid similar to createTrack_l()
+
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+
+ track = new RecordTrack(this, client, sampleRate,
+ format, channelMask, frameCount, sessionId);
+
+ if (track->getCblk() == 0) {
+ lStatus = NO_MEMORY;
+ goto Exit;
+ }
+ mTracks.add(track);
+
+ // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
+ bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
+ mAudioFlinger->btNrecIsOff();
+ setEffectSuspended_l(FX_IID_AEC, suspend, sessionId);
+ setEffectSuspended_l(FX_IID_NS, suspend, sessionId);
+ }
+ lStatus = NO_ERROR;
+
+Exit:
+ if (status) {
+ *status = lStatus;
+ }
+ return track;
+}
+
+status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
+ AudioSystem::sync_event_t event,
+ int triggerSession)
+{
+ ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
+ sp<ThreadBase> strongMe = this;
+ status_t status = NO_ERROR;
+
+ if (event == AudioSystem::SYNC_EVENT_NONE) {
+ clearSyncStartEvent();
+ } else if (event != AudioSystem::SYNC_EVENT_SAME) {
+ mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
+ triggerSession,
+ recordTrack->sessionId(),
+ syncStartEventCallback,
+ this);
+ // Sync event can be cancelled by the trigger session if the track is not in a
+ // compatible state in which case we start record immediately
+ if (mSyncStartEvent->isCancelled()) {
+ clearSyncStartEvent();
+ } else {
+ // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs
+ mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000);
+ }
+ }
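+    // Illustrative example: with a 48 kHz request rate and a timeout of, say, 30 ms
+    // (the actual value of kSyncRecordStartTimeOutMs may differ), mFramestoDrop starts
+    // at -1440; threadLoop() counts it back up toward zero and, if the sync event has
+    // not fired by then, logs the "timed out" warning and starts delivering data anyway.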
+
+ {
+ AutoMutex lock(mLock);
+ if (mActiveTrack != 0) {
+ if (recordTrack != mActiveTrack.get()) {
+ status = -EBUSY;
+ } else if (mActiveTrack->mState == TrackBase::PAUSING) {
+ mActiveTrack->mState = TrackBase::ACTIVE;
+ }
+ return status;
+ }
+
+ recordTrack->mState = TrackBase::IDLE;
+ mActiveTrack = recordTrack;
+ mLock.unlock();
+        status = AudioSystem::startInput(mId);
+ mLock.lock();
+ if (status != NO_ERROR) {
+ mActiveTrack.clear();
+ clearSyncStartEvent();
+ return status;
+ }
+ mRsmpInIndex = mFrameCount;
+ mBytesRead = 0;
+ if (mResampler != NULL) {
+ mResampler->reset();
+ }
+ mActiveTrack->mState = TrackBase::RESUMING;
+ // signal thread to start
+ ALOGV("Signal record thread");
+ mWaitWorkCV.broadcast();
+ // do not wait for mStartStopCond if exiting
+ if (exitPending()) {
+ mActiveTrack.clear();
+ status = INVALID_OPERATION;
+ goto startError;
+ }
+ mStartStopCond.wait(mLock);
+ if (mActiveTrack == 0) {
+ ALOGV("Record failed to start");
+ status = BAD_VALUE;
+ goto startError;
+ }
+ ALOGV("Record started OK");
+ return status;
+ }
+startError:
+ AudioSystem::stopInput(mId);
+ clearSyncStartEvent();
+ return status;
+}
+
+void AudioFlinger::RecordThread::clearSyncStartEvent()
+{
+ if (mSyncStartEvent != 0) {
+ mSyncStartEvent->cancel();
+ }
+ mSyncStartEvent.clear();
+ mFramestoDrop = 0;
+}
+
+void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
+{
+ sp<SyncEvent> strongEvent = event.promote();
+
+ if (strongEvent != 0) {
+ RecordThread *me = (RecordThread *)strongEvent->cookie();
+ me->handleSyncStartEvent(strongEvent);
+ }
+}
+
+void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event)
+{
+ if (event == mSyncStartEvent) {
+ // TODO: use actual buffer filling status instead of 2 buffers when info is available
+ // from audio HAL
+ mFramestoDrop = mFrameCount * 2;
+ }
+}
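+// Worked example (assuming a 512-frame HAL input buffer at 48 kHz): once the trigger
+// session fires, mFramestoDrop = 512 * 2 = 1024 frames, so roughly 21 ms of input is
+// still discarded as an estimate of the audio already buffered in the HAL, per the
+// TODO above.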
+
+bool AudioFlinger::RecordThread::stop_l(RecordThread::RecordTrack* recordTrack) {
+ ALOGV("RecordThread::stop");
+ if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
+ return false;
+ }
+ recordTrack->mState = TrackBase::PAUSING;
+ // do not wait for mStartStopCond if exiting
+ if (exitPending()) {
+ return true;
+ }
+ mStartStopCond.wait(mLock);
+ // if we have been restarted, recordTrack == mActiveTrack.get() here
+ if (exitPending() || recordTrack != mActiveTrack.get()) {
+ ALOGV("Record stopped OK");
+ return true;
+ }
+ return false;
+}
+
+bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const
+{
+ return false;
+}
+
+status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event)
+{
+#if 0 // This branch is currently dead code, but is preserved in case it will be needed in future
+ if (!isValidSyncEvent(event)) {
+ return BAD_VALUE;
+ }
+
+ int eventSession = event->triggerSession();
+ status_t ret = NAME_NOT_FOUND;
+
+ Mutex::Autolock _l(mLock);
+
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ sp<RecordTrack> track = mTracks[i];
+ if (eventSession == track->sessionId()) {
+ (void) track->setSyncEvent(event);
+ ret = NO_ERROR;
+ }
+ }
+ return ret;
+#else
+ return BAD_VALUE;
+#endif
+}
+
+// destroyTrack_l() must be called with ThreadBase::mLock held
+void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
+{
+ track->mState = TrackBase::TERMINATED;
+ // active tracks are removed by threadLoop()
+ if (mActiveTrack != track) {
+ removeTrack_l(track);
+ }
+}
+
+void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track)
+{
+ mTracks.remove(track);
+ // need anything related to effects here?
+}
+
+void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
+{
+ dumpInternals(fd, args);
+ dumpTracks(fd, args);
+ dumpEffectChains(fd, args);
+}
+
+void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "\nInput thread %p internals\n", this);
+ result.append(buffer);
+
+ if (mActiveTrack != 0) {
+ snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "In size: %d\n", mInputBytes);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL));
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Out channel count: %u\n", mReqChannelCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "Out sample rate: %u\n", mReqSampleRate);
+ result.append(buffer);
+ } else {
+ result.append("No active record client\n");
+ }
+
+ write(fd, result.string(), result.size());
+
+ dumpBase(fd, args);
+}
+
+void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "Input thread %p tracks\n", this);
+ result.append(buffer);
+ RecordTrack::appendDumpHeader(result);
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<RecordTrack> track = mTracks[i];
+ if (track != 0) {
+ track->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ }
+
+ if (mActiveTrack != 0) {
+ snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this);
+ result.append(buffer);
+ RecordTrack::appendDumpHeader(result);
+ mActiveTrack->dump(buffer, SIZE);
+ result.append(buffer);
+
+ }
+ write(fd, result.string(), result.size());
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+ size_t framesReq = buffer->frameCount;
+ size_t framesReady = mFrameCount - mRsmpInIndex;
+ int channelCount;
+
+ if (framesReady == 0) {
+ mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
+ if (mBytesRead <= 0) {
+ if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) {
+ ALOGE("RecordThread::getNextBuffer() Error reading audio input");
+ // Force input into standby so that it tries to
+ // recover at next read attempt
+ inputStandBy();
+ usleep(kRecordThreadSleepUs);
+ }
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ return NOT_ENOUGH_DATA;
+ }
+ mRsmpInIndex = 0;
+ framesReady = mFrameCount;
+ }
+
+ if (framesReq > framesReady) {
+ framesReq = framesReady;
+ }
+
+ if (mChannelCount == 1 && mReqChannelCount == 2) {
+ channelCount = 1;
+ } else {
+ channelCount = 2;
+ }
+ buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount;
+ buffer->frameCount = framesReq;
+ return NO_ERROR;
+}
+
+// AudioBufferProvider interface
+void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ mRsmpInIndex += buffer->frameCount;
+ buffer->frameCount = 0;
+}
+
+bool AudioFlinger::RecordThread::checkForNewParameters_l()
+{
+ bool reconfig = false;
+
+ while (!mNewParameters.isEmpty()) {
+ status_t status = NO_ERROR;
+ String8 keyValuePair = mNewParameters[0];
+ AudioParameter param = AudioParameter(keyValuePair);
+ int value;
+ audio_format_t reqFormat = mFormat;
+ uint32_t reqSamplingRate = mReqSampleRate;
+ uint32_t reqChannelCount = mReqChannelCount;
+
+ if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
+ reqSamplingRate = value;
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
+ reqFormat = (audio_format_t) value;
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
+ reqChannelCount = popcount(value);
+ reconfig = true;
+ }
+ if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
+ // do not accept frame count changes if tracks are open as the track buffer
+ // size depends on frame count and correct behavior would not be guaranteed
+ // if frame count is changed after track creation
+ if (mActiveTrack != 0) {
+ status = INVALID_OPERATION;
+ } else {
+ reconfig = true;
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+ // forward device change to effects that have requested to be
+ // aware of attached audio device.
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(value);
+ }
+
+ // store input device and output device but do not forward output device to audio HAL.
+ // Note that status is ignored by the caller for output device
+            // (see AudioFlinger::setParameters())
+ if (audio_is_output_devices(value)) {
+ mOutDevice = value;
+ status = BAD_VALUE;
+ } else {
+ mInDevice = value;
+ // disable AEC and NS if the device is a BT SCO headset supporting those
+ // pre processings
+ if (mTracks.size() > 0) {
+ bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
+ mAudioFlinger->btNrecIsOff();
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ sp<RecordTrack> track = mTracks[i];
+ setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
+ setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId());
+ }
+ }
+ }
+ }
+ if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR &&
+ mAudioSource != (audio_source_t)value) {
+ // forward device change to effects that have requested to be
+ // aware of attached audio device.
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setAudioSource_l((audio_source_t)value);
+ }
+ mAudioSource = (audio_source_t)value;
+ }
+ if (status == NO_ERROR) {
+ status = mInput->stream->common.set_parameters(&mInput->stream->common,
+ keyValuePair.string());
+ if (status == INVALID_OPERATION) {
+ inputStandBy();
+ status = mInput->stream->common.set_parameters(&mInput->stream->common,
+ keyValuePair.string());
+ }
+ if (reconfig) {
+ if (status == BAD_VALUE &&
+ reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
+ reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
+ ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common)
+ <= (2 * reqSamplingRate)) &&
+ popcount(mInput->stream->common.get_channels(&mInput->stream->common))
+ <= FCC_2 &&
+ (reqChannelCount <= FCC_2)) {
+ status = NO_ERROR;
+ }
+ if (status == NO_ERROR) {
+ readInputParameters();
+ sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED);
+ }
+ }
+ }
+
+ mNewParameters.removeAt(0);
+
+ mParamStatus = status;
+ mParamCond.signal();
+ // wait for condition with time out in case the thread calling ThreadBase::setParameters()
+ // already timed out waiting for the status and will never signal the condition.
+ mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
+ }
+ return reconfig;
+}
+
+String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
+{
+ char *s;
+ String8 out_s8 = String8();
+
+ Mutex::Autolock _l(mLock);
+ if (initCheck() != NO_ERROR) {
+ return out_s8;
+ }
+
+ s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
+ out_s8 = String8(s);
+ free(s);
+ return out_s8;
+}
+
+void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
+ AudioSystem::OutputDescriptor desc;
+ void *param2 = NULL;
+
+ switch (event) {
+ case AudioSystem::INPUT_OPENED:
+ case AudioSystem::INPUT_CONFIG_CHANGED:
+ desc.channels = mChannelMask;
+ desc.samplingRate = mSampleRate;
+ desc.format = mFormat;
+ desc.frameCount = mFrameCount;
+ desc.latency = 0;
+ param2 = &desc;
+ break;
+
+ case AudioSystem::INPUT_CLOSED:
+ default:
+ break;
+ }
+ mAudioFlinger->audioConfigChanged_l(event, mId, param2);
+}
+
+void AudioFlinger::RecordThread::readInputParameters()
+{
+    delete[] mRsmpInBuffer;
+    // mRsmpInBuffer is always assigned a new[] below
+    delete[] mRsmpOutBuffer;
+ mRsmpOutBuffer = NULL;
+ delete mResampler;
+ mResampler = NULL;
+
+ mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
+ mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
+ mChannelCount = (uint16_t)popcount(mChannelMask);
+ mFormat = mInput->stream->common.get_format(&mInput->stream->common);
+ mFrameSize = audio_stream_frame_size(&mInput->stream->common);
+ mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
+ mFrameCount = mInputBytes / mFrameSize;
+ mNormalFrameCount = mFrameCount; // not used by record, but used by input effects
+ mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
+
+ if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
+ {
+ int channelCount;
+ // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
+ // stereo to mono post process as the resampler always outputs stereo.
+ if (mChannelCount == 1 && mReqChannelCount == 2) {
+ channelCount = 1;
+ } else {
+ channelCount = 2;
+ }
+ mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
+ mResampler->setSampleRate(mSampleRate);
+ mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
+ mRsmpOutBuffer = new int32_t[mFrameCount * 2];
+
+        // optimization: if mono to mono, alter input frame count as if we were inputting
+ // stereo samples
+ if (mChannelCount == 1 && mReqChannelCount == 1) {
+ mFrameCount >>= 1;
+ }
+
+ }
+ mRsmpInIndex = mFrameCount;
+}
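+// Worked example (assuming the HAL reports 16-bit stereo PCM at 48 kHz with a
+// 4096-byte buffer): mFrameSize = 4, so mFrameCount = 4096 / 4 = 1024 frames and
+// mRsmpInBuffer holds 1024 * 2 int16_t samples.  If the client asked for 44.1 kHz
+// mono, a stereo resampler is created and mRsmpOutBuffer holds 1024 stereo frames of
+// int32_t samples, which threadLoop() then dithers, clamps and downmixes to mono.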
+
+unsigned int AudioFlinger::RecordThread::getInputFramesLost()
+{
+ Mutex::Autolock _l(mLock);
+ if (initCheck() != NO_ERROR) {
+ return 0;
+ }
+
+ return mInput->stream->get_input_frames_lost(mInput->stream);
+}
+
+uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId) const
+{
+ Mutex::Autolock _l(mLock);
+ uint32_t result = 0;
+ if (getEffectChain_l(sessionId) != 0) {
+ result = EFFECT_SESSION;
+ }
+
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ if (sessionId == mTracks[i]->sessionId()) {
+ result |= TRACK_SESSION;
+ break;
+ }
+ }
+
+ return result;
+}
+
+KeyedVector<int, bool> AudioFlinger::RecordThread::sessionIds() const
+{
+ KeyedVector<int, bool> ids;
+ Mutex::Autolock _l(mLock);
+ for (size_t j = 0; j < mTracks.size(); ++j) {
+ sp<RecordThread::RecordTrack> track = mTracks[j];
+ int sessionId = track->sessionId();
+ if (ids.indexOfKey(sessionId) < 0) {
+ ids.add(sessionId, true);
+ }
+ }
+ return ids;
+}
+
+AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::clearInput()
+{
+ Mutex::Autolock _l(mLock);
+ AudioStreamIn *input = mInput;
+ mInput = NULL;
+ return input;
+}
+
+// this method must always be called either with ThreadBase mLock held or inside the thread loop
+audio_stream_t* AudioFlinger::RecordThread::stream() const
+{
+ if (mInput == NULL) {
+ return NULL;
+ }
+ return &mInput->stream->common;
+}
+
+status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
+{
+ // only one chain per input thread
+ if (mEffectChains.size() != 0) {
+ return INVALID_OPERATION;
+ }
+ ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this);
+
+ chain->setInBuffer(NULL);
+ chain->setOutBuffer(NULL);
+
+ checkSuspendOnAddEffectChain_l(chain);
+
+ mEffectChains.add(chain);
+
+ return NO_ERROR;
+}
+
+size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& chain)
+{
+ ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
+ ALOGW_IF(mEffectChains.size() != 1,
+ "removeEffectChain_l() %p invalid chain size %d on thread %p",
+ chain.get(), mEffectChains.size(), this);
+ if (mEffectChains.size() == 1) {
+ mEffectChains.removeAt(0);
+ }
+ return 0;
+}
+
+}; // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
new file mode 100644
index 0000000..06a1c8c
--- /dev/null
+++ b/services/audioflinger/Threads.h
@@ -0,0 +1,801 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+class ThreadBase : public Thread {
+public:
+
+#include "TrackBase.h"
+
+ enum type_t {
+ MIXER, // Thread class is MixerThread
+ DIRECT, // Thread class is DirectOutputThread
+ DUPLICATING, // Thread class is DuplicatingThread
+ RECORD // Thread class is RecordThread
+ };
+
+ ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
+ virtual ~ThreadBase();
+
+ void dumpBase(int fd, const Vector<String16>& args);
+ void dumpEffectChains(int fd, const Vector<String16>& args);
+
+ void clearPowerManager();
+
+ // base for record and playback
+ enum {
+ CFG_EVENT_IO,
+ CFG_EVENT_PRIO
+ };
+
+ class ConfigEvent {
+ public:
+ ConfigEvent(int type) : mType(type) {}
+ virtual ~ConfigEvent() {}
+
+ int type() const { return mType; }
+
+ virtual void dump(char *buffer, size_t size) = 0;
+
+ private:
+ const int mType;
+ };
+
+ class IoConfigEvent : public ConfigEvent {
+ public:
+ IoConfigEvent(int event, int param) :
+            ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(param) {}
+ virtual ~IoConfigEvent() {}
+
+ int event() const { return mEvent; }
+ int param() const { return mParam; }
+
+ virtual void dump(char *buffer, size_t size) {
+ snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam);
+ }
+
+ private:
+ const int mEvent;
+ const int mParam;
+ };
+
+ class PrioConfigEvent : public ConfigEvent {
+ public:
+ PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
+ ConfigEvent(CFG_EVENT_PRIO), mPid(pid), mTid(tid), mPrio(prio) {}
+ virtual ~PrioConfigEvent() {}
+
+ pid_t pid() const { return mPid; }
+ pid_t tid() const { return mTid; }
+ int32_t prio() const { return mPrio; }
+
+ virtual void dump(char *buffer, size_t size) {
+ snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio);
+ }
+
+ private:
+ const pid_t mPid;
+ const pid_t mTid;
+ const int32_t mPrio;
+ };
+
+
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ wp<ThreadBase> mThread;
+ };
+
+ virtual status_t initCheck() const = 0;
+
+ // static externally-visible
+ type_t type() const { return mType; }
+ audio_io_handle_t id() const { return mId;}
+
+ // dynamic externally-visible
+ uint32_t sampleRate() const { return mSampleRate; }
+ uint32_t channelCount() const { return mChannelCount; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
+ audio_format_t format() const { return mFormat; }
+ // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
+ // and returns the normal mix buffer's frame count.
+ size_t frameCount() const { return mNormalFrameCount; }
+                // Returns the HAL's frame count, i.e. the fast mixer buffer size.
+ size_t frameCountHAL() const { return mFrameCount; }
+
+ // Should be "virtual status_t requestExitAndWait()" and override same
+ // method in Thread, but Thread::requestExitAndWait() is not yet virtual.
+ void exit();
+ virtual bool checkForNewParameters_l() = 0;
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys) = 0;
+ virtual void audioConfigChanged_l(int event, int param = 0) = 0;
+ void sendIoConfigEvent(int event, int param = 0);
+ void sendIoConfigEvent_l(int event, int param = 0);
+ void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
+ void processConfigEvents();
+
+ // see note at declaration of mStandby, mOutDevice and mInDevice
+ bool standby() const { return mStandby; }
+ audio_devices_t outDevice() const { return mOutDevice; }
+ audio_devices_t inDevice() const { return mInDevice; }
+
+ virtual audio_stream_t* stream() const = 0;
+
+ sp<EffectHandle> createEffect_l(
+ const sp<AudioFlinger::Client>& client,
+ const sp<IEffectClient>& effectClient,
+ int32_t priority,
+ int sessionId,
+ effect_descriptor_t *desc,
+ int *enabled,
+ status_t *status);
+ void disconnectEffect(const sp< EffectModule>& effect,
+ EffectHandle *handle,
+ bool unpinIfLast);
+
+ // return values for hasAudioSession (bit field)
+ enum effect_state {
+ EFFECT_SESSION = 0x1, // the audio session corresponds to at least one
+ // effect
+ TRACK_SESSION = 0x2 // the audio session corresponds to at least one
+ // track
+ };
+
+ // get effect chain corresponding to session Id.
+ sp<EffectChain> getEffectChain(int sessionId);
+ // same as getEffectChain() but must be called with ThreadBase mutex locked
+ sp<EffectChain> getEffectChain_l(int sessionId) const;
+ // add an effect chain to the chain list (mEffectChains)
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain) = 0;
+ // remove an effect chain from the chain list (mEffectChains)
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain) = 0;
+ // lock all effect chains Mutexes. Must be called before releasing the
+                // ThreadBase mutex and before processing the mixer and effects. This guarantees the
+ // integrity of the chains during the process.
+ // Also sets the parameter 'effectChains' to current value of mEffectChains.
+ void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
+ // unlock effect chains after process
+ void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
+ // set audio mode to all effect chains
+ void setMode(audio_mode_t mode);
+ // get effect module with corresponding ID on specified audio session
+ sp<AudioFlinger::EffectModule> getEffect(int sessionId, int effectId);
+ sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
+                // add an effect module. Also creates the effect chain if none exists for
+                // the effect's audio session
+ status_t addEffect_l(const sp< EffectModule>& effect);
+                // remove an effect module. Also removes the effect chain if this was the last
+ // effect
+ void removeEffect_l(const sp< EffectModule>& effect);
+ // detach all tracks connected to an auxiliary effect
+ virtual void detachAuxEffect_l(int effectId) {}
+ // returns either EFFECT_SESSION if effects on this audio session exist in one
+ // chain, or TRACK_SESSION if tracks on this audio session exist, or both
+ virtual uint32_t hasAudioSession(int sessionId) const = 0;
+ // the value returned by default implementation is not important as the
+ // strategy is only meaningful for PlaybackThread which implements this method
+ virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; }
+
+ // suspend or restore effect according to the type of effect passed. a NULL
+ // type pointer means suspend all effects in the session
+ void setEffectSuspended(const effect_uuid_t *type,
+ bool suspend,
+ int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+ // check if some effects must be suspended/restored when an effect is enabled
+ // or disabled
+ void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+ bool enabled,
+ int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+ void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
+ bool enabled,
+ int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0;
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0;
+
+
+ mutable Mutex mLock;
+
+protected:
+
+ // entry describing an effect being suspended in mSuspendedSessions keyed vector
+ class SuspendedSessionDesc : public RefBase {
+ public:
+ SuspendedSessionDesc() : mRefCount(0) {}
+
+ int mRefCount; // number of active suspend requests
+ effect_uuid_t mType; // effect type UUID
+ };
+
+ void acquireWakeLock();
+ void acquireWakeLock_l();
+ void releaseWakeLock();
+ void releaseWakeLock_l();
+ void setEffectSuspended_l(const effect_uuid_t *type,
+ bool suspend,
+ int sessionId);
+                // update mSuspendedSessions when an effect is suspended or restored
+ void updateSuspendedSessions_l(const effect_uuid_t *type,
+ bool suspend,
+ int sessionId);
+ // check if some effects must be suspended when an effect chain is added
+ void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
+
+ virtual void preExit() { }
+
+ friend class AudioFlinger; // for mEffectChains
+
+ const type_t mType;
+
+ // Used by parameters, config events, addTrack_l, exit
+ Condition mWaitWorkCV;
+
+ const sp<AudioFlinger> mAudioFlinger;
+ uint32_t mSampleRate;
+ size_t mFrameCount; // output HAL, direct output, record
+ size_t mNormalFrameCount; // normal mixer and effects
+ audio_channel_mask_t mChannelMask;
+ uint16_t mChannelCount;
+ size_t mFrameSize;
+ audio_format_t mFormat;
+
+ // Parameter sequence by client: binder thread calling setParameters():
+ // 1. Lock mLock
+ // 2. Append to mNewParameters
+ // 3. mWaitWorkCV.signal
+ // 4. mParamCond.waitRelative with timeout
+ // 5. read mParamStatus
+ // 6. mWaitWorkCV.signal
+ // 7. Unlock
+ //
+ // Parameter sequence by server: threadLoop calling checkForNewParameters_l():
+ // 1. Lock mLock
+ // 2. If there is an entry in mNewParameters proceed ...
+ // 2. Read first entry in mNewParameters
+ // 3. Process
+ // 4. Remove first entry from mNewParameters
+ // 5. Set mParamStatus
+ // 6. mParamCond.signal
+ // 7. mWaitWorkCV.wait with timeout (this is to avoid overwriting mParamStatus)
+ // 8. Unlock
+ Condition mParamCond;
+ Vector<String8> mNewParameters;
+ status_t mParamStatus;
+
+ Vector<ConfigEvent *> mConfigEvents;
+
+ // These fields are written and read by thread itself without lock or barrier,
+ // and read by other threads without lock or barrier via standby() , outDevice()
+ // and inDevice().
+ // Because of the absence of a lock or barrier, any other thread that reads
+ // these fields must use the information in isolation, or be prepared to deal
+ // with possibility that it might be inconsistent with other information.
+ bool mStandby; // Whether thread is currently in standby.
+ audio_devices_t mOutDevice; // output device
+ audio_devices_t mInDevice; // input device
+ audio_source_t mAudioSource; // (see audio.h, audio_source_t)
+
+ const audio_io_handle_t mId;
+ Vector< sp<EffectChain> > mEffectChains;
+
+ static const int kNameLength = 16; // prctl(PR_SET_NAME) limit
+ char mName[kNameLength];
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ const sp<PMDeathRecipient> mDeathRecipient;
+ // list of suspended effects per session and per type. The first vector is
+ // keyed by session ID, the second by type UUID timeLow field
+ KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > >
+ mSuspendedSessions;
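+ // Illustrative layout (comment only): mSuspendedSessions.valueFor(sessionId) yields a
+ // KeyedVector keyed by the effect type UUID's timeLow word, and each value is a
+ // SuspendedSessionDesc whose mRefCount counts the outstanding suspend requests for that type.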
+};
+
+// --- PlaybackThread ---
+class PlaybackThread : public ThreadBase {
+public:
+
+#include "PlaybackTracks.h"
+
+ enum mixer_state {
+ MIXER_IDLE, // no active tracks
+ MIXER_TRACKS_ENABLED, // at least one active track, but no track has any data ready
+ MIXER_TRACKS_READY // at least one active track, and at least one track has data
+ // standby mode does not have an enum value
+ // suspend by audio policy manager is orthogonal to mixer state
+ };
+
+ PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+ audio_io_handle_t id, audio_devices_t device, type_t type);
+ virtual ~PlaybackThread();
+
+ void dump(int fd, const Vector<String16>& args);
+
+ // Thread virtuals
+ virtual status_t readyToRun();
+ virtual bool threadLoop();
+
+ // RefBase
+ virtual void onFirstRef();
+
+protected:
+ // Code snippets that were lifted up out of threadLoop()
+ virtual void threadLoop_mix() = 0;
+ virtual void threadLoop_sleepTime() = 0;
+ virtual void threadLoop_write();
+ virtual void threadLoop_standby();
+ virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
+
+ // prepareTracks_l reads and writes mActiveTracks, and returns
+ // the pending set of tracks to remove via Vector 'tracksToRemove'. The caller
+ // is responsible for clearing or destroying this Vector later on, when it
+ // is safe to do so. That will drop the final ref count and destroy the tracks.
+ virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
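+
+ // Rough shape of threadLoop() in terms of the hooks above (illustrative comment only; the
+ // real loop also handles config events, parameters, standby and wake locks):
+ //
+ //   while (!exitPending()) {
+ //       mMixerStatus = prepareTracks_l(&tracksToRemove);
+ //       if (mMixerStatus == MIXER_TRACKS_READY) threadLoop_mix();
+ //       else                                    threadLoop_sleepTime();
+ //       if (sleepTime == 0) threadLoop_write(); else usleep(sleepTime);
+ //       threadLoop_removeTracks(tracksToRemove); tracksToRemove.clear();
+ //   }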
+
+ // ThreadBase virtuals
+ virtual void preExit();
+
+public:
+
+ virtual status_t initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
+
+ // return estimated latency in milliseconds, as reported by HAL
+ uint32_t latency() const;
+ // same, but lock must already be held
+ uint32_t latency_l() const;
+
+ void setMasterVolume(float value);
+ void setMasterMute(bool muted);
+
+ void setStreamVolume(audio_stream_type_t stream, float value);
+ void setStreamMute(audio_stream_type_t stream, bool muted);
+
+ float streamVolume(audio_stream_type_t stream) const;
+
+ sp<Track> createTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId,
+ IAudioFlinger::track_flags_t *flags,
+ pid_t tid,
+ status_t *status);
+
+ AudioStreamOut* getOutput() const;
+ AudioStreamOut* clearOutput();
+ virtual audio_stream_t* stream() const;
+
+ // a very large number of suspend() calls will eventually wrap around, but this is unlikely
+ void suspend() { (void) android_atomic_inc(&mSuspended); }
+ void restore()
+ {
+ // if restore() is done without suspend(), get back into
+ // range so that the next suspend() will operate correctly
+ if (android_atomic_dec(&mSuspended) <= 0) {
+ android_atomic_release_store(0, &mSuspended);
+ }
+ }
+ bool isSuspended() const
+ { return android_atomic_acquire_load(&mSuspended) > 0; }
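+ // Illustrative usage (comment only): when both an A2DP and a SCO output are in use,
+ // AudioPolicyService suspend()s one of the threads and restore()s it later; each nested
+ // suspend() must be balanced by a restore() before writes to the HAL resume.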
+
+ virtual String8 getParameters(const String8& keys);
+ virtual void audioConfigChanged_l(int event, int param = 0);
+ status_t getRenderPosition(size_t *halFrames, size_t *dspFrames);
+ int16_t *mixBuffer() const { return mMixBuffer; }
+
+ virtual void detachAuxEffect_l(int effectId);
+ status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track,
+ int EffectId);
+ status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track,
+ int EffectId);
+
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
+ virtual uint32_t hasAudioSession(int sessionId) const;
+ virtual uint32_t getStrategyForSession_l(int sessionId);
+
+
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
+ void invalidateTracks(audio_stream_type_t streamType);
+
+
+protected:
+ int16_t* mMixBuffer;
+
+ // suspend count, > 0 means suspended. While suspended, the thread continues to pull from
+ // tracks and mix, but doesn't write to the HAL. A2DP and SCO HAL implementations can't handle
+ // concurrent use of both of them, so Audio Policy Service suspends one of the threads to
+ // work around that restriction.
+ // 'volatile' means accessed via atomic operations and no lock.
+ volatile int32_t mSuspended;
+
+ // FIXME overflows every 6+ hours at 44.1 kHz stereo 16-bit samples
+ // mFramesWritten would be better, or 64-bit even better
+ size_t mBytesWritten;
+private:
+ // mMasterMute is in both PlaybackThread and AudioFlinger. When a
+ // PlaybackThread needs to find out if it is master-muted, it checks its local
+ // copy rather than the one in AudioFlinger. This optimization saves a lock.
+ bool mMasterMute;
+ void setMasterMute_l(bool muted) { mMasterMute = muted; }
+protected:
+ SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<>
+
+ // Allocate a track name for a given channel mask.
+ // Returns name >= 0 if successful, -1 on failure.
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId) = 0;
+ virtual void deleteTrackName_l(int name) = 0;
+
+ // Time to sleep between cycles when:
+ virtual uint32_t activeSleepTimeUs() const; // mixer state MIXER_TRACKS_ENABLED
+ virtual uint32_t idleSleepTimeUs() const = 0; // mixer state MIXER_IDLE
+ virtual uint32_t suspendSleepTimeUs() const = 0; // audio policy manager suspended us
+ // No sleep when mixer state == MIXER_TRACKS_READY; relies on audio HAL stream->write()
+ // No sleep in standby mode; waits on a condition
+
+ // Code snippets that are temporarily lifted up out of threadLoop() until the merge
+ void checkSilentMode_l();
+
+ // Non-trivial for DUPLICATING only
+ virtual void saveOutputTracks() { }
+ virtual void clearOutputTracks() { }
+
+ // Cache various calculated values, at threadLoop() entry and after a parameter change
+ virtual void cacheParameters_l();
+
+ virtual uint32_t correctLatency_l(uint32_t latency) const;
+
+private:
+
+ friend class AudioFlinger; // for numerous
+
+ PlaybackThread(const PlaybackThread&);
+ PlaybackThread& operator = (const PlaybackThread&);
+
+ status_t addTrack_l(const sp<Track>& track);
+ void destroyTrack_l(const sp<Track>& track);
+ void removeTrack_l(const sp<Track>& track);
+
+ void readOutputParameters();
+
+ virtual void dumpInternals(int fd, const Vector<String16>& args);
+ void dumpTracks(int fd, const Vector<String16>& args);
+
+ SortedVector< sp<Track> > mTracks;
+ // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by
+ // DuplicatingThread
+ stream_type_t mStreamTypes[AUDIO_STREAM_CNT + 1];
+ AudioStreamOut *mOutput;
+
+ float mMasterVolume;
+ nsecs_t mLastWriteTime;
+ int mNumWrites;
+ int mNumDelayedWrites;
+ bool mInWrite;
+
+ // FIXME rename these former local variables of threadLoop to standard "m" names
+ nsecs_t standbyTime;
+ size_t mixBufferSize;
+
+ // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l()
+ uint32_t activeSleepTime;
+ uint32_t idleSleepTime;
+
+ uint32_t sleepTime;
+
+ // mixer status returned by prepareTracks_l()
+ mixer_state mMixerStatus; // current cycle
+ // previous cycle when in prepareTracks_l()
+ mixer_state mMixerStatusIgnoringFastTracks;
+ // FIXME or a separate ready state per track
+
+ // FIXME move these declarations into the specific sub-class that needs them
+ // MIXER only
+ uint32_t sleepTimeShift;
+
+ // same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value
+ nsecs_t standbyDelay;
+
+ // MIXER only
+ nsecs_t maxPeriod;
+
+ // DUPLICATING only
+ uint32_t writeFrames;
+
+private:
+ // The HAL output sink is treated as non-blocking, but the current implementation is blocking
+ sp<NBAIO_Sink> mOutputSink;
+ // If a fast mixer is present, the blocking pipe sink, otherwise clear
+ sp<NBAIO_Sink> mPipeSink;
+ // The current sink for the normal mixer to write its (sub)mix: either mOutputSink or mPipeSink
+ sp<NBAIO_Sink> mNormalSink;
+ // For dumpsys
+ sp<NBAIO_Sink> mTeeSink;
+ sp<NBAIO_Source> mTeeSource;
+ uint32_t mScreenState; // cached copy of gScreenState
+public:
+ virtual bool hasFastMixer() const = 0;
+ virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const
+ { FastTrackUnderruns dummy; return dummy; }
+
+protected:
+ // accessed by both binder threads and within threadLoop(), lock on mutex needed
+ unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available
+
+};
+
+class MixerThread : public PlaybackThread {
+public:
+ MixerThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ audio_devices_t device,
+ type_t type = MIXER);
+ virtual ~MixerThread();
+
+ // Thread virtuals
+
+ virtual bool checkForNewParameters_l();
+ virtual void dumpInternals(int fd, const Vector<String16>& args);
+
+protected:
+ virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId);
+ virtual void deleteTrackName_l(int name);
+ virtual uint32_t idleSleepTimeUs() const;
+ virtual uint32_t suspendSleepTimeUs() const;
+ virtual void cacheParameters_l();
+
+ // threadLoop snippets
+ virtual void threadLoop_write();
+ virtual void threadLoop_standby();
+ virtual void threadLoop_mix();
+ virtual void threadLoop_sleepTime();
+ virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
+ virtual uint32_t correctLatency_l(uint32_t latency) const;
+
+ AudioMixer* mAudioMixer; // normal mixer
+private:
+ // one-time initialization, no locks required
+ FastMixer* mFastMixer; // non-NULL if there is also a fast mixer
+ sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread
+
+ // contents are not guaranteed to be consistent, no locks required
+ FastMixerDumpState mFastMixerDumpState;
+#ifdef STATE_QUEUE_DUMP
+ StateQueueObserverDump mStateQueueObserverDump;
+ StateQueueMutatorDump mStateQueueMutatorDump;
+#endif
+ AudioWatchdogDump mAudioWatchdogDump;
+
+ // accessible only within the threadLoop(), no locks required
+ // mFastMixer->sq() // for mutating and pushing state
+ int32_t mFastMixerFutex; // for cold idle
+
+public:
+ virtual bool hasFastMixer() const { return mFastMixer != NULL; }
+ virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const {
+ ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks);
+ return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
+ }
+};
+
+class DirectOutputThread : public PlaybackThread {
+public:
+
+ DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+ audio_io_handle_t id, audio_devices_t device);
+ virtual ~DirectOutputThread();
+
+ // Thread virtuals
+
+ virtual bool checkForNewParameters_l();
+
+protected:
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId);
+ virtual void deleteTrackName_l(int name);
+ virtual uint32_t activeSleepTimeUs() const;
+ virtual uint32_t idleSleepTimeUs() const;
+ virtual uint32_t suspendSleepTimeUs() const;
+ virtual void cacheParameters_l();
+
+ // threadLoop snippets
+ virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
+ virtual void threadLoop_mix();
+ virtual void threadLoop_sleepTime();
+
+private:
+ // volumes last sent to audio HAL with stream->set_volume()
+ float mLeftVolFloat;
+ float mRightVolFloat;
+
+ // prepareTracks_l() tells threadLoop_mix() which is the single active track
+ sp<Track> mActiveTrack;
+public:
+ virtual bool hasFastMixer() const { return false; }
+};
+
+class DuplicatingThread : public MixerThread {
+public:
+ DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
+ audio_io_handle_t id);
+ virtual ~DuplicatingThread();
+
+ // Thread virtuals
+ void addOutputTrack(MixerThread* thread);
+ void removeOutputTrack(MixerThread* thread);
+ uint32_t waitTimeMs() const { return mWaitTimeMs; }
+protected:
+ virtual uint32_t activeSleepTimeUs() const;
+
+private:
+ bool outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
+protected:
+ // threadLoop snippets
+ virtual void threadLoop_mix();
+ virtual void threadLoop_sleepTime();
+ virtual void threadLoop_write();
+ virtual void threadLoop_standby();
+ virtual void cacheParameters_l();
+
+private:
+ // called from threadLoop, addOutputTrack, removeOutputTrack
+ virtual void updateWaitTime_l();
+protected:
+ virtual void saveOutputTracks();
+ virtual void clearOutputTracks();
+private:
+
+ uint32_t mWaitTimeMs;
+ SortedVector < sp<OutputTrack> > outputTracks;
+ SortedVector < sp<OutputTrack> > mOutputTracks;
+public:
+ virtual bool hasFastMixer() const { return false; }
+};
+
+
+// record thread
+class RecordThread : public ThreadBase, public AudioBufferProvider
+ // derives from AudioBufferProvider interface for use by resampler
+{
+public:
+
+#include "RecordTracks.h"
+
+ RecordThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamIn *input,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t id,
+ audio_devices_t device,
+ const sp<NBAIO_Sink>& teeSink);
+ virtual ~RecordThread();
+
+ // no addTrack_l ?
+ void destroyTrack_l(const sp<RecordTrack>& track);
+ void removeTrack_l(const sp<RecordTrack>& track);
+
+ void dumpInternals(int fd, const Vector<String16>& args);
+ void dumpTracks(int fd, const Vector<String16>& args);
+
+ // Thread virtuals
+ virtual bool threadLoop();
+ virtual status_t readyToRun();
+
+ // RefBase
+ virtual void onFirstRef();
+
+ virtual status_t initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
+ sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
+ const sp<AudioFlinger::Client>& client,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ int sessionId,
+ IAudioFlinger::track_flags_t flags,
+ pid_t tid,
+ status_t *status);
+
+ status_t start(RecordTrack* recordTrack,
+ AudioSystem::sync_event_t event,
+ int triggerSession);
+
+ // ask the thread to stop the specified track, and
+ // return true if the caller should then do its part of the stopping process
+ bool stop_l(RecordTrack* recordTrack);
+
+ void dump(int fd, const Vector<String16>& args);
+ AudioStreamIn* clearInput();
+ virtual audio_stream_t* stream() const;
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
+ virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+ virtual bool checkForNewParameters_l();
+ virtual String8 getParameters(const String8& keys);
+ virtual void audioConfigChanged_l(int event, int param = 0);
+ void readInputParameters();
+ virtual unsigned int getInputFramesLost();
+
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
+ virtual uint32_t hasAudioSession(int sessionId) const;
+
+ // Return the set of unique session IDs across all tracks.
+ // The keys are the session IDs, and the associated values are meaningless.
+ // FIXME replace by Set [and implement Bag/Multiset for other uses].
+ KeyedVector<int, bool> sessionIds() const;
+
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
+
+ static void syncStartEventCallback(const wp<SyncEvent>& event);
+ void handleSyncStartEvent(const sp<SyncEvent>& event);
+
+private:
+ void clearSyncStartEvent();
+
+ // Enter standby if not already in standby, and set mStandby flag
+ void standby();
+
+ // Call the HAL standby method unconditionally, and don't change mStandby flag
+ void inputStandBy();
+
+ AudioStreamIn *mInput;
+ SortedVector < sp<RecordTrack> > mTracks;
+ // mActiveTrack has dual roles: it indicates the current active track, and
+ // is used together with mStartStopCond to indicate start()/stop() progress
+ sp<RecordTrack> mActiveTrack;
+ Condition mStartStopCond;
+ AudioResampler *mResampler;
+ int32_t *mRsmpOutBuffer;
+ int16_t *mRsmpInBuffer;
+ size_t mRsmpInIndex;
+ size_t mInputBytes;
+ const uint32_t mReqChannelCount;
+ const uint32_t mReqSampleRate;
+ ssize_t mBytesRead;
+ // sync event triggering actual audio capture. Frames read before this event will
+ // be dropped and therefore not read by the application.
+ sp<SyncEvent> mSyncStartEvent;
+ // number of captured frames to drop after the start sync event has been received.
+ // When < 0, the maximum number of frames to drop before starting capture even if the
+ // sync event is not received.
+ ssize_t mFramestoDrop;
+
+ // For dumpsys
+ const sp<NBAIO_Sink> mTeeSink;
+};
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
new file mode 100644
index 0000000..17de49b
--- /dev/null
+++ b/services/audioflinger/TrackBase.h
@@ -0,0 +1,139 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+// base for record and playback
+class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
+
+public:
+ enum track_state {
+ IDLE,
+ TERMINATED,
+ FLUSHED,
+ STOPPED,
+ // next 2 states are currently used for fast tracks only
+ STOPPING_1, // waiting for first underrun
+ STOPPING_2, // waiting for presentation complete
+ RESUMING,
+ ACTIVE,
+ PAUSING,
+ PAUSED
+ };
+
+ TrackBase(ThreadBase *thread,
+ const sp<Client>& client,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId);
+ virtual ~TrackBase();
+
+ virtual status_t start(AudioSystem::sync_event_t event,
+ int triggerSession) = 0;
+ virtual void stop() = 0;
+ sp<IMemory> getCblk() const { return mCblkMemory; }
+ audio_track_cblk_t* cblk() const { return mCblk; }
+ int sessionId() const { return mSessionId; }
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+
+protected:
+ TrackBase(const TrackBase&);
+ TrackBase& operator = (const TrackBase&);
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
+ virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+ // ExtendedAudioBufferProvider interface is only needed for Track,
+ // but putting it in TrackBase avoids the complexity of virtual inheritance
+ virtual size_t framesReady() const { return SIZE_MAX; }
+
+ audio_format_t format() const {
+ return mFormat;
+ }
+
+ uint32_t channelCount() const { return mChannelCount; }
+
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
+
+ uint32_t sampleRate() const; // FIXME inline after cblk sr moved
+
+ // Return a pointer to the start of a contiguous slice of the track buffer.
+ // Parameter 'offset' is the requested start position, expressed in
+ // monotonically increasing frame units relative to the track epoch.
+ // Parameter 'frames' is the requested length, also in frame units.
+ // Always returns non-NULL. It is the caller's responsibility to
+ // verify that this will be successful; the result of calling this
+ // function with invalid 'offset' or 'frames' is undefined.
+ void* getBuffer(uint32_t offset, uint32_t frames) const;
+
+ bool isStopped() const {
+ return (mState == STOPPED || mState == FLUSHED);
+ }
+
+ // for fast tracks only
+ bool isStopping() const {
+ return mState == STOPPING_1 || mState == STOPPING_2;
+ }
+ bool isStopping_1() const {
+ return mState == STOPPING_1;
+ }
+ bool isStopping_2() const {
+ return mState == STOPPING_2;
+ }
+
+ bool isTerminated() const {
+ return mState == TERMINATED;
+ }
+
+ bool step(); // mStepCount is an implicit input
+ void reset();
+
+ virtual bool isOut() const = 0; // true for Track and TimedTrack, false for RecordTrack,
+ // this could be a track type if needed later
+
+ const wp<ThreadBase> mThread;
+ /*const*/ sp<Client> mClient; // see explanation at ~TrackBase() why not const
+ sp<IMemory> mCblkMemory;
+ audio_track_cblk_t* mCblk;
+ void* mBuffer; // start of track buffer, typically in shared memory
+ void* mBufferEnd; // &mBuffer[mFrameCount * frameSize], where frameSize
+ // is based on mChannelCount and 16-bit samples
+ uint32_t mStepCount; // saves AudioBufferProvider::Buffer::frameCount as of
+ // time of releaseBuffer() for later use by step()
+ // we don't really need a lock for these
+ track_state mState;
+ const uint32_t mSampleRate; // initial sample rate only; for tracks which
+ // support dynamic rates, the current value is in control block
+ const audio_format_t mFormat;
+ const audio_channel_mask_t mChannelMask;
+ const uint8_t mChannelCount;
+ const size_t mFrameSize; // AudioFlinger's view of frame size in shared memory,
+ // where for AudioTrack (but not AudioRecord),
+ // 8-bit PCM samples are stored as 16-bit
+ const size_t mFrameCount;// size of track buffer given at createTrack() or
+ // openRecord(), and then adjusted as needed
+
+ bool mStepServerFailed;
+ const int mSessionId;
+ Vector < sp<SyncEvent> > mSyncEvents;
+};
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
new file mode 100644
index 0000000..2c6ba8b
--- /dev/null
+++ b/services/audioflinger/Tracks.cpp
@@ -0,0 +1,1789 @@
+/*
+**
+** Copyright 2012, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+
+#include <math.h>
+#include <cutils/compiler.h>
+#include <utils/Log.h>
+
+#include <private/media/AudioTrackShared.h>
+
+#include <common_time/cc_helper.h>
+#include <common_time/local_clock.h>
+
+#include "AudioMixer.h"
+#include "AudioFlinger.h"
+#include "ServiceUtilities.h"
+
+// ----------------------------------------------------------------------------
+
+// Note: the following macro is used for extremely verbose logging messages. In
+// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
+// 0; but one side effect of this is to turn on all LOGVs as well. Some messages
+// are so verbose that we want to suppress them even when we have ALOG_ASSERT
+// turned on. Do not uncomment the #define below unless you really know what you
+// are doing and want to see all of the extremely verbose messages.
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+// TrackBase
+// ----------------------------------------------------------------------------
+
+// TrackBase constructor must be called with AudioFlinger::mLock held
+AudioFlinger::ThreadBase::TrackBase::TrackBase(
+ ThreadBase *thread,
+ const sp<Client>& client,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId)
+ : RefBase(),
+ mThread(thread),
+ mClient(client),
+ mCblk(NULL),
+ // mBuffer
+ // mBufferEnd
+ mStepCount(0),
+ mState(IDLE),
+ mSampleRate(sampleRate),
+ mFormat(format),
+ mChannelMask(channelMask),
+ mChannelCount(popcount(channelMask)),
+ mFrameSize(audio_is_linear_pcm(format) ?
+ mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
+ mFrameCount(frameCount),
+ mStepServerFailed(false),
+ mSessionId(sessionId)
+{
+ // client == 0 implies sharedBuffer == 0
+ ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
+
+ ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+ sharedBuffer->size());
+
+ // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
+ size_t size = sizeof(audio_track_cblk_t);
+ size_t bufferSize = frameCount * mFrameSize;
+ if (sharedBuffer == 0) {
+ size += bufferSize;
+ }
+
+ if (client != 0) {
+ mCblkMemory = client->heap()->allocate(size);
+ if (mCblkMemory != 0) {
+ mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
+ // can't assume mCblk != NULL
+ } else {
+ ALOGE("not enough memory for AudioTrack size=%u", size);
+ client->heap()->dump("AudioTrack");
+ return;
+ }
+ } else {
+ mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
+ // assume mCblk != NULL
+ }
+
+ // construct the shared structure in-place.
+ if (mCblk != NULL) {
+ new(mCblk) audio_track_cblk_t();
+ // clear all buffers
+ mCblk->frameCount_ = frameCount;
+ mCblk->sampleRate = sampleRate;
+// uncomment the following lines to quickly test 32-bit wraparound
+// mCblk->user = 0xffff0000;
+// mCblk->server = 0xffff0000;
+// mCblk->userBase = 0xffff0000;
+// mCblk->serverBase = 0xffff0000;
+ if (sharedBuffer == 0) {
+ mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
+ memset(mBuffer, 0, bufferSize);
+ // Force underrun condition to avoid false underrun callback until first data is
+ // written to buffer (other flags are cleared)
+ mCblk->flags = CBLK_UNDERRUN;
+ } else {
+ mBuffer = sharedBuffer->pointer();
+ }
+ mBufferEnd = (uint8_t *)mBuffer + bufferSize;
+ }
+}
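+
+// Resulting layout when sharedBuffer == 0 (illustrative comment only): a single allocation
+// holds the control block immediately followed by the audio data,
+//
+//   [ audio_track_cblk_t ][ frameCount * mFrameSize bytes of audio data ]
+//   ^ mCblk               ^ mBuffer                                     ^ mBufferEnd
+//
+// With a client-supplied sharedBuffer, only the control block is allocated here and mBuffer
+// points into the client's shared memory.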
+
+AudioFlinger::ThreadBase::TrackBase::~TrackBase()
+{
+ if (mCblk != NULL) {
+ if (mClient == 0) {
+ delete mCblk;
+ } else {
+ mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
+ }
+ }
+ mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
+ if (mClient != 0) {
+ // Client destructor must run with AudioFlinger mutex locked
+ Mutex::Autolock _l(mClient->audioFlinger()->mLock);
+ // If the client's reference count drops to zero, the associated destructor
+ // must run with AudioFlinger lock held. Thus the explicit clear() rather than
+ // relying on the automatic clear() at end of scope.
+ mClient.clear();
+ }
+}
+
+// AudioBufferProvider interface
+// getNextBuffer() = 0;
+// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
+void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ buffer->raw = NULL;
+ mStepCount = buffer->frameCount;
+ // FIXME See note at getNextBuffer()
+ (void) step(); // ignore return value of step()
+ buffer->frameCount = 0;
+}
+
+bool AudioFlinger::ThreadBase::TrackBase::step() {
+ bool result;
+ audio_track_cblk_t* cblk = this->cblk();
+
+ result = cblk->stepServer(mStepCount, mFrameCount, isOut());
+ if (!result) {
+ ALOGV("stepServer failed acquiring cblk mutex");
+ mStepServerFailed = true;
+ }
+ return result;
+}
+
+void AudioFlinger::ThreadBase::TrackBase::reset() {
+ audio_track_cblk_t* cblk = this->cblk();
+
+ cblk->user = 0;
+ cblk->server = 0;
+ cblk->userBase = 0;
+ cblk->serverBase = 0;
+ mStepServerFailed = false;
+ ALOGV("TrackBase::reset");
+}
+
+uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
+ return mCblk->sampleRate;
+}
+
+void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
+ audio_track_cblk_t* cblk = this->cblk();
+ int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize;
+ int8_t *bufferEnd = bufferStart + frames * mFrameSize;
+
+ // Check the validity of the returned pointer in case the track control block has been corrupted.
+ ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
+ "TrackBase::getBuffer buffer out of range:\n"
+ " start: %p, end %p , mBuffer %p mBufferEnd %p\n"
+ " server %u, serverBase %u, user %u, userBase %u, frameSize %u",
+ bufferStart, bufferEnd, mBuffer, mBufferEnd,
+ cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize);
+
+ return bufferStart;
+}
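+
+// Worked example for getBuffer() (illustrative, assumed values): with cblk->serverBase == 1000,
+// offset == 1024 and mFrameSize == 4 (16-bit stereo), bufferStart is mBuffer + 24 * 4 bytes,
+// i.e. 96 bytes past the start of the track buffer.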
+
+status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
+{
+ mSyncEvents.add(event);
+ return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+// Playback
+// ----------------------------------------------------------------------------
+
+AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
+ : BnAudioTrack(),
+ mTrack(track)
+{
+}
+
+AudioFlinger::TrackHandle::~TrackHandle() {
+ // Just stop the track on deletion; associated resources
+ // will be freed from the main thread once all pending buffers have
+ // been played. If the track is not in the active track list,
+ // we free everything now...
+ mTrack->destroy();
+}
+
+sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
+ return mTrack->getCblk();
+}
+
+status_t AudioFlinger::TrackHandle::start() {
+ return mTrack->start();
+}
+
+void AudioFlinger::TrackHandle::stop() {
+ mTrack->stop();
+}
+
+void AudioFlinger::TrackHandle::flush() {
+ mTrack->flush();
+}
+
+void AudioFlinger::TrackHandle::mute(bool e) {
+ mTrack->mute(e);
+}
+
+void AudioFlinger::TrackHandle::pause() {
+ mTrack->pause();
+}
+
+status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
+{
+ return mTrack->attachAuxEffect(EffectId);
+}
+
+status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
+ sp<IMemory>* buffer) {
+ if (!mTrack->isTimedTrack())
+ return INVALID_OPERATION;
+
+ PlaybackThread::TimedTrack* tt =
+ reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+ return tt->allocateTimedBuffer(size, buffer);
+}
+
+status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
+ int64_t pts) {
+ if (!mTrack->isTimedTrack())
+ return INVALID_OPERATION;
+
+ PlaybackThread::TimedTrack* tt =
+ reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+ return tt->queueTimedBuffer(buffer, pts);
+}
+
+status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
+ const LinearTransform& xform, int target) {
+
+ if (!mTrack->isTimedTrack())
+ return INVALID_OPERATION;
+
+ PlaybackThread::TimedTrack* tt =
+ reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+ return tt->setMediaTimeTransform(
+ xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
+}
+
+status_t AudioFlinger::TrackHandle::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnAudioTrack::onTransact(code, data, reply, flags);
+}
+
+// ----------------------------------------------------------------------------
+
+// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
+AudioFlinger::PlaybackThread::Track::Track(
+ PlaybackThread *thread,
+ const sp<Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId,
+ IAudioFlinger::track_flags_t flags)
+ : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
+ sessionId),
+ mMute(false),
+ mFillingUpStatus(FS_INVALID),
+ // mRetryCount initialized later when needed
+ mSharedBuffer(sharedBuffer),
+ mStreamType(streamType),
+ mName(-1), // see note below
+ mMainBuffer(thread->mixBuffer()),
+ mAuxBuffer(NULL),
+ mAuxEffectId(0), mHasVolumeController(false),
+ mPresentationCompleteFrames(0),
+ mFlags(flags),
+ mFastIndex(-1),
+ mUnderrunCount(0),
+ mCachedVolume(1.0)
+{
+ if (mCblk != NULL) {
+ // to avoid leaking a track name, do not allocate one unless there is an mCblk
+ mName = thread->getTrackName_l(channelMask, sessionId);
+ mCblk->mName = mName;
+ if (mName < 0) {
+ ALOGE("no more track names available");
+ return;
+ }
+ // only allocate a fast track index if we were able to allocate a normal track name
+ if (flags & IAudioFlinger::TRACK_FAST) {
+ ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
+ int i = __builtin_ctz(thread->mFastTrackAvailMask);
+ ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
+ // FIXME This is too eager. We allocate a fast track index before the
+ // fast track becomes active. Since fast tracks are a scarce resource,
+ // this means we are potentially denying other more important fast tracks from
+ // being created. It would be better to allocate the index dynamically.
+ mFastIndex = i;
+ mCblk->mName = i;
+ // Read the initial underruns because this field is never cleared by the fast mixer
+ mObservedUnderruns = thread->getFastTrackUnderruns(i);
+ thread->mFastTrackAvailMask &= ~(1 << i);
+ }
+ }
+ ALOGV("Track constructor name %d, calling pid %d", mName,
+ IPCThreadState::self()->getCallingPid());
+}
+
+AudioFlinger::PlaybackThread::Track::~Track()
+{
+ ALOGV("PlaybackThread::Track destructor");
+}
+
+void AudioFlinger::PlaybackThread::Track::destroy()
+{
+ // NOTE: destroyTrack_l() can remove a strong reference to this Track
+ // by removing it from the mTracks vector, so there is a risk that this Track's
+ // destructor is called. As the destructor needs to lock mLock,
+ // we must acquire a strong reference on this Track before locking mLock
+ // here so that the destructor is called only when exiting this function.
+ // On the other hand, as long as Track::destroy() is only called by
+ // TrackHandle destructor, the TrackHandle still holds a strong ref on
+ // this Track with its member mTrack.
+ sp<Track> keep(this);
+ { // scope for mLock
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ if (!isOutputTrack()) {
+ if (mState == ACTIVE || mState == RESUMING) {
+ AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
+
+#ifdef ADD_BATTERY_DATA
+ // to track the speaker usage
+ addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
+ }
+ AudioSystem::releaseOutput(thread->id());
+ }
+ Mutex::Autolock _l(thread->mLock);
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ playbackThread->destroyTrack_l(this);
+ }
+ }
+}
+
+/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+{
+ result.append(" Name Client Type Fmt Chn mask Session StpCnt fCount S M F SRate "
+ "L dB R dB Server User Main buf Aux Buf Flags Underruns\n");
+}
+
+void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
+{
+ uint32_t vlr = mCblk->getVolumeLR();
+ if (isFastTrack()) {
+ sprintf(buffer, " F %2d", mFastIndex);
+ } else {
+ sprintf(buffer, " %4d", mName - AudioMixer::TRACK0);
+ }
+ track_state state = mState;
+ char stateChar;
+ switch (state) {
+ case IDLE:
+ stateChar = 'I';
+ break;
+ case TERMINATED:
+ stateChar = 'T';
+ break;
+ case STOPPING_1:
+ stateChar = 's';
+ break;
+ case STOPPING_2:
+ stateChar = '5';
+ break;
+ case STOPPED:
+ stateChar = 'S';
+ break;
+ case RESUMING:
+ stateChar = 'R';
+ break;
+ case ACTIVE:
+ stateChar = 'A';
+ break;
+ case PAUSING:
+ stateChar = 'p';
+ break;
+ case PAUSED:
+ stateChar = 'P';
+ break;
+ case FLUSHED:
+ stateChar = 'F';
+ break;
+ default:
+ stateChar = '?';
+ break;
+ }
+ char nowInUnderrun;
+ switch (mObservedUnderruns.mBitFields.mMostRecent) {
+ case UNDERRUN_FULL:
+ nowInUnderrun = ' ';
+ break;
+ case UNDERRUN_PARTIAL:
+ nowInUnderrun = '<';
+ break;
+ case UNDERRUN_EMPTY:
+ nowInUnderrun = '*';
+ break;
+ default:
+ nowInUnderrun = '?';
+ break;
+ }
+ snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %1d %5u %5.2g %5.2g "
+ "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
+ (mClient == 0) ? getpid_cached : mClient->pid(),
+ mStreamType,
+ mFormat,
+ mChannelMask,
+ mSessionId,
+ mStepCount,
+ mFrameCount,
+ stateChar,
+ mMute,
+ mFillingUpStatus,
+ mCblk->sampleRate,
+ 20.0 * log10((vlr & 0xFFFF) / 4096.0),
+ 20.0 * log10((vlr >> 16) / 4096.0),
+ mCblk->server,
+ mCblk->user,
+ (int)mMainBuffer,
+ (int)mAuxBuffer,
+ mCblk->flags,
+ mUnderrunCount,
+ nowInUnderrun);
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
+ AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+ audio_track_cblk_t* cblk = this->cblk();
+ uint32_t framesReady;
+ uint32_t framesReq = buffer->frameCount;
+
+ // Check if last stepServer failed, try to step now
+ if (mStepServerFailed) {
+ // FIXME When called by fast mixer, this takes a mutex with tryLock().
+ // Since the fast mixer is higher priority than client callback thread,
+ // it does not result in priority inversion for client.
+ // But a non-blocking solution would be preferable to avoid
+ // fast mixer being unable to tryLock(), and
+ // to avoid the extra context switches if the client wakes up,
+ // discovers the mutex is locked, then has to wait for fast mixer to unlock.
+ if (!step()) goto getNextBuffer_exit;
+ ALOGV("stepServer recovered");
+ mStepServerFailed = false;
+ }
+
+ // FIXME Same as above
+ framesReady = cblk->framesReadyOut();
+
+ if (CC_LIKELY(framesReady)) {
+ uint32_t s = cblk->server;
+ uint32_t bufferEnd = cblk->serverBase + mFrameCount;
+
+ bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
+ if (framesReq > framesReady) {
+ framesReq = framesReady;
+ }
+ if (framesReq > bufferEnd - s) {
+ framesReq = bufferEnd - s;
+ }
+
+ buffer->raw = getBuffer(s, framesReq);
+ buffer->frameCount = framesReq;
+ return NO_ERROR;
+ }
+
+getNextBuffer_exit:
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
+ return NOT_ENOUGH_DATA;
+}
+
+// Note that framesReady() takes a mutex on the control block using tryLock().
+// This could result in priority inversion if framesReady() is called by the normal mixer,
+// as the normal mixer thread runs at lower
+// priority than the client's callback thread: there is a short window within framesReady()
+// during which the normal mixer could be preempted, and the client callback would block.
+// Another problem can occur if framesReady() is called by the fast mixer:
+// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
+// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
+size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+ return mCblk->framesReadyOut();
+}
+
+// Don't call for fast tracks; the framesReady() could result in priority inversion
+bool AudioFlinger::PlaybackThread::Track::isReady() const {
+ if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
+ return true;
+ }
+
+ if (framesReady() >= mFrameCount ||
+ (mCblk->flags & CBLK_FORCEREADY)) {
+ mFillingUpStatus = FS_FILLED;
+ android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
+ return true;
+ }
+ return false;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
+ int triggerSession)
+{
+ status_t status = NO_ERROR;
+ ALOGV("start(%d), calling pid %d session %d",
+ mName, IPCThreadState::self()->getCallingPid(), mSessionId);
+
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ track_state state = mState;
+ // here the track could be either new, or restarted
+ // in both cases "unstop" the track
+ if (mState == PAUSED) {
+ mState = TrackBase::RESUMING;
+ ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+ } else {
+ mState = TrackBase::ACTIVE;
+ ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
+ }
+
+ if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
+ thread->mLock.unlock();
+ status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
+ thread->mLock.lock();
+
+#ifdef ADD_BATTERY_DATA
+ // to track the speaker usage
+ if (status == NO_ERROR) {
+ addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
+ }
+#endif
+ }
+ if (status == NO_ERROR) {
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ playbackThread->addTrack_l(this);
+ } else {
+ mState = state;
+ triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+ }
+ } else {
+ status = BAD_VALUE;
+ }
+ return status;
+}
+
+void AudioFlinger::PlaybackThread::Track::stop()
+{
+ ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ track_state state = mState;
+ if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
+ // If the track is not active (PAUSED and buffers full), flush buffers
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+ reset();
+ mState = STOPPED;
+ } else if (!isFastTrack()) {
+ mState = STOPPED;
+ } else {
+ // prepareTracks_l() will set state to STOPPING_2 after next underrun,
+ // and then to STOPPED and reset() when presentation is complete
+ mState = STOPPING_1;
+ }
+ ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
+ playbackThread);
+ }
+ if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
+ thread->mLock.unlock();
+ AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
+ thread->mLock.lock();
+
+#ifdef ADD_BATTERY_DATA
+ // to track the speaker usage
+ addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
+ }
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::pause()
+{
+ ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ if (mState == ACTIVE || mState == RESUMING) {
+ mState = PAUSING;
+ ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
+ if (!isOutputTrack()) {
+ thread->mLock.unlock();
+ AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
+ thread->mLock.lock();
+
+#ifdef ADD_BATTERY_DATA
+ // to track the speaker usage
+ addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
+ }
+ }
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::flush()
+{
+ ALOGV("flush(%d)", mName);
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
+ mState != PAUSING && mState != IDLE && mState != FLUSHED) {
+ return;
+ }
+ // No point remaining in PAUSED state after a flush => go to
+ // FLUSHED state
+ mState = FLUSHED;
+ // do not reset the track if it is still in the process of being stopped or paused.
+ // this will be done by prepareTracks_l() when the track is stopped.
+ // prepareTracks_l() will see mState == FLUSHED, then
+ // remove from active track list, reset(), and trigger presentation complete
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+ reset();
+ }
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::reset()
+{
+ // Do not reset twice to avoid discarding data written just after a flush and before
+ // the audioflinger thread detects the track is stopped.
+ if (!mResetDone) {
+ TrackBase::reset();
+ // Force underrun condition to avoid false underrun callback until first data is
+ // written to buffer
+ android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
+ android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
+ mFillingUpStatus = FS_FILLING;
+ mResetDone = true;
+ if (mState == FLUSHED) {
+ mState = IDLE;
+ }
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::mute(bool muted)
+{
+ mMute = muted;
+}
+
+status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
+{
+ status_t status = DEAD_OBJECT;
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ sp<AudioFlinger> af = mClient->audioFlinger();
+
+ Mutex::Autolock _l(af->mLock);
+
+ sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
+
+ if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
+ Mutex::Autolock _dl(playbackThread->mLock);
+ Mutex::Autolock _sl(srcThread->mLock);
+ sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
+ if (chain == 0) {
+ return INVALID_OPERATION;
+ }
+
+ sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
+ if (effect == 0) {
+ return INVALID_OPERATION;
+ }
+ srcThread->removeEffect_l(effect);
+ playbackThread->addEffect_l(effect);
+ // removeEffect_l() has stopped the effect if it was active so it must be restarted
+ if (effect->state() == EffectModule::ACTIVE ||
+ effect->state() == EffectModule::STOPPING) {
+ effect->start();
+ }
+
+ sp<EffectChain> dstChain = effect->chain().promote();
+ if (dstChain == 0) {
+ srcThread->addEffect_l(effect);
+ return INVALID_OPERATION;
+ }
+ AudioSystem::unregisterEffect(effect->id());
+ AudioSystem::registerEffect(&effect->desc(),
+ srcThread->id(),
+ dstChain->strategy(),
+ AUDIO_SESSION_OUTPUT_MIX,
+ effect->id());
+ }
+ status = playbackThread->attachAuxEffect(this, EffectId);
+ }
+ return status;
+}
+
+void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
+{
+ mAuxEffectId = EffectId;
+ mAuxBuffer = buffer;
+}
+
+bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
+ size_t audioHalFrames)
+{
+ // a track is considered presented when the total number of frames written to the audio HAL
+ // reaches the number of frames that had been written when presentationComplete() was called
+ // for the first time (mPresentationCompleteFrames == 0), plus the buffer filling status at that time.
+ if (mPresentationCompleteFrames == 0) {
+ mPresentationCompleteFrames = framesWritten + audioHalFrames;
+ ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
+ mPresentationCompleteFrames, audioHalFrames);
+ }
+ if (framesWritten >= mPresentationCompleteFrames) {
+ ALOGV("presentationComplete() session %d complete: framesWritten %d",
+ mSessionId, framesWritten);
+ triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+ return true;
+ }
+ return false;
+}
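+
+// Numeric example (illustrative, assumed values): if presentationComplete() is first called with
+// framesWritten == 10000 and audioHalFrames == 960, mPresentationCompleteFrames becomes 10960,
+// and the sync events fire once framesWritten reaches at least 10960.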
+
+void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
+{
+ for (int i = 0; i < (int)mSyncEvents.size(); i++) {
+ if (mSyncEvents[i]->type() == type) {
+ mSyncEvents[i]->trigger();
+ mSyncEvents.removeAt(i);
+ i--;
+ }
+ }
+}
+
+// implement VolumeBufferProvider interface
+
+uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
+{
+ // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
+ ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
+ uint32_t vlr = mCblk->getVolumeLR();
+ uint32_t vl = vlr & 0xFFFF;
+ uint32_t vr = vlr >> 16;
+ // track volumes come from shared memory, so can't be trusted and must be clamped
+ if (vl > MAX_GAIN_INT) {
+ vl = MAX_GAIN_INT;
+ }
+ if (vr > MAX_GAIN_INT) {
+ vr = MAX_GAIN_INT;
+ }
+ // now apply the cached master volume and stream type volume;
+ // this is trusted but lacks any synchronization or barrier so may be stale
+ float v = mCachedVolume;
+ vl *= v;
+ vr *= v;
+ // re-combine into U4.16
+ vlr = (vr << 16) | (vl & 0xFFFF);
+ // FIXME look at mute, pause, and stop flags
+ return vlr;
+}
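+
+// Packing note (illustrative): the left gain occupies the low 16 bits and the right gain the
+// high 16 bits. Judging by the clamp to MAX_GAIN_INT and the /4096.0 conversion in
+// Track::dump(), unity gain corresponds to 0x1000, so half volume on both channels would be
+// packed as 0x08000800.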
+
+status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
+{
+ if (mState == TERMINATED || mState == PAUSED ||
+ ((framesReady() == 0) && ((mSharedBuffer != 0) ||
+ (mState == STOPPED)))) {
+ ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
+ mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
+ event->cancel();
+ return INVALID_OPERATION;
+ }
+ (void) TrackBase::setSyncEvent(event);
+ return NO_ERROR;
+}
+
+bool AudioFlinger::PlaybackThread::Track::isOut() const
+{
+ return true;
+}
+
+// ----------------------------------------------------------------------------
+
+sp<AudioFlinger::PlaybackThread::TimedTrack>
+AudioFlinger::PlaybackThread::TimedTrack::create(
+ PlaybackThread *thread,
+ const sp<Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId) {
+ if (!client->reserveTimedTrack())
+ return 0;
+
+ return new TimedTrack(
+ thread, client, streamType, sampleRate, format, channelMask, frameCount,
+ sharedBuffer, sessionId);
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
+ PlaybackThread *thread,
+ const sp<Client>& client,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId)
+ : Track(thread, client, streamType, sampleRate, format, channelMask,
+ frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
+ mQueueHeadInFlight(false),
+ mTrimQueueHeadOnRelease(false),
+ mFramesPendingInQueue(0),
+ mTimedSilenceBuffer(NULL),
+ mTimedSilenceBufferSize(0),
+ mTimedAudioOutputOnTime(false),
+ mMediaTimeTransformValid(false)
+{
+ LocalClock lc;
+ mLocalTimeFreq = lc.getLocalFreq();
+
+ mLocalTimeToSampleTransform.a_zero = 0;
+ mLocalTimeToSampleTransform.b_zero = 0;
+ mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
+ mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
+ LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
+ &mLocalTimeToSampleTransform.a_to_b_denom);
+
+ mMediaTimeToSampleTransform.a_zero = 0;
+ mMediaTimeToSampleTransform.b_zero = 0;
+ mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
+ mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
+ LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
+ &mMediaTimeToSampleTransform.a_to_b_denom);
+}
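+
+// Worked example (illustrative, assumed values): for sampleRate == 48000, the media-time
+// transform starts as 48000/1000000 and LinearTransform::reduce() brings it to 6/125, so one
+// second of media time (1,000,000 us) maps to 48,000 samples.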
+
+AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
+ mClient->releaseTimedTrack();
+ delete [] mTimedSilenceBuffer;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
+ size_t size, sp<IMemory>* buffer) {
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ trimTimedBufferQueue_l();
+
+ // lazily initialize the shared memory heap for timed buffers
+ if (mTimedMemoryDealer == NULL) {
+ const int kTimedBufferHeapSize = 512 << 10;
+
+ mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
+ "AudioFlingerTimed");
+ if (mTimedMemoryDealer == NULL)
+ return NO_MEMORY;
+ }
+
+ sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
+ if (newBuffer == NULL) {
+ newBuffer = mTimedMemoryDealer->allocate(size);
+ if (newBuffer == NULL)
+ return NO_MEMORY;
+ }
+
+ *buffer = newBuffer;
+ return NO_ERROR;
+}
+
+// caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
+ int64_t mediaTimeNow;
+ {
+ Mutex::Autolock mttLock(mMediaTimeTransformLock);
+ if (!mMediaTimeTransformValid)
+ return;
+
+ int64_t targetTimeNow;
+ status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
+ ? mCCHelper.getCommonTime(&targetTimeNow)
+ : mCCHelper.getLocalTime(&targetTimeNow);
+
+ if (OK != res)
+ return;
+
+ if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
+ &mediaTimeNow)) {
+ return;
+ }
+ }
+
+ size_t trimEnd;
+ for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
+ int64_t bufEnd;
+
+ if ((trimEnd + 1) < mTimedBufferQueue.size()) {
+ // We have a next buffer. Just use its PTS as the PTS of the frame
+ // following the last frame in this buffer. If the stream is sparse
+ // (i.e., there are deliberate gaps left in the stream which should be
+ // filled with silence by the TimedAudioTrack), then this can result
+ // in one extra buffer being left untrimmed when it could have
+ // been. This is not typical, though, and we would rather
+ // optimize away the PTS calculation below for the more common case
+ // where PTSes are contiguous.
+ bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
+ } else {
+ // We have no next buffer. Compute the PTS of the frame following
+ // the last frame in this buffer by computing the duration of
+ // this buffer in media time units and adding it to the PTS of the
+ // buffer.
+ int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
+ / mFrameSize;
+
+ if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
+ &bufEnd)) {
+ ALOGE("Failed to convert frame count of %lld to media time"
+ " duration" " (scale factor %d/%u) in %s",
+ frameCount,
+ mMediaTimeToSampleTransform.a_to_b_numer,
+ mMediaTimeToSampleTransform.a_to_b_denom,
+ __PRETTY_FUNCTION__);
+ break;
+ }
+ bufEnd += mTimedBufferQueue[trimEnd].pts();
+ }
+
+ if (bufEnd > mediaTimeNow)
+ break;
+
+ // Is the buffer we want to use in the middle of a mix operation right
+ // now? If so, don't actually trim it. Just wait for the releaseBuffer
+ // from the mixer which should be coming back shortly.
+ if (!trimEnd && mQueueHeadInFlight) {
+ mTrimQueueHeadOnRelease = true;
+ }
+ }
+
+ size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
+ if (trimStart < trimEnd) {
+ // Update the bookkeeping for framesReady()
+ for (size_t i = trimStart; i < trimEnd; ++i) {
+ updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
+ }
+
+ // Now actually remove the buffers from the queue.
+ mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
+ }
+}
+
+void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
+ const char* logTag) {
+ ALOG_ASSERT(mTimedBufferQueue.size() > 0,
+ "%s called (reason \"%s\"), but timed buffer queue has no"
+ " elements to trim.", __FUNCTION__, logTag);
+
+ updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
+ mTimedBufferQueue.removeAt(0);
+}
+
+void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
+ const TimedBuffer& buf,
+ const char* logTag) {
+ uint32_t bufBytes = buf.buffer()->size();
+ uint32_t consumedAlready = buf.position();
+
+ ALOG_ASSERT(consumedAlready <= bufBytes,
+ "Bad bookkeeping while updating frames pending. Timed buffer is"
+ " only %u bytes long, but claims to have consumed %u"
+ " bytes. (update reason: \"%s\")",
+ bufBytes, consumedAlready, logTag);
+
+ uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
+ ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
+ "Bad bookkeeping while updating frames pending. Should have at"
+ " least %u queued frames, but we think we have only %u. (update"
+ " reason: \"%s\")",
+ bufFrames, mFramesPendingInQueue, logTag);
+
+ mFramesPendingInQueue -= bufFrames;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
+ const sp<IMemory>& buffer, int64_t pts) {
+
+ {
+ Mutex::Autolock mttLock(mMediaTimeTransformLock);
+ if (!mMediaTimeTransformValid)
+ return INVALID_OPERATION;
+ }
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ uint32_t bufFrames = buffer->size() / mFrameSize;
+ mFramesPendingInQueue += bufFrames;
+ mTimedBufferQueue.add(TimedBuffer(buffer, pts));
+
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
+ const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
+
+ ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
+ xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
+ target);
+
+ if (!(target == TimedAudioTrack::LOCAL_TIME ||
+ target == TimedAudioTrack::COMMON_TIME)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMediaTimeTransformLock);
+ mMediaTimeTransform = xform;
+ mMediaTimeTransformTarget = target;
+ mMediaTimeTransformValid = true;
+
+ return NO_ERROR;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+// implementation of getNextBuffer for tracks whose buffers have timestamps
+status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
+ AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+ if (pts == AudioBufferProvider::kInvalidPTS) {
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ mTimedAudioOutputOnTime = false;
+ return INVALID_OPERATION;
+ }
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ ALOG_ASSERT(!mQueueHeadInFlight,
+ "getNextBuffer called without releaseBuffer!");
+
+ while (true) {
+
+ // if we have no timed buffers, then fail
+ if (mTimedBufferQueue.isEmpty()) {
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ return NOT_ENOUGH_DATA;
+ }
+
+ TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+ // calculate the PTS of the head of the timed buffer queue expressed in
+ // local time
+ int64_t headLocalPTS;
+ {
+ Mutex::Autolock mttLock(mMediaTimeTransformLock);
+
+ ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
+
+ if (mMediaTimeTransform.a_to_b_denom == 0) {
+ // the transform represents a pause, so yield silence
+ timedYieldSilence_l(buffer->frameCount, buffer);
+ return NO_ERROR;
+ }
+
+ int64_t transformedPTS;
+ if (!mMediaTimeTransform.doForwardTransform(head.pts(),
+ &transformedPTS)) {
+ // the transform failed. this shouldn't happen, but if it does
+ // then just drop this buffer
+ ALOGW("timedGetNextBuffer transform failed");
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ trimTimedBufferQueueHead_l("getNextBuffer; no transform");
+ return NO_ERROR;
+ }
+
+ if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
+ if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
+ &headLocalPTS)) {
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ return INVALID_OPERATION;
+ }
+ } else {
+ headLocalPTS = transformedPTS;
+ }
+ }
+
+ // adjust the head buffer's PTS to reflect the portion of the head buffer
+ // that has already been consumed
+ int64_t effectivePTS = headLocalPTS +
+ ((head.position() / mFrameSize) * mLocalTimeFreq / sampleRate());
+
+ // Calculate the delta in samples between the head of the input buffer
+ // queue and the start of the next output buffer that will be written.
+ // If the transformation fails because of over- or underflow, it means
+ // that the sample's position in the output stream is so far out of
+ // whack that it should just be dropped.
+ int64_t sampleDelta;
+ if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
+ ALOGV("*** head buffer is too far from PTS: dropped buffer");
+ trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
+ " mix");
+ continue;
+ }
+ if (!mLocalTimeToSampleTransform.doForwardTransform(
+ (effectivePTS - pts) << 32, &sampleDelta)) {
+ ALOGV("*** too late during sample rate transform: dropped buffer");
+ trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
+ continue;
+ }
+
+ ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
+ " sampleDelta=[%d.%08x]",
+ head.pts(), head.position(), pts,
+ static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
+ + (sampleDelta >> 32)),
+ static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
+
+ // if the delta between the ideal placement for the next input sample and
+ // the current output position is within this threshold, then we will
+ // concatenate the next input samples to the previous output
+ const int64_t kSampleContinuityThreshold =
+ (static_cast<int64_t>(sampleRate()) << 32) / 250;
+
+ // if this is the first buffer of audio that we're emitting from this track
+ // then it should be almost exactly on time.
+ const int64_t kSampleStartupThreshold = 1LL << 32;
+
+ if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
+ (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
+ // the next input is close enough to being on time, so concatenate it
+ // with the last output
+ timedYieldSamples_l(buffer);
+
+ ALOGVV("*** on time: head.pos=%d frameCount=%u",
+ head.position(), buffer->frameCount);
+ return NO_ERROR;
+ }
+
+ // Looks like our output is not on time. Reset our on-time status.
+ // The next time we mix samples from our input queue, they should be
+ // within the StartupThreshold.
+ mTimedAudioOutputOnTime = false;
+ if (sampleDelta > 0) {
+ // the gap between the current output position and the proper start of
+ // the next input sample is too big, so fill it with silence
+ uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
+
+ timedYieldSilence_l(framesUntilNextInput, buffer);
+ ALOGV("*** silence: frameCount=%u", buffer->frameCount);
+ return NO_ERROR;
+ } else {
+ // the next input sample is late
+ uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
+ size_t onTimeSamplePosition =
+ head.position() + lateFrames * mFrameSize;
+
+ if (onTimeSamplePosition > head.buffer()->size()) {
+ // all the remaining samples in the head are too late, so
+ // drop it and move on
+ ALOGV("*** too late: dropped buffer");
+ trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
+ continue;
+ } else {
+ // skip over the late samples
+ head.setPosition(onTimeSamplePosition);
+
+ // yield the available samples
+ timedYieldSamples_l(buffer);
+
+ ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+ return NO_ERROR;
+ }
+ }
+ }
+}
+
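Both thresholds above are signed 32.32 fixed-point frame counts: kSampleStartupThreshold (1LL << 32) is exactly one frame, and kSampleContinuityThreshold is sampleRate()/250 frames, i.e. roughly 4 ms of audio. A worked example (illustrative only):

    #include <stdint.h>

    // (sampleRate << 32) / 250 in 32.32 fixed point; at 48000 Hz this is
    // 192 frames (48000 / 250), about 4 ms of audio.
    static int64_t continuityThresholdFrames(uint32_t sampleRate) {
        return (static_cast<int64_t>(sampleRate) << 32) / 250;
    }
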
+// Yield samples from the timed buffer queue head up to the given output
+// buffer's capacity.
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
+ AudioBufferProvider::Buffer* buffer) {
+
+ const TimedBuffer& head = mTimedBufferQueue[0];
+
+ buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
+ head.position());
+
+ uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
+ mFrameSize);
+ size_t framesRequested = buffer->frameCount;
+ buffer->frameCount = min(framesLeftInHead, framesRequested);
+
+ mQueueHeadInFlight = true;
+ mTimedAudioOutputOnTime = true;
+}
+
+// Yield samples of silence up to the given output buffer's capacity
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
+ uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
+
+ // lazily allocate a buffer filled with silence
+ if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
+ delete [] mTimedSilenceBuffer;
+ mTimedSilenceBufferSize = numFrames * mFrameSize;
+ mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
+ memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
+ }
+
+ buffer->raw = mTimedSilenceBuffer;
+ size_t framesRequested = buffer->frameCount;
+ buffer->frameCount = min(numFrames, framesRequested);
+
+ mTimedAudioOutputOnTime = false;
+}
+
+// AudioBufferProvider interface
+void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
+ AudioBufferProvider::Buffer* buffer) {
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ // If the buffer which was just released is part of the buffer at the head
+ // of the queue, be sure to update the amount of the buffer which has been
+ // consumed. If the buffer being returned is not part of the head of the
+ // queue, it's either because the buffer is the silence buffer, or because
+ // the head of the timed queue was trimmed after the mixer called
+ // getNextBuffer but before the mixer called releaseBuffer.
+ if (buffer->raw == mTimedSilenceBuffer) {
+ ALOG_ASSERT(!mQueueHeadInFlight,
+ "Queue head in flight during release of silence buffer!");
+ goto done;
+ }
+
+ ALOG_ASSERT(mQueueHeadInFlight,
+ "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
+ " head in flight.");
+
+ if (mTimedBufferQueue.size()) {
+ TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+ void* start = head.buffer()->pointer();
+ void* end = reinterpret_cast<void*>(
+ reinterpret_cast<uint8_t*>(head.buffer()->pointer())
+ + head.buffer()->size());
+
+ ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
+ "released buffer not within the head of the timed buffer"
+ " queue; qHead = [%p, %p], released buffer = %p",
+ start, end, buffer->raw);
+
+ head.setPosition(head.position() +
+ (buffer->frameCount * mFrameSize));
+ mQueueHeadInFlight = false;
+
+ ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
+ "Bad bookkeeping during releaseBuffer! Should have at"
+ " least %u queued frames, but we think we have only %u",
+ buffer->frameCount, mFramesPendingInQueue);
+
+ mFramesPendingInQueue -= buffer->frameCount;
+
+ if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
+ || mTrimQueueHeadOnRelease) {
+ trimTimedBufferQueueHead_l("releaseBuffer");
+ mTrimQueueHeadOnRelease = false;
+ }
+ } else {
+ LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
+ " buffers in the timed buffer queue");
+ }
+
+done:
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+}
+
+size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+ return mFramesPendingInQueue;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
+ : mPTS(0), mPosition(0) {}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
+ const sp<IMemory>& buffer, int64_t pts)
+ : mBuffer(buffer), mPTS(pts), mPosition(0) {}
+
+
+// ----------------------------------------------------------------------------
+
+AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
+ PlaybackThread *playbackThread,
+ DuplicatingThread *sourceThread,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount)
+ : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
+ NULL, 0, IAudioFlinger::TRACK_DEFAULT),
+ mActive(false), mSourceThread(sourceThread), mBuffers(NULL)
+{
+
+ if (mCblk != NULL) {
+ mBuffers = (char*)mCblk + sizeof(audio_track_cblk_t);
+ mOutBuffer.frameCount = 0;
+ playbackThread->mTracks.add(this);
+ ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, mBuffers %p, " \
+ "mCblk->frameCount %d, mCblk->sampleRate %u, mChannelMask 0x%08x mBufferEnd %p",
+ mCblk, mBuffer, mBuffers,
+ mCblk->frameCount, mCblk->sampleRate, mChannelMask, mBufferEnd);
+ } else {
+ ALOGW("Error creating output track on thread %p", playbackThread);
+ }
+}
+
+AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
+{
+ clearBufferQueue();
+}
+
+status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
+ int triggerSession)
+{
+ status_t status = Track::start(event, triggerSession);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ mActive = true;
+ mRetryCount = 127;
+ return status;
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::stop()
+{
+ Track::stop();
+ clearBufferQueue();
+ mOutBuffer.frameCount = 0;
+ mActive = false;
+}
+
+bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
+{
+ Buffer *pInBuffer;
+ Buffer inBuffer;
+ uint32_t channelCount = mChannelCount;
+ bool outputBufferFull = false;
+ inBuffer.frameCount = frames;
+ inBuffer.i16 = data;
+
+ uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
+
+ if (!mActive && frames != 0) {
+ start();
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ MixerThread *mixerThread = (MixerThread *)thread.get();
+ if (mFrameCount > frames) {
+ if (mBufferQueue.size() < kMaxOverFlowBuffers) {
+ uint32_t startFrames = (mFrameCount - frames);
+ pInBuffer = new Buffer;
+ pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
+ pInBuffer->frameCount = startFrames;
+ pInBuffer->i16 = pInBuffer->mBuffer;
+ memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
+ mBufferQueue.add(pInBuffer);
+ } else {
+ ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
+ }
+ }
+ }
+ }
+
+ while (waitTimeLeftMs) {
+ // First write pending buffers, then new data
+ if (mBufferQueue.size()) {
+ pInBuffer = mBufferQueue.itemAt(0);
+ } else {
+ pInBuffer = &inBuffer;
+ }
+
+ if (pInBuffer->frameCount == 0) {
+ break;
+ }
+
+ if (mOutBuffer.frameCount == 0) {
+ mOutBuffer.frameCount = pInBuffer->frameCount;
+ nsecs_t startTime = systemTime();
+ if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
+ ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
+ mThread.unsafe_get());
+ outputBufferFull = true;
+ break;
+ }
+ uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
+ if (waitTimeLeftMs >= waitTimeMs) {
+ waitTimeLeftMs -= waitTimeMs;
+ } else {
+ waitTimeLeftMs = 0;
+ }
+ }
+
+ uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
+ pInBuffer->frameCount;
+ memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
+ mCblk->stepUserOut(outFrames, mFrameCount);
+ pInBuffer->frameCount -= outFrames;
+ pInBuffer->i16 += outFrames * channelCount;
+ mOutBuffer.frameCount -= outFrames;
+ mOutBuffer.i16 += outFrames * channelCount;
+
+ if (pInBuffer->frameCount == 0) {
+ if (mBufferQueue.size()) {
+ mBufferQueue.removeAt(0);
+ delete [] pInBuffer->mBuffer;
+ delete pInBuffer;
+ ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
+ mThread.unsafe_get(), mBufferQueue.size());
+ } else {
+ break;
+ }
+ }
+ }
+
+ // If we could not write all frames, allocate a buffer and queue it for next time.
+ if (inBuffer.frameCount) {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0 && !thread->standby()) {
+ if (mBufferQueue.size() < kMaxOverFlowBuffers) {
+ pInBuffer = new Buffer;
+ pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
+ pInBuffer->frameCount = inBuffer.frameCount;
+ pInBuffer->i16 = pInBuffer->mBuffer;
+ memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
+ sizeof(int16_t));
+ mBufferQueue.add(pInBuffer);
+ ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
+ mThread.unsafe_get(), mBufferQueue.size());
+ } else {
+ ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
+ this, mThread.unsafe_get());
+ }
+ }
+ }
+
+ // Calling write() with a 0-length buffer means that no more data will be written:
+ // if no more buffers are pending, fill the output track buffer to make sure it is
+ // started by the output mixer.
+ if (frames == 0 && mBufferQueue.size() == 0) {
+ if (mCblk->user < mFrameCount) {
+ frames = mFrameCount - mCblk->user;
+ pInBuffer = new Buffer;
+ pInBuffer->mBuffer = new int16_t[frames * channelCount];
+ pInBuffer->frameCount = frames;
+ pInBuffer->i16 = pInBuffer->mBuffer;
+ memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
+ mBufferQueue.add(pInBuffer);
+ } else if (mActive) {
+ stop();
+ }
+ }
+
+ return outputBufferFull;
+}
+
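A minimal sketch of the buffer sizing used throughout write() above, which always deals in interleaved 16-bit samples (names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    // Every overflow/priming buffer holds frames * channelCount int16_t samples.
    static size_t bufferBytes(uint32_t frames, uint32_t channelCount) {
        return static_cast<size_t>(frames) * channelCount * sizeof(int16_t);
    }
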
+status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
+ AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
+{
+ int active;
+ status_t result;
+ audio_track_cblk_t* cblk = mCblk;
+ uint32_t framesReq = buffer->frameCount;
+
+ ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
+ buffer->frameCount = 0;
+
+ uint32_t framesAvail = cblk->framesAvailableOut(mFrameCount);
+
+
+ if (framesAvail == 0) {
+ Mutex::Autolock _l(cblk->lock);
+ goto start_loop_here;
+ while (framesAvail == 0) {
+ active = mActive;
+ if (CC_UNLIKELY(!active)) {
+ ALOGV("Not active and NO_MORE_BUFFERS");
+ return NO_MORE_BUFFERS;
+ }
+ result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
+ if (result != NO_ERROR) {
+ return NO_MORE_BUFFERS;
+ }
+ // read the server count again
+ start_loop_here:
+ framesAvail = cblk->framesAvailableOut_l(mFrameCount);
+ }
+ }
+
+// if (framesAvail < framesReq) {
+// return NO_MORE_BUFFERS;
+// }
+
+ if (framesReq > framesAvail) {
+ framesReq = framesAvail;
+ }
+
+ uint32_t u = cblk->user;
+ uint32_t bufferEnd = cblk->userBase + mFrameCount;
+
+ if (framesReq > bufferEnd - u) {
+ framesReq = bufferEnd - u;
+ }
+
+ buffer->frameCount = framesReq;
+ buffer->raw = cblk->buffer(mBuffers, mFrameSize, u);
+ return NO_ERROR;
+}
+
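A minimal sketch (illustrative names) of the clamping obtainBuffer() applies so that a single grant never wraps past the end of the shared ring buffer:

    #include <stdint.h>

    // Limit the request first to what the consumer has freed (framesAvail),
    // then to the contiguous span between the write index and the ring end.
    static uint32_t clampRequest(uint32_t framesReq, uint32_t framesAvail,
                                 uint32_t user, uint32_t userBase,
                                 uint32_t frameCount) {
        if (framesReq > framesAvail) framesReq = framesAvail;
        uint32_t bufferEnd = userBase + frameCount;
        if (framesReq > bufferEnd - user) framesReq = bufferEnd - user;
        return framesReq;
    }
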
+
+void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
+{
+ size_t size = mBufferQueue.size();
+
+ for (size_t i = 0; i < size; i++) {
+ Buffer *pBuffer = mBufferQueue.itemAt(i);
+ delete [] pBuffer->mBuffer;
+ delete pBuffer;
+ }
+ mBufferQueue.clear();
+}
+
+
+// ----------------------------------------------------------------------------
+// Record
+// ----------------------------------------------------------------------------
+
+AudioFlinger::RecordHandle::RecordHandle(
+ const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
+ : BnAudioRecord(),
+ mRecordTrack(recordTrack)
+{
+}
+
+AudioFlinger::RecordHandle::~RecordHandle() {
+ stop_nonvirtual();
+ mRecordTrack->destroy();
+}
+
+sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
+ return mRecordTrack->getCblk();
+}
+
+status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
+ int triggerSession) {
+ ALOGV("RecordHandle::start()");
+ return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
+}
+
+void AudioFlinger::RecordHandle::stop() {
+ stop_nonvirtual();
+}
+
+void AudioFlinger::RecordHandle::stop_nonvirtual() {
+ ALOGV("RecordHandle::stop()");
+ mRecordTrack->stop();
+}
+
+status_t AudioFlinger::RecordHandle::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnAudioRecord::onTransact(code, data, reply, flags);
+}
+
+// ----------------------------------------------------------------------------
+
+// RecordTrack constructor must be called with AudioFlinger::mLock held
+AudioFlinger::RecordThread::RecordTrack::RecordTrack(
+ RecordThread *thread,
+ const sp<Client>& client,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ int sessionId)
+ : TrackBase(thread, client, sampleRate, format,
+ channelMask, frameCount, 0 /*sharedBuffer*/, sessionId),
+ mOverflow(false)
+{
+ ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+}
+
+AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
+{
+ ALOGV("%s", __func__);
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts)
+{
+ audio_track_cblk_t* cblk = this->cblk();
+ uint32_t framesAvail;
+ uint32_t framesReq = buffer->frameCount;
+
+ // Check if last stepServer failed, try to step now
+ if (mStepServerFailed) {
+ if (!step()) {
+ goto getNextBuffer_exit;
+ }
+ ALOGV("stepServer recovered");
+ mStepServerFailed = false;
+ }
+
+ // FIXME lock is not actually held, so overrun is possible
+ framesAvail = cblk->framesAvailableIn_l(mFrameCount);
+
+ if (CC_LIKELY(framesAvail)) {
+ uint32_t s = cblk->server;
+ uint32_t bufferEnd = cblk->serverBase + mFrameCount;
+
+ if (framesReq > framesAvail) {
+ framesReq = framesAvail;
+ }
+ if (framesReq > bufferEnd - s) {
+ framesReq = bufferEnd - s;
+ }
+
+ buffer->raw = getBuffer(s, framesReq);
+ buffer->frameCount = framesReq;
+ return NO_ERROR;
+ }
+
+getNextBuffer_exit:
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ return NOT_ENOUGH_DATA;
+}
+
+status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
+ int triggerSession)
+{
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ RecordThread *recordThread = (RecordThread *)thread.get();
+ return recordThread->start(this, event, triggerSession);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+void AudioFlinger::RecordThread::RecordTrack::stop()
+{
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ RecordThread *recordThread = (RecordThread *)thread.get();
+ recordThread->mLock.lock();
+ bool doStop = recordThread->stop_l(this);
+ if (doStop) {
+ TrackBase::reset();
+ // Force overrun condition to avoid false overrun callback until first data is
+ // read from buffer
+ android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
+ }
+ recordThread->mLock.unlock();
+ if (doStop) {
+ AudioSystem::stopInput(recordThread->id());
+ }
+ }
+}
+
+void AudioFlinger::RecordThread::RecordTrack::destroy()
+{
+ // see comments at AudioFlinger::PlaybackThread::Track::destroy()
+ sp<RecordTrack> keep(this);
+ {
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ if (mState == ACTIVE || mState == RESUMING) {
+ AudioSystem::stopInput(thread->id());
+ }
+ AudioSystem::releaseInput(thread->id());
+ Mutex::Autolock _l(thread->mLock);
+ RecordThread *recordThread = (RecordThread *) thread.get();
+ recordThread->destroyTrack_l(this);
+ }
+ }
+}
+
+
+/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
+{
+ result.append(" Clien Fmt Chn mask Session Step S SRate Serv User FrameCount\n");
+}
+
+void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
+{
+ snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %05u %08x %08x %05d\n",
+ (mClient == 0) ? getpid_cached : mClient->pid(),
+ mFormat,
+ mChannelMask,
+ mSessionId,
+ mStepCount,
+ mState,
+ mCblk->sampleRate,
+ mCblk->server,
+ mCblk->user,
+ mFrameCount);
+}
+
+bool AudioFlinger::RecordThread::RecordTrack::isOut() const
+{
+ return false;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index eff47c8..5245983 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -28,7 +28,6 @@ LOCAL_SHARED_LIBRARIES:= \
libbinder \
libcutils \
libmedia \
- libmedia_native \
libcamera_client \
libgui \
libhardware \
@@ -40,6 +39,9 @@ LOCAL_C_INCLUDES += \
system/media/camera/include \
external/jpeg
+
+LOCAL_CFLAGS += -Wall -Wextra
+
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index 0f1e650..5a7bb48 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -37,10 +37,6 @@ static int getCallingPid() {
return IPCThreadState::self()->getCallingPid();
}
-static int getCallingUid() {
- return IPCThreadState::self()->getCallingUid();
-}
-
// Interface used by CameraService
Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
@@ -370,7 +366,6 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
void Camera2Client::disconnect() {
ATRACE_CALL();
Mutex::Autolock icl(mICameraLock);
- status_t res;
// Allow both client and the media server to disconnect at all times
int callingPid = getCallingPid();
@@ -575,7 +570,7 @@ void Camera2Client::setPreviewCallbackFlag(int flag) {
ATRACE_CALL();
ALOGV("%s: Camera %d: Flag 0x%x", __FUNCTION__, mCameraId, flag);
Mutex::Autolock icl(mICameraLock);
- status_t res;
+
if ( checkPid(__FUNCTION__) != OK) return;
SharedParameters::Lock l(mParameters);
@@ -1244,7 +1239,7 @@ status_t Camera2Client::commandPlayRecordingSoundL() {
return OK;
}
-status_t Camera2Client::commandStartFaceDetectionL(int type) {
+status_t Camera2Client::commandStartFaceDetectionL(int /*type*/) {
ALOGV("%s: Camera %d: Starting face detection",
__FUNCTION__, mCameraId);
status_t res;
@@ -1331,6 +1326,8 @@ void Camera2Client::notifyError(int errorCode, int arg1, int arg2) {
}
void Camera2Client::notifyShutter(int frameNumber, nsecs_t timestamp) {
+ (void)frameNumber;
+ (void)timestamp;
ALOGV("%s: Shutter notification for frame %d at time %lld", __FUNCTION__,
frameNumber, timestamp);
}
@@ -1452,6 +1449,8 @@ void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
}
void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) {
+ (void)newState;
+ (void)triggerId;
ALOGV("%s: Auto-whitebalance state now %d, last trigger %d",
__FUNCTION__, newState, triggerId);
}
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index d6445c1..5bfa085 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -765,7 +765,6 @@ status_t Camera2Device::MetadataQueue::setStreamSlot(
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
Mutex::Autolock l(mMutex);
- status_t res;
if (mStreamSlotCount > 0) {
freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
@@ -785,7 +784,7 @@ status_t Camera2Device::MetadataQueue::setStreamSlot(
}
status_t Camera2Device::MetadataQueue::dump(int fd,
- const Vector<String16>& args) {
+ const Vector<String16>& /*args*/) {
ATRACE_CALL();
String8 result;
status_t notLocked;
@@ -894,12 +893,13 @@ int Camera2Device::MetadataQueue::consumer_free(
{
ATRACE_CALL();
MetadataQueue *queue = getInstance(q);
+ (void)queue;
free_camera_metadata(old_buffer);
return OK;
}
int Camera2Device::MetadataQueue::producer_dequeue(
- const camera2_frame_queue_dst_ops_t *q,
+ const camera2_frame_queue_dst_ops_t * /*q*/,
size_t entries, size_t bytes,
camera_metadata_t **buffer)
{
@@ -912,7 +912,7 @@ int Camera2Device::MetadataQueue::producer_dequeue(
}
int Camera2Device::MetadataQueue::producer_cancel(
- const camera2_frame_queue_dst_ops_t *q,
+ const camera2_frame_queue_dst_ops_t * /*q*/,
camera_metadata_t *old_buffer)
{
ATRACE_CALL();
@@ -1184,7 +1184,7 @@ status_t Camera2Device::StreamAdapter::setTransform(int transform) {
}
status_t Camera2Device::StreamAdapter::dump(int fd,
- const Vector<String16>& args) {
+ const Vector<String16>& /*args*/) {
ATRACE_CALL();
String8 result = String8::format(" Stream %d: %d x %d, format 0x%x\n",
mId, mWidth, mHeight, mFormat);
@@ -1423,7 +1423,7 @@ status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
}
status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
- const Vector<String16>& args) {
+ const Vector<String16>& /*args*/) {
ATRACE_CALL();
String8 result =
String8::format(" Reprocess stream %d: %d x %d, fmt 0x%x\n",
@@ -1444,7 +1444,7 @@ int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
const camera2_stream_in_ops_t *w,
buffer_handle_t** buffer) {
ATRACE_CALL();
- int res;
+
ReprocessStreamAdapter* stream =
const_cast<ReprocessStreamAdapter*>(
static_cast<const ReprocessStreamAdapter*>(w));
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/CameraClient.cpp
index b930c02..006a9c9 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/CameraClient.cpp
@@ -34,10 +34,6 @@ static int getCallingPid() {
return IPCThreadState::self()->getCallingPid();
}
-static int getCallingUid() {
- return IPCThreadState::self()->getCallingUid();
-}
-
CameraClient::CameraClient(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
int cameraId, int cameraFacing, int clientPid, int servicePid):
diff --git a/services/camera/libcameraservice/CameraHardwareInterface.h b/services/camera/libcameraservice/CameraHardwareInterface.h
index 05ac9fa..167b37c 100644
--- a/services/camera/libcameraservice/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/CameraHardwareInterface.h
@@ -427,7 +427,7 @@ public:
/**
* Dump state of the camera hardware
*/
- status_t dump(int fd, const Vector<String16>& args) const
+ status_t dump(int fd, const Vector<String16>& /*args*/) const
{
ALOGV("%s(%s)", __FUNCTION__, mName.string());
if (mDevice->ops->dump)
@@ -584,9 +584,10 @@ private:
#endif
static int __lock_buffer(struct preview_stream_ops* w,
- buffer_handle_t* buffer)
+ buffer_handle_t* /*buffer*/)
{
ANativeWindow *a = anw(w);
+ (void)a;
return 0;
}
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.cpp b/services/camera/libcameraservice/camera2/BurstCapture.cpp
index f56c50c..192d419 100644
--- a/services/camera/libcameraservice/camera2/BurstCapture.cpp
+++ b/services/camera/libcameraservice/camera2/BurstCapture.cpp
@@ -38,7 +38,8 @@ BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequen
BurstCapture::~BurstCapture() {
}
-status_t BurstCapture::start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId) {
+status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/,
+ int32_t /*firstCaptureId*/) {
ALOGE("Not completely implemented");
return INVALID_OPERATION;
}
@@ -75,7 +76,7 @@ bool BurstCapture::threadLoop() {
CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
CpuConsumer::LockedBuffer *imgBuffer,
- int quality)
+ int /*quality*/)
{
ALOGV("%s", __FUNCTION__);
@@ -91,7 +92,7 @@ CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
buffers.push_back(imgEncoded);
sp<JpegCompressor> jpeg = new JpegCompressor();
- status_t res = jpeg->start(buffers, 1);
+ jpeg->start(buffers, 1);
bool success = jpeg->waitForDone(10 * 1e9);
if(success) {
@@ -103,7 +104,7 @@ CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
}
}
-status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &client) {
+status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) {
ALOGE("Not implemented");
return INVALID_OPERATION;
}
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 3e9c255..307cfab 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -119,7 +119,6 @@ status_t CallbackProcessor::updateStream(const Parameters &params) {
status_t CallbackProcessor::deleteStream() {
ATRACE_CALL();
- status_t res;
Mutex::Autolock l(mInputMutex);
@@ -144,7 +143,7 @@ int CallbackProcessor::getStreamId() const {
return mCallbackStreamId;
}
-void CallbackProcessor::dump(int fd, const Vector<String16>& args) const {
+void CallbackProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
}
bool CallbackProcessor::threadLoop() {
@@ -173,7 +172,6 @@ status_t CallbackProcessor::processNewCallback(sp<Camera2Client> &client) {
ATRACE_CALL();
status_t res;
- int callbackHeapId;
sp<Camera2Heap> callbackHeap;
size_t heapIdx;
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
index 072453b..513a47e 100644
--- a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
@@ -130,7 +130,7 @@ void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp,
}
-void CaptureSequencer::dump(int fd, const Vector<String16>& args) {
+void CaptureSequencer::dump(int fd, const Vector<String16>& /*args*/) {
String8 result;
if (mCaptureRequest.entryCount() != 0) {
result = " Capture request:\n";
@@ -184,7 +184,6 @@ const CaptureSequencer::StateManager
};
bool CaptureSequencer::threadLoop() {
- status_t res;
sp<Camera2Client> client = mClient.promote();
if (client == 0) return false;
@@ -215,7 +214,8 @@ bool CaptureSequencer::threadLoop() {
return true;
}
-CaptureSequencer::CaptureState CaptureSequencer::manageIdle(sp<Camera2Client> &client) {
+CaptureSequencer::CaptureState CaptureSequencer::manageIdle(
+ sp<Camera2Client> &/*client*/) {
status_t res;
Mutex::Autolock l(mInputMutex);
while (!mStartCapture) {
@@ -352,13 +352,13 @@ CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
}
CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
ALOGV("%s", __FUNCTION__);
return DONE;
}
CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
ALOGV("%s", __FUNCTION__);
return START;
}
@@ -380,7 +380,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
}
CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
status_t res;
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
@@ -580,7 +580,7 @@ CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart(
}
CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
- sp<Camera2Client> &client) {
+ sp<Camera2Client> &/*client*/) {
status_t res;
ATRACE_CALL();
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
index 064607c..e032522 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
@@ -62,7 +62,7 @@ status_t FrameProcessor::removeListener(int32_t minId,
return OK;
}
-void FrameProcessor::dump(int fd, const Vector<String16>& args) {
+void FrameProcessor::dump(int fd, const Vector<String16>& /*args*/) {
String8 result(" Latest received frame:\n");
write(fd, result.string(), result.size());
mLastFrame.dump(fd, 2, 6);
@@ -128,7 +128,6 @@ void FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
status_t FrameProcessor::processListeners(const CameraMetadata &frame,
sp<Camera2Client> &client) {
- status_t res;
ATRACE_CALL();
camera_metadata_ro_entry_t entry;
@@ -173,7 +172,7 @@ status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
ATRACE_CALL();
camera_metadata_ro_entry_t entry;
bool enableFaceDetect;
- int maxFaces;
+
{
SharedParameters::Lock l(client->getParameters());
enableFaceDetect = l.mParameters.enableFaceDetect;
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
index 702ef58..c9af71e 100644
--- a/services/camera/libcameraservice/camera2/JpegCompressor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
@@ -144,7 +144,7 @@ bool JpegCompressor::isBusy() {
}
// old function -- TODO: update for new buffer type
-bool JpegCompressor::isStreamInUse(uint32_t id) {
+bool JpegCompressor::isStreamInUse(uint32_t /*id*/) {
ALOGV("%s", __FUNCTION__);
Mutex::Autolock lock(mBusyMutex);
@@ -203,14 +203,14 @@ void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
dest->free_in_buffer = kMaxJpegSize;
}
-boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) {
+boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr /*cinfo*/) {
ALOGV("%s", __FUNCTION__);
ALOGE("%s: JPEG destination buffer overflow!",
__FUNCTION__);
return true;
}
-void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+void JpegCompressor::jpegTermDestination(j_compress_ptr /*cinfo*/) {
ALOGV("%s", __FUNCTION__);
ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
__FUNCTION__, cinfo->dest->free_in_buffer);
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
index ffc072b..6280f83 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -139,7 +139,6 @@ status_t JpegProcessor::updateStream(const Parameters &params) {
status_t JpegProcessor::deleteStream() {
ATRACE_CALL();
- status_t res;
Mutex::Autolock l(mInputMutex);
@@ -164,7 +163,7 @@ int JpegProcessor::getStreamId() const {
return mCaptureStreamId;
}
-void JpegProcessor::dump(int fd, const Vector<String16>& args) const {
+void JpegProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
}
bool JpegProcessor::threadLoop() {
@@ -356,7 +355,7 @@ size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) {
// Find End of Image
// Scan JPEG buffer until End of Image (EOI)
bool foundEnd = false;
- for (size; size <= maxSize - MARKER_LENGTH; size++) {
+ for ( ; size <= maxSize - MARKER_LENGTH; size++) {
if ( checkJpegEnd(jpegBuffer + size) ) {
foundEnd = true;
size += MARKER_LENGTH;
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index 9a0083a..93927e6 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -951,7 +951,6 @@ status_t Parameters::buildQuirks() {
camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag,
size_t minCount, size_t maxCount) const {
- status_t res;
camera_metadata_ro_entry_t entry = info->find(tag);
if (CC_UNLIKELY( entry.count == 0 )) {
@@ -1567,6 +1566,10 @@ status_t Parameters::set(const String8& paramString) {
ALOGE("%s: Video stabilization not supported", __FUNCTION__);
}
+ // LIGHTFX
+ validatedParams.lightFx = lightFxStringToEnum(
+ newParams.get(CameraParameters::KEY_LIGHTFX));
+
/** Update internal parameters */
*this = validatedParams;
@@ -2094,6 +2097,18 @@ const char *Parameters::focusModeEnumToString(focusMode_t focusMode) {
}
}
+Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
+ const char *lightFxMode) {
+ return
+ !lightFxMode ?
+ Parameters::LIGHTFX_NONE :
+ !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
+ Parameters::LIGHTFX_LOWLIGHT :
+ !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
+ Parameters::LIGHTFX_HDR :
+ Parameters::LIGHTFX_NONE;
+}
+
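The ternary chain above maps the LIGHTFX parameter string onto the new lightFxMode_t enum; an equivalent longhand form (illustrative only, assuming the CameraParameters::LIGHTFX_* constants introduced elsewhere in this change) is:

    #include <string.h>

    static Parameters::lightFxMode_t lightFxStringToEnumSketch(const char *lightFxMode) {
        if (lightFxMode == NULL) return Parameters::LIGHTFX_NONE;
        if (!strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT))
            return Parameters::LIGHTFX_LOWLIGHT;
        if (!strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR))
            return Parameters::LIGHTFX_HDR;
        return Parameters::LIGHTFX_NONE;
    }
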
status_t Parameters::parseAreas(const char *areasCStr,
Vector<Parameters::Area> *areas) {
static const size_t NUM_FIELDS = 5;
@@ -2414,7 +2429,7 @@ Parameters::CropRegion Parameters::calculateCropRegion(
return crop;
}
-int32_t Parameters::fpsFromRange(int32_t min, int32_t max) const {
+int32_t Parameters::fpsFromRange(int32_t /*min*/, int32_t max) const {
return max;
}
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index 54b1e8c..6d32bf6 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -261,6 +261,8 @@ struct Parameters {
static const char* flashModeEnumToString(flashMode_t flashMode);
static focusMode_t focusModeStringToEnum(const char *focusMode);
static const char* focusModeEnumToString(focusMode_t focusMode);
+ static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
+
static status_t parseAreas(const char *areasCStr,
Vector<Area> *areas);
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
index 207f780..6ea27b2 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -447,7 +447,6 @@ status_t StreamingProcessor::incrementStreamingIds() {
ATRACE_CALL();
Mutex::Autolock m(mMutex);
- status_t res;
mPreviewRequestId++;
if (mPreviewRequestId >= Camera2Client::kPreviewRequestIdEnd) {
mPreviewRequestId = Camera2Client::kPreviewRequestIdStart;
@@ -628,7 +627,7 @@ void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
}
-status_t StreamingProcessor::dump(int fd, const Vector<String16>& args) {
+status_t StreamingProcessor::dump(int fd, const Vector<String16>& /*args*/) {
String8 result;
result.append(" Current requests:\n");
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
index 1937955..9584028 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -69,11 +69,12 @@ void ZslProcessor::onFrameAvailable() {
}
}
-void ZslProcessor::onFrameAvailable(int32_t frameId, const CameraMetadata &frame) {
+void ZslProcessor::onFrameAvailable(int32_t /*frameId*/, const CameraMetadata &frame) {
Mutex::Autolock l(mInputMutex);
camera_metadata_ro_entry_t entry;
entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
nsecs_t timestamp = entry.data.i64[0];
+ (void)timestamp;
ALOGVV("Got preview frame for timestamp %lld", timestamp);
if (mState != RUNNING) return;
@@ -367,7 +368,7 @@ status_t ZslProcessor::clearZslQueueLocked() {
return OK;
}
-void ZslProcessor::dump(int fd, const Vector<String16>& args) const {
+void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const {
Mutex::Autolock l(mInputMutex);
if (!mLatestCapturedRequest.isEmpty()) {
String8 result(" Latest ZSL capture request:\n");