author     vivek mehta <mvivek@codeaurora.org>    2012-12-04 11:46:08 -0800
committer  Steve Kondik <shade@chemlab.org>       2012-12-23 21:26:55 -0800
commit     3a05365a93afccc5bb7db3bdc9f5d2efef69c8d3 (patch)
tree       9384cf4640e68fb4eb9652bc6549b75ed074f5e0
parent     97343328d2920ec95fb06794b65ec467bce793af (diff)
download   frameworks_av-3a05365a93afccc5bb7db3bdc9f5d2efef69c8d3.zip (also .tar.gz, .tar.bz2)
Squashed commit of audio changes from CodeAurora
libstagefright: Add QC specific media format
  - Add QC specific media extensions
  - Add QC specific media definitions
  Change-Id: I7dca90be3b977701d9537f5e017117790a030f1f

audio: Compile AudioParameter as shared library
  - AudioParameter as a shared lib is needed by BT support in the WFD source.
  Change-Id: I464b428ace0cbb57ce6bf7bf3b57d51a7d56f032

libstagefright: Send flush on both i/p and o/p ports together
  - An ANR occurs in Music due to a race condition in the OMX component if flush is
    issued separately for the i/p and o/p ports, as the DSP only handles a simultaneous
    flush on both i/p and o/p ports.
  Change-Id: I5b16cd5a9b57c857dc8bed489d2663b8f54769e3

libstagefright: Enable extended A/V format
  - Add new files to support extended A/V formats.
  Change-Id: I1e61d78d35b868d55fd8e99f95de8cab9c465db4

libstagefright: Framework to plug in proprietary parser
  - Extend the current framework to plug in a proprietary parser.
  Change-Id: Ia586a3048420ddf1515261f20035589447263b7b

audio: Add support for QCOM audio formats
  - Add support for the EVRC, QCELP, and WMA formats.
  Change-Id: Iaf80f982fc8b08617132dbd7d524a1748866745c

frameworks/av: Support Tunnel Playback
  - Implement DirectTrack and DirectTrackClient.
  - DirectTrack exposes an API to the client so it can create a direct output.
  - DirectTrackClient allows notifications to be sent to the client from DirectTrack.
  - DirectTrack is used for Tunnel Audio.
  Change-Id: I2fbb18a781d8e44b8d65da9a357f6e39375f063a

frameworks/av: Support LPA Playback
  - Add support to enable playback in LPA mode.
  Change-Id: I1b8ac4904f4735017d62f3757ede7bbb56e62fd3

audio: Send correct channel mask in voice call recording
  - Using the popCount function to get the channel count gives an incorrect value
    for voice call recording.
  - Only the STEREO and MONO bits are to be considered when counting input channels.
  Change-Id: I04c2c802422e868bdba0538ff8623dbf9eb659fe

libstagefright: Thumbnail mode initial commit
  - Use sync-frame decoding mode when kClientNeedsFrameBuffer is set for hardware decoders.
  - The hardware decoder will only expect I-frames; OMXCodec will set EOS on the first ETB
    to stop more frames from being pulled.
  - Skip the EOS check on FTB so that the first frame will be handled.
  Change-Id: I0e8974e088fdcc468e27764861c128cfe291499f

audio: Add support for QCOM's VOIP solution
  Change-Id: I1150f536fa204b535ca4019fdaa84f33f4695d93

audio: Define QCOM audio parameters
  - Define QCOM audio parameters for FM, VOIP, fluence, SSR, and A2DP.
  Change-Id: I29d02e37685846f6d4f00dee02e2726b015eaae7

Add ifdefs for QCOM enhanced features
  Change-Id: Ic8e5fe6ecc058466ced71030883b1af6c2bc055c
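The Tunnel/LPA entries above add a second client-side playback path: when an output is flagged for LPA or tunnel playback, AudioTrack asks AudioFlinger for an IDirectTrack instead of the usual IAudioTrack. A rough sketch of that selection, modeled on the AudioTrack::set() hunk later in this diff; the helper name is invented, and the AUDIO_OUTPUT_FLAG_LPA / AUDIO_OUTPUT_FLAG_TUNNEL flags are assumed to come from the QCOM-extended audio headers rather than stock AOSP:

    #include <unistd.h>
    #include <media/AudioSystem.h>
    #include <media/IAudioFlinger.h>
    #include <media/IDirectTrack.h>
    #include <media/IDirectTrackClient.h>

    using namespace android;

    // Illustrative only: mirrors the branch added to AudioTrack::set() below.
    status_t selectPlaybackPath(audio_output_flags_t flags,
                                uint32_t sampleRate,
                                audio_channel_mask_t channelMask,
                                audio_io_handle_t output,
                                audio_stream_type_t streamType,
                                IDirectTrackClient *client,
                                int *sessionId,
                                sp<IDirectTrack> *directTrack)
    {
        if (flags & (AUDIO_OUTPUT_FLAG_LPA | AUDIO_OUTPUT_FLAG_TUNNEL)) {
            // Compressed / low-power output: ask AudioFlinger for a DirectTrack.
            const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
            if (af == 0) return NO_INIT;
            status_t status = NO_ERROR;
            *directTrack = af->createDirectTrack(getpid(), sampleRate, channelMask,
                                                 output, sessionId, client,
                                                 streamType, &status);
            return status;
        }
        // Otherwise the normal mixer path (createTrack_l()) is used.
        return NO_ERROR;
    }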
-rw-r--r--  include/media/AudioParameter.h | 8
-rw-r--r--  include/media/AudioSystem.h | 7
-rw-r--r--  include/media/AudioTrack.h | 25
-rw-r--r--  include/media/IAudioFlinger.h | 23
-rw-r--r--  include/media/IDirectTrack.h | 93
-rw-r--r--  include/media/IDirectTrackClient.h | 51
-rw-r--r--  include/media/MediaPlayerInterface.h | 4
-rw-r--r--  include/media/MediaProfiles.h | 5
-rw-r--r--  include/media/mediarecorder.h | 12
-rw-r--r--  include/media/stagefright/AudioPlayer.h | 21
-rw-r--r--  include/media/stagefright/ExtendedWriter.h | 143
-rw-r--r--  include/media/stagefright/LPAPlayer.h | 290
-rw-r--r--  include/media/stagefright/OMXCodec.h | 26
-rw-r--r--  include/media/stagefright/QCOMXCodec.h | 98
-rw-r--r--  include/media/stagefright/TunnelPlayer.h | 251
-rw-r--r--  include/media/stagefright/WAVEWriter.h | 108
-rw-r--r--  media/libmedia/Android.mk | 21
-rw-r--r--  media/libmedia/AudioParameter.cpp | 8
-rw-r--r--  media/libmedia/AudioRecord.cpp | 5
-rw-r--r--  media/libmedia/AudioTrack.cpp | 266
-rw-r--r--  media/libmedia/IAudioFlinger.cpp | 69
-rw-r--r--  media/libmedia/IAudioFlingerClient.cpp | 10
-rw-r--r--  media/libmedia/IDirectTrack.cpp | 178
-rw-r--r--  media/libmedia/IDirectTrackClient.cpp | 69
-rw-r--r--  media/libmedia/MediaProfiles.cpp | 26
-rw-r--r--  media/libmediaplayerservice/Android.mk | 7
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 152
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.h | 11
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 160
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.h | 9
-rw-r--r--  media/libstagefright/Android.mk | 28
-rw-r--r--  media/libstagefright/AudioPlayer.cpp | 7
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp | 188
-rw-r--r--  media/libstagefright/DataSource.cpp | 6
-rw-r--r--  media/libstagefright/ExtendedExtractor.cpp | 110
-rw-r--r--  media/libstagefright/ExtendedWriter.cpp | 390
-rw-r--r--  media/libstagefright/LPAPlayerALSA.cpp | 791
-rw-r--r--  media/libstagefright/MPEG4Extractor.cpp | 70
-rw-r--r--  media/libstagefright/MediaExtractor.cpp | 11
-rw-r--r-- (was -rwxr-xr-x)  media/libstagefright/OMXCodec.cpp | 655
-rw-r--r--  media/libstagefright/QCMediaDefs.cpp | 55
-rw-r--r--  media/libstagefright/QCOMXCodec.cpp | 548
-rw-r--r--  media/libstagefright/StagefrightMediaScanner.cpp | 8
-rw-r--r--  media/libstagefright/TunnelPlayer.cpp | 782
-rw-r--r--  media/libstagefright/WAVEWriter.cpp | 323
-rw-r--r--  media/libstagefright/include/AwesomePlayer.h | 8
-rw-r--r--  media/libstagefright/include/ExtendedExtractor.h | 58
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 839
-rw-r--r--  services/audioflinger/AudioFlinger.h | 214
49 files changed, 7072 insertions, 175 deletions
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index 891bc4b..d29c699 100644
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2008-2011 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,6 +49,13 @@ public:
static const char * const keyFrameCount;
static const char * const keyInputSource;
static const char * const keyScreenState;
+#ifdef QCOM_HARDWARE
+ static const char * const keyHandleFm;
+ static const char * const keyVoipCheck;
+ static const char * const keyFluenceType;
+ static const char * const keySSR;
+ static const char * const keyHandleA2dpDevice;
+#endif
String8 toString();
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 49e1afc..e66fc3a 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -1,4 +1,8 @@
/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -144,6 +148,9 @@ public:
INPUT_CLOSED,
INPUT_CONFIG_CHANGED,
STREAM_CONFIG_CHANGED,
+#ifdef QCOM_HARDWARE
+ EFFECT_CONFIG_CHANGED,
+#endif
NUM_CONFIG_EVENTS
};
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 34108b3..77a0b26 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -31,6 +31,10 @@
#include <cutils/sched_policy.h>
#include <utils/threads.h>
+#ifdef QCOM_HARDWARE
+#include <media/IDirectTrackClient.h>
+#endif
+
namespace android {
// ----------------------------------------------------------------------------
@@ -39,7 +43,11 @@ class audio_track_cblk_t;
// ----------------------------------------------------------------------------
-class AudioTrack : virtual public RefBase
+class AudioTrack :
+#ifdef QCOM_HARDWARE
+ public BnDirectTrackClient,
+#endif
+ virtual public RefBase
{
public:
enum channel_index {
@@ -451,6 +459,11 @@ public:
*/
status_t dump(int fd, const Vector<String16>& args) const;
+#ifdef QCOM_HARDWARE
+ virtual void notify(int msg);
+ virtual status_t getTimeStamp(uint64_t *tstamp);
+#endif
+
protected:
/* copying audio tracks is not allowed */
AudioTrack(const AudioTrack& other);
@@ -496,6 +509,9 @@ protected:
status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart);
bool stopped_l() const { return !mActive; }
+#ifdef QCOM_HARDWARE
+ sp<IDirectTrack> mDirectTrack;
+#endif
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
sp<AudioTrackThread> mAudioTrackThread;
@@ -529,10 +545,17 @@ protected:
uint32_t mUpdatePeriod;
bool mFlushed; // FIXME will be made obsolete by making flush() synchronous
audio_output_flags_t mFlags;
+#ifdef QCOM_HARDWARE
+ sp<IAudioFlinger> mAudioFlinger;
+ audio_io_handle_t mAudioDirectOutput;
+#endif
int mSessionId;
int mAuxEffectId;
mutable Mutex mLock;
status_t mRestoreStatus;
+#ifdef QCOM_HARDWARE
+ void* mObserver;
+#endif
bool mIsTimed;
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5170a87..c895c13 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -1,4 +1,8 @@
/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,6 +28,10 @@
#include <utils/RefBase.h>
#include <utils/Errors.h>
#include <binder/IInterface.h>
+#ifdef QCOM_HARDWARE
+#include <media/IDirectTrack.h>
+#include <media/IDirectTrackClient.h>
+#endif
#include <media/IAudioTrack.h>
#include <media/IAudioRecord.h>
#include <media/IAudioFlingerClient.h>
@@ -69,6 +77,21 @@ public:
int *sessionId,
status_t *status) = 0;
+#ifdef QCOM_HARDWARE
+ /* Creates a direct audio track and registers it with AudioFlinger.
+ * Returns null if the track cannot be created.
+ */
+ virtual sp<IDirectTrack> createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient* client,
+ audio_stream_type_t streamType,
+ status_t *status) = 0;
+#endif
+
virtual sp<IAudioRecord> openRecord(
pid_t pid,
audio_io_handle_t input,
diff --git a/include/media/IDirectTrack.h b/include/media/IDirectTrack.h
new file mode 100644
index 0000000..c1f4f09
--- /dev/null
+++ b/include/media/IDirectTrack.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IDIRECTTRACK_H
+#define ANDROID_IDIRECTTRACK_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class IDirectTrack : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(DirectTrack);
+
+ /* After it's created the track is not active. Call start() to
+ * make it active. If set, the callback will start being called.
+ */
+ virtual status_t start() = 0;
+
+ /* Stop a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will be processed, unless flush() is called.
+ */
+ virtual void stop() = 0;
+
+ /* Flush a stopped track. All pending buffers are discarded.
+ * This function has no effect if the track is not stopped.
+ */
+ virtual void flush() = 0;
+
+ /* Mutes or unmutes this track.
+ * While muted, the callback, if set, is still called.
+ */
+ virtual void mute(bool) = 0;
+
+ /* Pause a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will be processed, unless flush() is called.
+ */
+ virtual void pause() = 0;
+
+ /* set volume for both left and right channels.
+ */
+ virtual void setVolume(float l, float r) = 0;
+
+ virtual ssize_t write(const void*, size_t) = 0;
+
+ virtual int64_t getTimeStamp() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnDirectTrack : public BnInterface<IDirectTrack>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_IDIRECTTRACK_H
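A short usage sketch of the lifecycle described in the comments above, assuming the track was obtained from IAudioFlinger::createDirectTrack(); the function name and the buffer are placeholders, not part of this patch:

    #include <media/IDirectTrack.h>

    // Push one PCM buffer through a DirectTrack and tear it down.
    void playPcmChunk(const android::sp<android::IDirectTrack>& track,
                      const void *pcm, size_t bytes)
    {
        track->setVolume(1.0f, 1.0f);   // same level for left and right
        track->start();                 // the track is created inactive
        track->write(pcm, bytes);       // hand one buffer to AudioFlinger
        track->stop();                  // already released buffers keep draining...
        track->flush();                 // ...unless the stopped track is flushed
    }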
diff --git a/include/media/IDirectTrackClient.h b/include/media/IDirectTrackClient.h
new file mode 100644
index 0000000..9383690
--- /dev/null
+++ b/include/media/IDirectTrackClient.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IDIRECTTRACKCLIENT_H
+#define ANDROID_IDIRECTTRACKCLIENT_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class IDirectTrackClient: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(DirectTrackClient);
+
+ virtual void notify(int msg) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnDirectTrackClient: public BnInterface<IDirectTrackClient>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IDIRECTTRACKCLIENT_H
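For completeness, a minimal listener built on this interface might look like the sketch below; the class name is hypothetical, and in this patch AudioTrack itself takes the role by deriving from BnDirectTrackClient (see the AudioTrack.h hunk above):

    #include <media/IDirectTrackClient.h>

    struct MyDirectTrackClient : public android::BnDirectTrackClient {
        virtual void notify(int msg) {
            // msg is an event code forwarded from AudioFlinger's DirectTrack,
            // e.g. an underrun notification; react or ignore as needed.
            (void)msg;
        }
    };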
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index b7bee3f..a7570d6 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -110,6 +110,10 @@ public:
virtual status_t setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
virtual bool needsTrailingPadding() { return true; }
+#ifdef QCOM_HARDWARE
+ virtual ssize_t sampleRate() const {return 0;};
+ virtual status_t getTimeStamp(uint64_t *tstamp) {return 0;};
+#endif
};
MediaPlayerBase() : mCookie(0), mNotify(0) {}
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 60d6bef..0df9fd4 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -1,6 +1,7 @@
/*
**
** Copyright 2010, The Android Open Source Project.
+ ** Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -461,6 +462,10 @@ private:
static VideoEncoderCap* createDefaultH263VideoEncoderCap();
static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
static AudioEncoderCap* createDefaultAmrNBEncoderCap();
+#ifdef QCOM_HARDWARE
+ static AudioEncoderCap* createDefaultAacEncoderCap();
+ static AudioEncoderCap* createDefaultLpcmEncoderCap();
+#endif
static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 3c2e700..6dfa5d9 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -1,5 +1,6 @@
/*
** Copyright (C) 2008 The Android Open Source Project
+ ** Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -67,6 +68,12 @@ enum output_format {
/* H.264/AAC data encapsulated in MPEG2/TS */
OUTPUT_FORMAT_MPEG2TS = 8,
+#ifdef QCOM_HARDWARE
+ OUTPUT_FORMAT_QCP = 9, // QCP file format
+ OUTPUT_FORMAT_THREE_GPP2 = 10, /*3GPP2*/
+ OUTPUT_FORMAT_WAVE = 11, /*WAVE*/
+#endif
+
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
@@ -77,6 +84,11 @@ enum audio_encoder {
AUDIO_ENCODER_AAC = 3,
AUDIO_ENCODER_HE_AAC = 4,
AUDIO_ENCODER_AAC_ELD = 5,
+#ifdef QCOM_HARDWARE
+ AUDIO_ENCODER_EVRC = 6,
+ AUDIO_ENCODER_QCELP = 7,
+ AUDIO_ENCODER_LPCM = 8,
+#endif
AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
};
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 1dc408f..624fe3e 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -43,27 +43,27 @@ public:
virtual ~AudioPlayer();
// Caller retains ownership of "source".
- void setSource(const sp<MediaSource> &source);
+ virtual void setSource(const sp<MediaSource> &source);
// Return time in us.
virtual int64_t getRealTimeUs();
- status_t start(bool sourceAlreadyStarted = false);
+ virtual status_t start(bool sourceAlreadyStarted = false);
- void pause(bool playPendingSamples = false);
- void resume();
+ virtual void pause(bool playPendingSamples = false);
+ virtual void resume();
// Returns the timestamp of the last buffer played (in us).
- int64_t getMediaTimeUs();
+ virtual int64_t getMediaTimeUs();
// Returns true iff a mapping is established, i.e. the AudioPlayer
// has played at least one frame of audio.
- bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+ virtual bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
- status_t seekTo(int64_t time_us);
+ virtual status_t seekTo(int64_t time_us);
- bool isSeeking();
- bool reachedEOS(status_t *finalStatus);
+ virtual bool isSeeking();
+ virtual bool reachedEOS(status_t *finalStatus);
status_t setPlaybackRatePermille(int32_t ratePermille);
@@ -91,6 +91,9 @@ private:
int64_t mSeekTimeUs;
bool mStarted;
+#ifdef QCOM_HARDWARE
+ bool mSourcePaused;
+#endif
bool mIsFirstBuffer;
status_t mFirstBufferResult;
diff --git a/include/media/stagefright/ExtendedWriter.h b/include/media/stagefright/ExtendedWriter.h
new file mode 100644
index 0000000..23944b0
--- /dev/null
+++ b/include/media/stagefright/ExtendedWriter.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EXTENDED_WRITER_H_
+
+#define EXTENDED_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+#include <cutils/log.h>
+
+#define LITERAL_TO_STRING_INTERNAL(x) #x
+#define LITERAL_TO_STRING(x) LITERAL_TO_STRING_INTERNAL(x)
+
+#define CHECK_EQ(x,y) \
+ LOG_ALWAYS_FATAL_IF( \
+ (x) != (y), \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x " != " #y)
+
+#define CHECK(x) \
+ LOG_ALWAYS_FATAL_IF( \
+ !(x), \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x)
+
+namespace android {
+
+struct MediaSource;
+struct MetaData;
+
+struct ExtendedWriter : public MediaWriter {
+ ExtendedWriter(const char *filename);
+ ExtendedWriter(int fd);
+
+ status_t initCheck() const;
+
+ virtual status_t addSource(const sp<MediaSource> &source);
+ virtual bool reachedEOS();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual status_t pause();
+
+protected:
+ virtual ~ExtendedWriter();
+
+private:
+ FILE *mFile;
+ status_t mInitCheck;
+ sp<MediaSource> mSource;
+ bool mStarted;
+ volatile bool mPaused;
+ volatile bool mResumed;
+ volatile bool mDone;
+ volatile bool mReachedEOS;
+ pthread_t mThread;
+ int64_t mEstimatedSizeBytes;
+ int64_t mEstimatedDurationUs;
+
+ int32_t mFormat;
+
+ //QCP/EVRC header
+ struct QCPEVRCHeader
+ {
+ /* RIFF Section */
+ char riff[4];
+ unsigned int s_riff;
+ char qlcm[4];
+
+ /* Format chunk */
+ char fmt[4];
+ unsigned int s_fmt;
+ char mjr;
+ char mnr;
+ unsigned int data1;
+
+ /* UNIQUE ID of the codec */
+ unsigned short data2;
+ unsigned short data3;
+ char data4[8];
+ unsigned short ver;
+
+ /* Codec Info */
+ char name[80];
+ unsigned short abps;
+
+ /* average bits per sec of the codec */
+ unsigned short bytes_per_pkt;
+ unsigned short samp_per_block;
+ unsigned short samp_per_sec;
+ unsigned short bits_per_samp;
+ unsigned char vr_num_of_rates;
+
+ /* Rate Header fmt info */
+ unsigned char rvd1[3];
+ unsigned short vr_bytes_per_pkt[8];
+ unsigned int rvd2[5];
+
+ /* Vrat chunk */
+ unsigned char vrat[4];
+ unsigned int s_vrat;
+ unsigned int v_rate;
+ unsigned int size_in_pkts;
+
+ /* Data chunk */
+ unsigned char data[4];
+ unsigned int s_data;
+ } __attribute__ ((packed));
+
+ struct QCPEVRCHeader mHeader;
+ off_t mOffset; //note off_t
+
+ static void *ThreadWrapper(void *);
+ status_t threadFunc();
+ bool exceedsFileSizeLimit();
+ bool exceedsFileDurationLimit();
+
+ ExtendedWriter(const ExtendedWriter &);
+ ExtendedWriter &operator=(const ExtendedWriter &);
+
+ status_t writeQCPHeader( );
+ status_t writeEVRCHeader( );
+};
+
+} // namespace android
+
+#endif // EXTENDED_WRITER_H_
diff --git a/include/media/stagefright/LPAPlayer.h b/include/media/stagefright/LPAPlayer.h
new file mode 100644
index 0000000..c351211
--- /dev/null
+++ b/include/media/stagefright/LPAPlayer.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LPA_PLAYER_H_
+
+#define LPA_PLAYER_H_
+
+#include "AudioPlayer.h"
+#include <media/IAudioFlinger.h>
+#include <utils/threads.h>
+#include <utils/List.h>
+#include <utils/Vector.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <binder/IServiceManager.h>
+#include <linux/unistd.h>
+#include <include/TimedEventQueue.h>
+#include <binder/BinderService.h>
+#include <binder/MemoryDealer.h>
+#include <powermanager/IPowerManager.h>
+
+// Pause timeout = 3sec
+#define LPA_PAUSE_TIMEOUT_USEC 3000000
+
+namespace android {
+
+class LPAPlayer : public AudioPlayer {
+public:
+ enum {
+ REACHED_EOS,
+ SEEK_COMPLETE
+ };
+
+ enum {
+ TRACK_DIRECT,
+ TRACK_REGULAR,
+ TRACK_NONE
+ };
+
+ LPAPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *audioObserver = NULL);
+
+ virtual ~LPAPlayer();
+
+ // Caller retains ownership of "source".
+ virtual void setSource(const sp<MediaSource> &source);
+
+ // Return time in us.
+ virtual int64_t getRealTimeUs();
+
+ virtual status_t start(bool sourceAlreadyStarted = false);
+
+ virtual void pause(bool playPendingSamples = false);
+ virtual void resume();
+
+ // Returns the timestamp of the last buffer played (in us).
+ virtual int64_t getMediaTimeUs();
+
+ // Returns true iff a mapping is established, i.e. the LPAPlayer
+ // has played at least one frame of audio.
+ virtual bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+
+ virtual status_t seekTo(int64_t time_us);
+
+ virtual bool isSeeking();
+ virtual bool reachedEOS(status_t *finalStatus);
+
+ static int objectsAlive;
+private:
+ int64_t mPositionTimeMediaUs;
+ int64_t mPositionTimeRealUs;
+ bool mInternalSeeking;
+ bool mIsAudioRouted;
+ bool mStarted;
+ bool mPaused;
+ bool mA2DPEnabled;
+ int32_t mChannelMask;
+ int32_t numChannels;
+ int32_t mSampleRate;
+ int64_t mLatencyUs;
+ size_t mFrameSize;
+ int64_t mTimeStarted;
+ int64_t mTimePlayed;
+ int64_t mNumFramesPlayed;
+ int64_t mNumFramesPlayedSysTimeUs;
+ int64_t mNumA2DPBytesPlayed;
+
+ void clearPowerManager();
+
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(void *obj){parentClass = (LPAPlayer *)obj;}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ LPAPlayer *parentClass;
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ friend class LPAPlayer;
+ };
+
+ friend class PMDeathRecipient;
+
+ void acquireWakeLock();
+ void releaseWakeLock();
+
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ sp<PMDeathRecipient> mDeathRecipient;
+
+ pthread_t decoderThread;
+
+ pthread_t A2DPNotificationThread;
+
+ //Kill Thread boolean
+ bool killDecoderThread;
+
+
+
+ bool killA2DPNotificationThread;
+
+ //Thread alive boolean
+ bool decoderThreadAlive;
+
+
+ bool a2dpNotificationThreadAlive;
+
+ //Declare the condition Variables and Mutex
+
+ pthread_mutex_t decoder_mutex;
+
+ pthread_mutex_t audio_sink_setup_mutex;
+
+ pthread_mutex_t a2dp_notification_mutex;
+
+
+
+ pthread_cond_t decoder_cv;
+
+
+ pthread_cond_t a2dp_notification_cv;
+
+
+ // make sure Decoder thread has exited
+ void requestAndWaitForDecoderThreadExit();
+
+
+ // make sure the Effects thread also exited
+ void requestAndWaitForA2DPNotificationThreadExit();
+
+ static void *decoderThreadWrapper(void *me);
+ void decoderThreadEntry();
+ static void *A2DPNotificationThreadWrapper(void *me);
+ void A2DPNotificationThreadEntry();
+
+ void createThreads();
+
+ volatile bool mIsA2DPEnabled;
+
+ // Structure to receive the BT notification from the flinger.
+ class AudioFlingerLPAdecodeClient: public IBinder::DeathRecipient, public BnAudioFlingerClient {
+ public:
+ AudioFlingerLPAdecodeClient(void *obj);
+
+ LPAPlayer *pBaseClass;
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters up to date in the client process
+ virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+
+ friend class LPAPlayer;
+ };
+
+ sp<IAudioFlinger> mAudioFlinger;
+
+ // helper function to obtain AudioFlinger service handle
+ void getAudioFlinger();
+
+ void handleA2DPSwitch();
+ void onPauseTimeOut();
+
+ sp<AudioFlingerLPAdecodeClient> AudioFlingerClient;
+ friend class AudioFlingerLPAdecodeClient;
+ Mutex AudioFlingerLock;
+ sp<MediaSource> mSource;
+
+ MediaBuffer *mInputBuffer;
+
+ Mutex mLock;
+ Mutex mResumeLock;
+
+ bool mSeeking;
+ bool mReachedEOS;
+ bool mReachedOutputEOS;
+ status_t mFinalStatus;
+ int64_t mSeekTimeUs;
+ int64_t mPauseTime;
+
+
+ bool mIsFirstBuffer;
+ status_t mFirstBufferResult;
+ MediaBuffer *mFirstBuffer;
+ TimedEventQueue mQueue;
+ bool mQueueStarted;
+ sp<TimedEventQueue::Event> mPauseEvent;
+ bool mPauseEventPending;
+
+ sp<MediaPlayerBase::AudioSink> mAudioSink;
+ AwesomePlayer *mObserver;
+ int mTrackType;
+
+ static size_t AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *data, size_t size, void *me);
+
+ enum A2DPState {
+ A2DP_ENABLED,
+ A2DP_DISABLED,
+ A2DP_CONNECT,
+ A2DP_DISCONNECT
+ };
+
+ int64_t getTimeStamp(A2DPState state);
+
+ size_t fillBuffer(void *data, size_t size);
+
+ int64_t getRealTimeUsLocked();
+
+ void reset();
+
+ status_t setupAudioSink();
+ static size_t AudioCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *buffer, size_t size, void *cookie);
+ size_t AudioCallback(void *cookie, void *data, size_t size);
+
+ LPAPlayer(const LPAPlayer &);
+ LPAPlayer &operator=(const LPAPlayer &);
+};
+
+struct TimedEvent : public TimedEventQueue::Event {
+ TimedEvent(LPAPlayer *player,
+ void (LPAPlayer::*method)())
+ : mPlayer(player),
+ mMethod(method) {
+ }
+
+protected:
+ virtual ~TimedEvent() {}
+
+ virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+ (mPlayer->*mMethod)();
+ }
+
+private:
+ LPAPlayer *mPlayer;
+ void (LPAPlayer::*mMethod)();
+
+ TimedEvent(const TimedEvent &);
+ TimedEvent &operator=(const TimedEvent &);
+};
+
+} // namespace android
+
+#endif // LPA_PLAYER_H_
+
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 2439be6..aad8844 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +23,9 @@
#include <media/IOMX.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
+#ifdef QCOM_HARDWARE
+#include <media/stagefright/QCOMXCodec.h>
+#endif
#include <utils/threads.h>
#include <OMX_Audio.h>
@@ -100,6 +104,10 @@ struct OMXCodec : public MediaSource,
kSupportsMultipleFramesPerInputBuffer = 1024,
kRequiresLargerEncoderOutputBuffer = 2048,
kOutputBuffersAreUnreadable = 4096,
+#ifdef QCOM_HARDWARE
+ kRequiresGlobalFlush = 0x20000000, // 2^29
+ kRequiresWMAProComponent = 0x40000000, //2^30
+#endif
};
struct CodecNameAndQuirks {
@@ -127,6 +135,11 @@ private:
// Make sure mLock is accessible to OMXCodecObserver
friend class OMXCodecObserver;
+#ifdef QCOM_HARDWARE
+ // QCOMXCodec can access variables of OMXCodec
+ friend class QCOMXCodec;
+#endif
+
// Call this with mLock hold
void on_message(const omx_message &msg);
@@ -143,6 +156,9 @@ private:
};
enum {
+#ifdef QCOM_HARDWARE
+ kPortIndexBoth = -1,
+#endif
kPortIndexInput = 0,
kPortIndexOutput = 1
};
@@ -250,6 +266,11 @@ private:
void setG711Format(int32_t numChannels);
+#ifdef QCOM_HARDWARE
+ void setEVRCFormat( int32_t sampleRate, int32_t numChannels, int32_t bitRate);
+ void setQCELPFormat( int32_t sampleRate, int32_t numChannels, int32_t bitRate);
+#endif
+
status_t setVideoPortFormatType(
OMX_U32 portIndex,
OMX_VIDEO_CODINGTYPE compressionFormat,
@@ -361,6 +382,11 @@ private:
OMXCodec(const OMXCodec &);
OMXCodec &operator=(const OMXCodec &);
+
+#ifdef QCOM_HARDWARE
+ status_t setWMAFormat(const sp<MetaData> &inputFormat);
+ void setAC3Format(int32_t numChannels, int32_t sampleRate);
+#endif
};
struct CodecCapabilities {
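The kPortIndexBoth value (-1) added above backs the "send flush on both i/p and o/p ports together" change from the commit message. A hedged sketch of how a single flush covering both ports could be issued through IOMX, written as a free function with the constant spelled out because kPortIndexBoth is private to OMXCodec:

    #include <media/IOMX.h>
    #include <OMX_Core.h>

    // One flush command for input and output together, instead of two
    // per-port flushes that can race inside the OMX component / DSP.
    static android::status_t flushBothPorts(const android::sp<android::IOMX> &omx,
                                            android::IOMX::node_id node)
    {
        return omx->sendCommand(node, OMX_CommandFlush, -1 /* kPortIndexBoth */);
    }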
diff --git a/include/media/stagefright/QCOMXCodec.h b/include/media/stagefright/QCOMXCodec.h
new file mode 100644
index 0000000..7259af9
--- /dev/null
+++ b/include/media/stagefright/QCOMXCodec.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef QC_OMX_CODEC_H_
+
+#define QC_OMX_CODEC_H_
+
+#include <android/native_window.h>
+#include <media/IOMX.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/foundation/AString.h>
+#include <utils/threads.h>
+
+#include <OMX_Audio.h>
+
+namespace android {
+
+struct MediaCodecList;
+struct OMXCodec;
+
+enum{
+ kRequiresWMAProComponent = 0x40000000,
+};
+
+
+struct QCOMXCodec {
+
+ static uint32_t getQCComponentQuirks(const MediaCodecList *list, size_t index);
+
+ static status_t configureDIVXCodec(const sp<MetaData> &meta, char* mime,
+ sp<IOMX> OMXhandle,IOMX::node_id nodeID, int port_index);
+
+ static status_t setQCFormat(const sp<MetaData> &meta, char* mime,
+ sp<IOMX> OMXhandle,IOMX::node_id nodeID,
+ OMXCodec *handle, bool isEncoder);
+
+ static status_t setWMAFormat(const sp<MetaData> &meta, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, bool isEncoder );
+
+ static status_t setQCVideoInputFormat(const char *mime,
+ OMX_VIDEO_CODINGTYPE *compressionFormat);
+
+ static status_t setQCVideoOutputFormat(const char *mime,
+ OMX_VIDEO_CODINGTYPE *compressionFormat);
+
+ static status_t checkQCFormats(int format, AString* meta);
+
+ static void setASFQuirks(uint32_t quirks, const sp<MetaData> &meta,
+ const char* componentName);
+
+ static void checkAndAddRawFormat(OMXCodec *handle, const sp<MetaData> &meta);
+
+ static void setEVRCFormat(int32_t numChannels, int32_t sampleRate,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID,
+ OMXCodec *handle, bool isEncoder );
+
+ static void setQCELPFormat(int32_t numChannels, int32_t sampleRate,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID,
+ OMXCodec *handle, bool isEncoder );
+
+ static void setAC3Format(int32_t numChannels, int32_t sampleRate,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+ static void checkQCRole(const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder,const char *mime);
+
+};
+
+}
+#endif /*QC_OMX_CODEC_H_ */
+
diff --git a/include/media/stagefright/TunnelPlayer.h b/include/media/stagefright/TunnelPlayer.h
new file mode 100644
index 0000000..71c4f10
--- /dev/null
+++ b/include/media/stagefright/TunnelPlayer.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TUNNEL_PLAYER_H_
+
+#define TUNNEL_PLAYER_H_
+
+#include "AudioPlayer.h"
+#include <media/IAudioFlinger.h>
+#include <utils/threads.h>
+#include <utils/List.h>
+#include <utils/Vector.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <binder/IServiceManager.h>
+#include <linux/unistd.h>
+#include <include/TimedEventQueue.h>
+#include <binder/BinderService.h>
+#include <binder/MemoryDealer.h>
+#include <powermanager/IPowerManager.h>
+
+// Pause timeout = 3sec
+#define TUNNEL_PAUSE_TIMEOUT_USEC 3000000
+namespace android {
+
+class TunnelPlayer : public AudioPlayer {
+public:
+ enum {
+ REACHED_EOS,
+ SEEK_COMPLETE
+ };
+
+ TunnelPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *audioObserver = NULL, bool hasVideo = false);
+
+ virtual ~TunnelPlayer();
+
+ // Caller retains ownership of "source".
+ virtual void setSource(const sp<MediaSource> &source);
+
+ // Return time in us.
+ virtual int64_t getRealTimeUs();
+
+ virtual status_t start(bool sourceAlreadyStarted = false);
+
+ virtual void pause(bool playPendingSamples = false);
+ virtual void resume();
+
+ // Returns the timestamp of the last buffer played (in us).
+ virtual int64_t getMediaTimeUs();
+
+ // Returns true iff a mapping is established, i.e. the TunnelPlayer
+ // has played at least one frame of audio.
+ virtual bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+
+ virtual status_t seekTo(int64_t time_us);
+
+ virtual bool isSeeking();
+ virtual bool reachedEOS(status_t *finalStatus);
+
+
+ static int mTunnelObjectsAlive;
+private:
+ int64_t mPositionTimeMediaUs;
+ int64_t mPositionTimeRealUs;
+ bool mInternalSeeking;
+ bool mIsAudioRouted;
+ bool mStarted;
+ bool mPaused;
+ bool mA2DPEnabled;
+ int32_t mChannelMask;
+ int32_t numChannels;
+ int32_t mSampleRate;
+ int64_t mLatencyUs;
+ size_t mFrameSize;
+ int64_t mNumFramesPlayed;
+ int64_t mNumFramesPlayedSysTimeUs;
+ audio_format_t mFormat;
+ bool mHasVideo;
+ void clearPowerManager();
+
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(void *obj){parentClass = (TunnelPlayer *)obj;}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ TunnelPlayer *parentClass;
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ friend class TunnelPlayer;
+ };
+
+ friend class PMDeathRecipient;
+
+ void acquireWakeLock();
+ void releaseWakeLock();
+
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ sp<PMDeathRecipient> mDeathRecipient;
+
+ pthread_t extractorThread;
+
+ //Kill Thread boolean
+ bool killExtractorThread;
+
+ //Thread alive boolean
+ bool extractorThreadAlive;
+
+
+ //Declare the condition Variables and Mutex
+
+ pthread_mutex_t extractor_mutex;
+ pthread_cond_t extractor_cv;
+
+
+ // make sure Decoder thread has exited
+ void requestAndWaitForExtractorThreadExit();
+
+
+ static void *extractorThreadWrapper(void *me);
+ void extractorThreadEntry();
+
+ void createThreads();
+
+ volatile bool mIsA2DPEnabled;
+
+ // Structure to receive the BT notification from the flinger.
+ class AudioFlingerTunneldecodeClient: public IBinder::DeathRecipient, public BnAudioFlingerClient {
+ public:
+ AudioFlingerTunneldecodeClient(void *obj);
+
+ TunnelPlayer *pBaseClass;
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters up to date in the client process
+ virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+
+ friend class TunnelPlayer;
+ };
+
+ sp<IAudioFlinger> mAudioFlinger;
+
+ // helper function to obtain AudioFlinger service handle
+ void getAudioFlinger();
+ void onPauseTimeOut();
+
+ sp<AudioFlingerTunneldecodeClient> mAudioFlingerClient;
+ friend class AudioFlingerTunneldecodeClient;
+ Mutex mAudioFlingerLock;
+ sp<MediaSource> mSource;
+
+ MediaBuffer *mInputBuffer;
+
+ Mutex pmLock;
+ Mutex mLock;
+
+ bool mSeeking;
+ bool mReachedEOS;
+ bool mReachedOutputEOS;
+ status_t mFinalStatus;
+ int64_t mSeekTimeUs;
+ int64_t mPauseTime;
+
+
+ bool mIsFirstBuffer;
+ status_t mFirstBufferResult;
+ MediaBuffer *mFirstBuffer;
+ TimedEventQueue mQueue;
+ bool mQueueStarted;
+ sp<TimedEventQueue::Event> mPauseEvent;
+ bool mPauseEventPending;
+
+ sp<MediaPlayerBase::AudioSink> mAudioSink;
+ AwesomePlayer *mObserver;
+
+ static size_t AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *data, size_t size, void *me);
+
+ enum A2DPState {
+ A2DP_ENABLED,
+ A2DP_DISABLED,
+ A2DP_CONNECT,
+ A2DP_DISCONNECT
+ };
+
+ int64_t getTimeStamp(A2DPState state);
+
+ size_t fillBuffer(void *data, size_t size);
+
+ int64_t getRealTimeUsLocked();
+
+ void reset();
+
+ TunnelPlayer(const TunnelPlayer &);
+ TunnelPlayer &operator=(const TunnelPlayer &);
+};
+
+struct TunnelEvent : public TimedEventQueue::Event {
+ TunnelEvent(TunnelPlayer *player,
+ void (TunnelPlayer::*method)())
+ : mPlayer(player),
+ mMethod(method) {
+ }
+
+protected:
+ virtual ~TunnelEvent() {}
+
+ virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+ (mPlayer->*mMethod)();
+ }
+
+private:
+ TunnelPlayer *mPlayer;
+ void (TunnelPlayer::*mMethod)();
+
+ TunnelEvent(const TunnelEvent &);
+ TunnelEvent &operator=(const TunnelEvent &);
+};
+
+} // namespace android
+
+#endif // TUNNEL_PLAYER_H_
diff --git a/include/media/stagefright/WAVEWriter.h b/include/media/stagefright/WAVEWriter.h
new file mode 100644
index 0000000..766d8f4
--- /dev/null
+++ b/include/media/stagefright/WAVEWriter.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WAVE_WRITER_H_
+
+#define WAVE_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+
+namespace android {
+
+
+#define ID_RIFF 0x46464952
+#define ID_WAVE 0x45564157
+#define ID_FMT 0x20746d66
+#define ID_DATA 0x61746164
+#define FORMAT_PCM 1
+
+
+struct MediaSource;
+struct MetaData;
+
+struct wav_header {
+ uint32_t riff_id;
+ uint32_t riff_sz;
+ uint32_t riff_fmt;
+ uint32_t fmt_id;
+ uint32_t fmt_sz;
+ uint16_t audio_format;
+ uint16_t num_channels;
+ uint32_t sample_rate;
+ uint32_t byte_rate; /* sample_rate * num_channels * bps / 8 */
+ uint16_t block_align; /* num_channels * bps / 8 */
+ uint16_t bits_per_sample;
+ uint32_t data_id;
+ uint32_t data_sz;
+};
+
+
+struct WAVEWriter : public MediaWriter {
+ WAVEWriter(const char *filename);
+ WAVEWriter(int fd);
+
+ status_t initCheck() const;
+
+ virtual status_t addSource(const sp<MediaSource> &source);
+ virtual bool reachedEOS();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual status_t pause();
+
+protected:
+ virtual ~WAVEWriter();
+
+private:
+ int mFd;
+ status_t mInitCheck;
+ sp<MediaSource> mSource;
+ bool mStarted;
+ volatile bool mPaused;
+ volatile bool mResumed;
+ volatile bool mDone;
+ volatile bool mReachedEOS;
+ pthread_t mThread;
+ int64_t mEstimatedSizeBytes;
+ int64_t mEstimatedDurationUs;
+
+ static void *ThreadWrapper(void *);
+ status_t threadFunc();
+ bool exceedsFileSizeLimit();
+ bool exceedsFileDurationLimit();
+
+ WAVEWriter(const WAVEWriter &);
+ WAVEWriter &operator=(const WAVEWriter &);
+};
+
+} // namespace android
+
+#endif // WAVE_WRITER_H_
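As an illustration of the byte_rate and block_align formulas noted in the wav_header struct above, a hedged helper that fills the header for plain 16-bit PCM; the helper itself is not part of the patch, and the size fields would be patched once the payload length is known:

    // Fill a canonical 44-byte WAV header for 16-bit PCM.
    static void initWavHeader(android::wav_header *hdr,
                              uint16_t channels, uint32_t sampleRate)
    {
        const uint16_t bps   = 16;                 // bits per sample
        hdr->riff_id         = ID_RIFF;
        hdr->riff_fmt        = ID_WAVE;
        hdr->fmt_id          = ID_FMT;
        hdr->fmt_sz          = 16;                 // size of the PCM fmt chunk
        hdr->audio_format    = FORMAT_PCM;
        hdr->num_channels    = channels;
        hdr->sample_rate     = sampleRate;
        hdr->byte_rate       = sampleRate * channels * bps / 8;
        hdr->block_align     = channels * bps / 8;
        hdr->bits_per_sample = bps;
        hdr->data_id         = ID_DATA;
        hdr->data_sz         = 0;                  // updated after writing samples
        hdr->riff_sz         = hdr->data_sz + sizeof(*hdr) - 8;
    }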
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 54666fb..69361fa 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -11,6 +11,17 @@ include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_SRC_FILES:= AudioParameter.cpp
+LOCAL_MODULE:= libaudioparameter
+LOCAL_MODULE_TAGS := optional
+LOCAL_SHARED_LIBRARIES := libutils
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+endif
+
LOCAL_SRC_FILES:= \
AudioTrack.cpp \
IAudioFlinger.cpp \
@@ -51,6 +62,16 @@ LOCAL_SRC_FILES:= \
SoundPool.cpp \
SoundPoolThread.cpp
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_SRC_FILES += \
+ IDirectTrack.cpp \
+ IDirectTrackClient.cpp
+
+ifeq ($(TARGET_QCOM_AUDIO_VARIANT),caf)
+LOCAL_CFLAGS += -DQCOM_ENHANCED_AUDIO
+endif
+endif
+
LOCAL_SHARED_LIBRARIES := \
libui libcutils libutils libbinder libsonivox libicuuc libexpat \
libcamera_client libstagefright_foundation \
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index e3fea77..fbb34f4 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2006-2011 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,6 +33,13 @@ const char * const AudioParameter::keyChannels = AUDIO_PARAMETER_STREAM_CHANNELS
const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
+#ifdef QCOM_HARDWARE
+const char * const AudioParameter::keyHandleFm = AUDIO_PARAMETER_KEY_HANDLE_FM;
+const char * const AudioParameter::keyVoipCheck = AUDIO_PARAMETER_KEY_VOIP_CHECK;
+const char * const AudioParameter::keyFluenceType = AUDIO_PARAMETER_KEY_FLUENCE_TYPE;
+const char * const AudioParameter::keySSR = AUDIO_PARAMETER_KEY_SSR;
+const char * const AudioParameter::keyHandleA2dpDevice = AUDIO_PARAMETER_KEY_HANDLE_A2DP_DEVICE;
+#endif
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
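A brief sketch of how one of the new keys might travel to the HAL through the existing AudioParameter / AudioSystem plumbing; the wrapper function, the value, and the I/O handle of 0 are illustrative, not part of this patch:

    #include <media/AudioParameter.h>
    #include <media/AudioSystem.h>

    // Hypothetical caller: signal FM handling to the audio HAL via the new key.
    void enableFmRouting()
    {
        android::AudioParameter param;
        param.addInt(android::String8(android::AudioParameter::keyHandleFm), 1);
        android::AudioSystem::setParameters(0 /* io handle */, param.toString());
    }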
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 8ea6306..ce03754 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -1,6 +1,7 @@
/*
**
** Copyright 2008, The Android Open Source Project
+** Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -156,7 +157,11 @@ status_t AudioRecord::set(
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ int channelCount = popcount((channelMask) & (AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_MONO | AUDIO_CHANNEL_IN_5POINT1));
+#else
int channelCount = popcount(channelMask);
+#endif
if (sessionId == 0 ) {
mSessionId = AudioSystem::newAudioSessionId();
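The masking above keeps popcount() from counting auxiliary input bits that can be set during voice-call recording. A small illustration, assuming the AUDIO_CHANNEL_IN_* constants come from the same (QCOM-extended) audio headers this hunk relies on:

    // Count only the channel bits that carry recordable audio.
    static int usableInputChannels(audio_channel_mask_t mask)
    {
        return popcount(mask & (AUDIO_CHANNEL_IN_STEREO |
                                AUDIO_CHANNEL_IN_MONO |
                                AUDIO_CHANNEL_IN_5POINT1));
    }

    // For a mask such as (AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK),
    // a plain popcount() returns 3, while usableInputChannels() returns the intended 2.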
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index fa52fa9..a2172c9 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -2,6 +2,10 @@
**
** Copyright 2007, The Android Open Source Project
**
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
@@ -96,6 +100,10 @@ AudioTrack::AudioTrack()
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
}
@@ -114,6 +122,10 @@ AudioTrack::AudioTrack(
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
@@ -135,6 +147,10 @@ AudioTrack::AudioTrack(
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
mStatus = set((audio_stream_type_t)streamType, sampleRate, (audio_format_t)format,
(audio_channel_mask_t) channelMask,
@@ -157,6 +173,10 @@ AudioTrack::AudioTrack(
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -177,9 +197,22 @@ AudioTrack::~AudioTrack()
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
+#ifdef QCOM_HARDWARE
+ if (mAudioTrack != 0) {
+ mAudioTrack.clear();
+ AudioSystem::releaseAudioSessionId(mSessionId);
+ }
+
+ if (mDirectTrack != 0) {
+ mDirectTrack.clear();
+ }
+#else
mAudioTrack.clear();
+#endif
IPCThreadState::self()->flushCommands();
+#ifndef QCOM_HARDWARE
AudioSystem::releaseAudioSessionId(mSessionId);
+#endif
}
}
@@ -252,12 +285,24 @@ status_t AudioTrack::set(
flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
}
+#ifdef QCOM_ENHANCED_AUDIO
+ if ((streamType == AUDIO_STREAM_VOICE_CALL)
+ && (channelMask == AUDIO_CHANNEL_OUT_MONO)
+ && ((sampleRate == 8000 || sampleRate == 16000)))
+ {
+ ALOGD("Turn on Direct Output for VOIP RX");
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_VOIP_RX|AUDIO_OUTPUT_FLAG_DIRECT);
+ }
+#endif
+
if (!audio_is_output_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
return BAD_VALUE;
}
uint32_t channelCount = popcount(channelMask);
+ ALOGV("AudioTrack getOutput streamType %d, sampleRate %d, format %d, channelMask %d, flags %x",
+ streamType, sampleRate, format, channelMask, flags);
audio_io_handle_t output = AudioSystem::getOutput(
streamType,
sampleRate, format, channelMask,
@@ -278,46 +323,86 @@ status_t AudioTrack::set(
mFlags = flags;
mCbf = cbf;
- if (cbf != NULL) {
- mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
- mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
- }
-
- // create the IAudioTrack
- status_t status = createTrack_l(streamType,
- sampleRate,
- format,
- channelMask,
- frameCount,
- flags,
- sharedBuffer,
- output);
+#ifdef QCOM_HARDWARE
+ if (flags & AUDIO_OUTPUT_FLAG_LPA || flags & AUDIO_OUTPUT_FLAG_TUNNEL) {
+ ALOGV("Creating Direct Track");
+ const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
+ if (audioFlinger == 0) {
+ ALOGE("Could not get audioflinger");
+ return NO_INIT;
+ }
+ mAudioFlinger = audioFlinger;
+ status_t status = NO_ERROR;
+ mAudioDirectOutput = output;
+ mDirectTrack = audioFlinger->createDirectTrack( getpid(),
+ sampleRate,
+ channelMask,
+ mAudioDirectOutput,
+ &mSessionId,
+ this,
+ streamType,
+ &status);
+ if(status != NO_ERROR) {
+ ALOGE("createDirectTrack returned with status %d", status);
+ return status;
+ }
+ mAudioTrack = NULL;
+ mSharedBuffer = NULL;
+ }
+ else {
+#endif
+ if (cbf != NULL) {
+ mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
+ mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
+ }
+ // create the IAudioTrack
+ status_t status = createTrack_l(streamType,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ sharedBuffer,
+ output);
- if (status != NO_ERROR) {
- if (mAudioTrackThread != 0) {
- mAudioTrackThread->requestExit();
- mAudioTrackThread.clear();
+ if (status != NO_ERROR) {
+ if (mAudioTrackThread != 0) {
+ mAudioTrackThread->requestExit();
+ mAudioTrackThread.clear();
+ }
+ return status;
}
- return status;
+#ifdef QCOM_HARDWARE
+ AudioSystem::acquireAudioSessionId(mSessionId);
+ mAudioDirectOutput = -1;
+ mDirectTrack = NULL;
+ mSharedBuffer = sharedBuffer;
}
-
+ mUserData = user;
+#endif
mStatus = NO_ERROR;
mStreamType = streamType;
mFormat = format;
mChannelMask = channelMask;
mChannelCount = channelCount;
- mSharedBuffer = sharedBuffer;
+
mMuted = false;
mActive = false;
- mUserData = user;
+
mLoopCount = 0;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
mFlushed = false;
+
+#ifndef QCOM_HARDWARE
+ mSharedBuffer = sharedBuffer;
+ mUserData = user;
AudioSystem::acquireAudioSessionId(mSessionId);
+#endif
+
mRestoreStatus = NO_ERROR;
return NO_ERROR;
}
@@ -331,6 +416,11 @@ status_t AudioTrack::initCheck() const
uint32_t AudioTrack::latency() const
{
+#ifdef QCOM_HARDWARE
+ if(mAudioDirectOutput != -1) {
+ return mAudioFlinger->latency(mAudioDirectOutput);
+ }
+#endif
return mLatency;
}
@@ -351,6 +441,11 @@ int AudioTrack::channelCount() const
uint32_t AudioTrack::frameCount() const
{
+#ifdef QCOM_HARDWARE
+ if(mAudioDirectOutput != -1) {
+ return mAudioFlinger->frameCount(mAudioDirectOutput);
+ }
+#endif
return mCblk->frameCount;
}
@@ -372,6 +467,16 @@ sp<IMemory>& AudioTrack::sharedBuffer()
void AudioTrack::start()
{
+#ifdef QCOM_HARDWARE
+ if (mDirectTrack != NULL) {
+ if(mActive == 0) {
+ mActive = 1;
+ mDirectTrack->start();
+ }
+ return;
+ }
+#endif
+
sp<AudioTrackThread> t = mAudioTrackThread;
ALOGV("start %p", this);
@@ -436,26 +541,35 @@ void AudioTrack::stop()
AutoMutex lock(mLock);
if (mActive) {
- mActive = false;
- mCblk->cv.signal();
- mAudioTrack->stop();
- // Cancel loops (If we are in the middle of a loop, playback
- // would not stop until loopCount reaches 0).
- setLoop_l(0, 0, 0);
- // the playback head position will reset to 0, so if a marker is set, we need
- // to activate it again
- mMarkerReached = false;
- // Force flush if a shared buffer is used otherwise audioflinger
- // will not stop before end of buffer is reached.
- if (mSharedBuffer != 0) {
- flush_l();
- }
- if (t != 0) {
- t->pause();
- } else {
- setpriority(PRIO_PROCESS, 0, mPreviousPriority);
- set_sched_policy(0, mPreviousSchedulingGroup);
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ mActive = false;
+ mDirectTrack->stop();
+ } else if (mAudioTrack != NULL) {
+#endif
+ mActive = false;
+ mCblk->cv.signal();
+ mAudioTrack->stop();
+ // Cancel loops (If we are in the middle of a loop, playback
+ // would not stop until loopCount reaches 0).
+ setLoop_l(0, 0, 0);
+ // the playback head position will reset to 0, so if a marker is set, we need
+ // to activate it again
+ mMarkerReached = false;
+ // Force flush if a shared buffer is used otherwise audioflinger
+ // will not stop before end of buffer is reached.
+ if (mSharedBuffer != 0) {
+ flush_l();
+ }
+ if (t != 0) {
+ t->pause();
+ } else {
+ setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+ set_sched_policy(0, mPreviousSchedulingGroup);
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
}
}
@@ -469,7 +583,12 @@ bool AudioTrack::stopped() const
void AudioTrack::flush()
{
AutoMutex lock(mLock);
- flush_l();
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ mDirectTrack->flush();
+ } else
+#endif
+ flush_l();
}
// must be called with mLock held
@@ -497,14 +616,28 @@ void AudioTrack::pause()
AutoMutex lock(mLock);
if (mActive) {
mActive = false;
- mCblk->cv.signal();
- mAudioTrack->pause();
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ ALOGV("mDirectTrack pause");
+ mDirectTrack->pause();
+ } else {
+#endif
+ mCblk->cv.signal();
+ mAudioTrack->pause();
+#ifdef QCOM_HARDWARE
+ }
+#endif
}
}
void AudioTrack::mute(bool e)
{
- mAudioTrack->mute(e);
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ mDirectTrack->mute(e);
+ } else
+#endif
+ mAudioTrack->mute(e);
mMuted = e;
}
@@ -522,8 +655,13 @@ status_t AudioTrack::setVolume(float left, float right)
AutoMutex lock(mLock);
mVolume[LEFT] = left;
mVolume[RIGHT] = right;
-
- mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ ALOGV("mDirectTrack->setVolume(left = %f , right = %f)", left,right);
+ mDirectTrack->setVolume(left, right);
+ } else
+#endif
+ mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
return NO_ERROR;
}
@@ -540,6 +678,11 @@ void AudioTrack::getVolume(float* left, float* right) const
status_t AudioTrack::setAuxEffectSendLevel(float level)
{
+#ifdef QCOM_HARDWARE
+ if (mDirectTrack != NULL) {
+ return NO_ERROR;
+ }
+#endif
ALOGV("setAuxEffectSendLevel(%f)", level);
if (level < 0.0f || level > 1.0f) {
return BAD_VALUE;
@@ -586,6 +729,11 @@ uint32_t AudioTrack::getSampleRate() const
}
AutoMutex lock(mLock);
+#ifdef QCOM_HARDWARE
+ if(mAudioDirectOutput != -1) {
+ return mAudioFlinger->sampleRate(mAudioDirectOutput);
+ }
+#endif
return mCblk->sampleRate;
}
@@ -1076,7 +1224,12 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer)
ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
-
+#ifdef QCOM_HARDWARE
+ if (mDirectTrack != NULL) {
+ mDirectTrack->write(buffer,userSize);
+ return userSize;
+ }
+#endif
if (mSharedBuffer != 0) return INVALID_OPERATION;
if (mIsTimed) return INVALID_OPERATION;
@@ -1456,6 +1609,23 @@ status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
return NO_ERROR;
}
+#ifdef QCOM_HARDWARE
+void AudioTrack::notify(int msg) {
+ if (msg == EVENT_UNDERRUN) {
+ ALOGV("Posting event underrun to Audio Sink.");
+ mCbf(EVENT_UNDERRUN, mUserData, 0);
+ }
+}
+
+status_t AudioTrack::getTimeStamp(uint64_t *tstamp) {
+    if (tstamp == NULL) {
+        return BAD_VALUE;
+    }
+    if (mDirectTrack != NULL) {
+        *tstamp = mDirectTrack->getTimeStamp();
+        ALOGV("Timestamp %llu", (unsigned long long)*tstamp);
+    }
+    return NO_ERROR;
+}
+#endif
+
// =========================================================================
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
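A brief usage sketch of the two client-visible additions above (not part of the patch): getTimeStamp() lets a player whose track was opened against the QCOM direct (LPA/tunnel) output poll the rendered position, while notify() feeds EVENT_UNDERRUN back through the normal callback. Here `track` is a placeholder for such an AudioTrack instance:

    uint64_t rendered = 0;
    if (track->getTimeStamp(&rendered) == NO_ERROR) {
        ALOGV("direct output timestamp: %llu", (unsigned long long)rendered);
    }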
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index ce8ffc4..cc6a75c 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -1,6 +1,9 @@
/*
**
** Copyright 2007, The Android Open Source Project
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -73,6 +76,9 @@ enum {
LOAD_HW_MODULE,
GET_PRIMARY_OUTPUT_SAMPLING_RATE,
GET_PRIMARY_OUTPUT_FRAME_COUNT,
+#ifdef QCOM_HARDWARE
+ CREATE_DIRECT_TRACK
+#endif
};
class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -132,6 +138,49 @@ public:
return track;
}
+#ifdef QCOM_HARDWARE
+ virtual sp<IDirectTrack> createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient* client,
+ audio_stream_type_t streamType,
+ status_t *status)
+ {
+ Parcel data, reply;
+ sp<IDirectTrack> track;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(pid);
+ data.writeInt32(sampleRate);
+ data.writeInt32(channelMask);
+ data.writeInt32((int32_t)output);
+ int lSessionId = 0;
+ if (sessionId != NULL) {
+ lSessionId = *sessionId;
+ }
+ data.writeInt32(lSessionId);
+ data.write(client, sizeof(IDirectTrackClient));
+ data.writeInt32((int32_t) streamType);
+ status_t lStatus = remote()->transact(CREATE_DIRECT_TRACK, data, &reply);
+ if (lStatus != NO_ERROR) {
+ ALOGE("createDirectTrack error: %s", strerror(-lStatus));
+ } else {
+ lSessionId = reply.readInt32();
+ if (sessionId != NULL) {
+ *sessionId = lSessionId;
+ }
+ lStatus = reply.readInt32();
+ track = interface_cast<IDirectTrack>(reply.readStrongBinder());
+ }
+ if (status) {
+ *status = lStatus;
+ }
+ return track;
+ }
+#endif
+
virtual sp<IAudioRecord> openRecord(
pid_t pid,
audio_io_handle_t input,
@@ -738,6 +787,26 @@ status_t BnAudioFlinger::onTransact(
reply->writeStrongBinder(track->asBinder());
return NO_ERROR;
} break;
+#ifdef QCOM_HARDWARE
+ case CREATE_DIRECT_TRACK: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ pid_t pid = data.readInt32();
+ uint32_t sampleRate = data.readInt32();
+ audio_channel_mask_t channelMask = data.readInt32();
+ audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+ int sessionId = data.readInt32();
+ IDirectTrackClient* client;
+ data.read(client,sizeof(IDirectTrackClient));
+ int streamType = data.readInt32();
+ status_t status;
+ sp<IDirectTrack> track = createDirectTrack(pid,
+ sampleRate, channelMask, output, &sessionId, client,(audio_stream_type_t) streamType, &status);
+ reply->writeInt32(sessionId);
+ reply->writeInt32(status);
+ reply->writeStrongBinder(track->asBinder());
+ return NO_ERROR;
+ } break;
+#endif
case OPEN_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
pid_t pid = data.readInt32();
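One caveat in the createDirectTrack() marshalling above: data.write(client, sizeof(IDirectTrackClient)) copies the raw C++ object bytes through the parcel, which is only safe while caller and callee share a process (as is the case when mediaserver's AudioOutput creates the track later in this patch). A more conventional, process-safe alternative would pass the callback as a binder; this is a hedged sketch, not what the patch does:

    // proxy side (BpAudioFlinger::createDirectTrack)
    data.writeStrongBinder(client->asBinder());

    // service side (BnAudioFlinger::onTransact, CREATE_DIRECT_TRACK)
    sp<IDirectTrackClient> client =
            interface_cast<IDirectTrackClient>(data.readStrongBinder());
    sp<IDirectTrack> track = createDirectTrack(pid, sampleRate, channelMask, output,
            &sessionId, client.get(), (audio_stream_type_t) streamType, &status);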
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 4178b29..e289703 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -1,4 +1,8 @@
/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -49,7 +53,11 @@ public:
uint32_t stream = *(const uint32_t *)param2;
ALOGV("ioConfigChanged stream %d", stream);
data.writeInt32(stream);
- } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
+ } else if (event != AudioSystem::OUTPUT_CLOSED &&
+#ifdef QCOM_HARDWARE
+ event != AudioSystem::EFFECT_CONFIG_CHANGED &&
+#endif
+ event != AudioSystem::INPUT_CLOSED) {
const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2;
data.writeInt32(desc->samplingRate);
data.writeInt32(desc->format);
diff --git a/media/libmedia/IDirectTrack.cpp b/media/libmedia/IDirectTrack.cpp
new file mode 100644
index 0000000..480761f
--- /dev/null
+++ b/media/libmedia/IDirectTrack.cpp
@@ -0,0 +1,178 @@
+/*
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IDirectTrack"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+
+#include <media/IDirectTrack.h>
+
+namespace android {
+
+enum {
+ START = IBinder::FIRST_CALL_TRANSACTION,
+ STOP,
+ FLUSH,
+ MUTE,
+ PAUSE,
+ SET_VOLUME,
+ WRITE,
+ GET_TIMESTAMP
+};
+
+class BpDirectTrack : public BpInterface<IDirectTrack>
+{
+public:
+ BpDirectTrack(const sp<IBinder>& impl)
+ : BpInterface<IDirectTrack>(impl)
+ {
+ }
+
+ virtual status_t start()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+ status_t status = remote()->transact(START, data, &reply);
+ if (status == NO_ERROR) {
+ status = reply.readInt32();
+ } else {
+ ALOGW("start() error: %s", strerror(-status));
+ }
+ return status;
+ }
+
+ virtual void stop()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+ remote()->transact(STOP, data, &reply);
+ }
+
+ virtual void flush()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+ remote()->transact(FLUSH, data, &reply);
+ }
+
+ virtual void mute(bool e)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+ data.writeInt32(e);
+ remote()->transact(MUTE, data, &reply);
+ }
+
+ virtual void pause()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+ remote()->transact(PAUSE, data, &reply);
+ }
+
+    virtual void setVolume(float left, float right)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        // marshal the volume pair so the remote side does not fall back to unity gain
+        data.writeFloat(left);
+        data.writeFloat(right);
+        remote()->transact(SET_VOLUME, data, &reply);
+    }
+
+    virtual ssize_t write(const void* buffer, size_t bytes)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        // marshal the payload; transact() returns a status, not a byte count
+        data.writeInt32(bytes);
+        data.write(buffer, bytes);
+        status_t status = remote()->transact(WRITE, data, &reply);
+        if (status != NO_ERROR) {
+            return (ssize_t)status;
+        }
+        return (ssize_t)reply.readInt32();
+    }
+
+    virtual int64_t getTimeStamp() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        // the timestamp comes back in the reply parcel; transact() only returns a status
+        status_t status = remote()->transact(GET_TIMESTAMP, data, &reply);
+        if (status != NO_ERROR) {
+            return 0;
+        }
+        return reply.readInt64();
+    }
+};
+
+IMPLEMENT_META_INTERFACE(DirectTrack, "android.media.IDirectTrack");
+
+// ----------------------------------------------------------------------
+
+status_t BnDirectTrack::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch(code) {
+ case START: {
+ CHECK_INTERFACE(IDirectTrack, data, reply);
+ reply->writeInt32(start());
+ return NO_ERROR;
+ } break;
+ case STOP: {
+ CHECK_INTERFACE(IDirectTrack, data, reply);
+ stop();
+ return NO_ERROR;
+ } break;
+ case FLUSH: {
+ CHECK_INTERFACE(IDirectTrack, data, reply);
+ flush();
+ return NO_ERROR;
+ } break;
+ case MUTE: {
+ CHECK_INTERFACE(IDirectTrack, data, reply);
+ mute( data.readInt32() );
+ return NO_ERROR;
+ } break;
+ case PAUSE: {
+ CHECK_INTERFACE(IDirectTrack, data, reply);
+ pause();
+ return NO_ERROR;
+ }
+        case SET_VOLUME: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            // read the volume pair marshalled by BpDirectTrack::setVolume()
+            float left = data.readFloat();
+            float right = data.readFloat();
+            setVolume(left, right);
+            return NO_ERROR;
+        }
+        case WRITE: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            // a raw pointer is meaningless across processes; read the payload itself
+            size_t bytes = data.readInt32();
+            const void *buffer = data.readInplace(bytes);
+            ssize_t bytesWritten = write(buffer, bytes);
+            reply->writeInt32(bytesWritten);
+            return NO_ERROR;
+        }
+        case GET_TIMESTAMP: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            int64_t time = getTimeStamp();
+            // keep the full 64-bit value; writeInt32() would truncate it
+            reply->writeInt64(time);
+            return NO_ERROR;
+        }
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+}; // namespace android
+
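An end-to-end sketch of driving the new interface, assuming the createDirectTrack() proxy added to IAudioFlinger earlier in this patch; `output`, `sessionId`, `observer`, `pcm` and `pcmBytes` are placeholders and error handling is omitted:

    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    status_t status = NO_ERROR;
    sp<IDirectTrack> direct = af->createDirectTrack(getpid(), 48000,
            AUDIO_CHANNEL_OUT_STEREO, output, &sessionId,
            observer /* IDirectTrackClient* */, AUDIO_STREAM_MUSIC, &status);
    if (status == NO_ERROR && direct != NULL) {
        direct->start();
        direct->setVolume(1.0f, 1.0f);
        direct->write(pcm, pcmBytes);
        direct->stop();
    }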
diff --git a/media/libmedia/IDirectTrackClient.cpp b/media/libmedia/IDirectTrackClient.cpp
new file mode 100644
index 0000000..86a47ec
--- /dev/null
+++ b/media/libmedia/IDirectTrackClient.cpp
@@ -0,0 +1,69 @@
+/*
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <media/IDirectTrackClient.h>
+
+namespace android {
+
+enum {
+ NOTIFY = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+class BpDirectTrackClient: public BpInterface<IDirectTrackClient>
+{
+public:
+ BpDirectTrackClient(const sp<IBinder>& impl)
+ : BpInterface<IDirectTrackClient>(impl)
+ {
+ }
+
+ virtual void notify(int msg)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDirectTrackClient::getInterfaceDescriptor());
+ data.writeInt32(msg);
+ remote()->transact(NOTIFY, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+};
+
+IMPLEMENT_META_INTERFACE(DirectTrackClient, "android.media.IDirectTrackClient");
+
+// ----------------------------------------------------------------------
+
+status_t BnDirectTrackClient::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch (code) {
+ case NOTIFY: {
+ CHECK_INTERFACE(IDirectTrackClient, data, reply);
+ int msg = data.readInt32();
+ notify(msg);
+ return NO_ERROR;
+ } break;
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+}; // namespace android
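For completeness, a minimal sketch (an assumption, not code from the patch) of a notification receiver built on BnDirectTrackClient; AudioTrack::notify() earlier in this patch suggests AudioTrack::EVENT_UNDERRUN is the message that gets forwarded:

    struct DirectTrackObserver : public BnDirectTrackClient {
        // invoked over binder, or directly when used in-process
        virtual void notify(int msg) {
            if (msg == AudioTrack::EVENT_UNDERRUN) {
                ALOGV("direct track underrun");
                // wake the feeding thread / refill buffers here
            }
        }
    };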
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index f69dbea..fa536a6 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -1,6 +1,7 @@
/*
**
** Copyright 2010, The Android Open Source Project
+** Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -45,7 +46,10 @@ const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
{"amrwb", AUDIO_ENCODER_AMR_WB},
{"aac", AUDIO_ENCODER_AAC},
{"heaac", AUDIO_ENCODER_HE_AAC},
- {"aaceld", AUDIO_ENCODER_AAC_ELD}
+ {"aaceld", AUDIO_ENCODER_AAC_ELD},
+#ifdef QCOM_HARDWARE
+ {"lpcm", AUDIO_ENCODER_LPCM},
+#endif
};
const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
@@ -804,6 +808,10 @@ MediaProfiles::createDefaultCamcorderProfiles(MediaProfiles *profiles)
MediaProfiles::createDefaultAudioEncoders(MediaProfiles *profiles)
{
profiles->mAudioEncoders.add(createDefaultAmrNBEncoderCap());
+#ifdef QCOM_HARDWARE
+ profiles->mAudioEncoders.add(createDefaultAacEncoderCap());
+ profiles->mAudioEncoders.add(createDefaultLpcmEncoderCap());
+#endif
}
/*static*/ void
@@ -838,6 +846,22 @@ MediaProfiles::createDefaultAmrNBEncoderCap()
AUDIO_ENCODER_AMR_NB, 5525, 12200, 8000, 8000, 1, 1);
}
+#ifdef QCOM_HARDWARE
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultAacEncoderCap()
+{
+ return new MediaProfiles::AudioEncoderCap(
+ AUDIO_ENCODER_AAC, 64000, 156000, 8000, 48000, 1, 2);
+}
+
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultLpcmEncoderCap()
+{
+ return new MediaProfiles::AudioEncoderCap(
+ AUDIO_ENCODER_LPCM, 768000, 4608000, 48000, 48000, 1, 6);
+}
+#endif
+
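As a sanity check on the LPCM capability added above: assuming 16-bit samples at the fixed 48 kHz rate, 48000 Hz x 16 bit x 1 channel = 768000 bps and 48000 Hz x 16 bit x 6 channels = 4608000 bps, which matches the bit-rate bounds and the 1-to-6 channel range in the same entry.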
/*static*/ void
MediaProfiles::createDefaultImageEncodingQualityLevels(MediaProfiles *profiles)
{
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 5b5ed71..a583d48 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -47,7 +47,12 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/rtsp \
$(TOP)/frameworks/av/media/libstagefright/wifi-display \
$(TOP)/frameworks/native/include/media/openmax \
- $(TOP)/external/tremolo/Tremolo \
+ $(TOP)/external/tremolo/Tremolo
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_C_INCLUDES += \
+ $(TOP)/hardware/qcom/media/mm-core/inc
+endif
LOCAL_MODULE:= libmediaplayerservice
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9bedff1..414c262 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1,5 +1,9 @@
/*
**
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,6 +78,7 @@
#include "Crypto.h"
#include "HDCP.h"
#include "RemoteDisplay.h"
+#define DEFAULT_SAMPLE_RATE 44100
namespace {
using android::media::Metadata;
@@ -1363,6 +1368,22 @@ status_t MediaPlayerService::AudioOutput::getPosition(uint32_t *position) const
return mTrack->getPosition(position);
}
+#ifdef QCOM_HARDWARE
+ssize_t MediaPlayerService::AudioOutput::sampleRate() const
+{
+ if (mTrack == 0) return NO_INIT;
+ return DEFAULT_SAMPLE_RATE;
+}
+
+status_t MediaPlayerService::AudioOutput::getTimeStamp(uint64_t *tstamp)
+{
+ if (tstamp == 0) return BAD_VALUE;
+ if (mTrack == 0) return NO_INIT;
+ mTrack->getTimeStamp(tstamp);
+ return NO_ERROR;
+}
+#endif
+
status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const
{
if (mTrack == 0) return NO_INIT;
@@ -1379,6 +1400,65 @@ status_t MediaPlayerService::AudioOutput::open(
mCallback = cb;
mCallbackCookie = cookie;
+#ifdef QCOM_HARDWARE
+ if (flags & AUDIO_OUTPUT_FLAG_LPA || flags & AUDIO_OUTPUT_FLAG_TUNNEL) {
+ ALOGV("AudioOutput open: with flags %x",flags);
+ channelMask = audio_channel_out_mask_from_count(channelCount);
+ if (0 == channelMask) {
+ ALOGE("open() error, can't derive mask for %d audio channels", channelCount);
+ return NO_INIT;
+ }
+ AudioTrack *audioTrack = NULL;
+ CallbackData *newcbd = NULL;
+ if (mCallback != NULL) {
+ newcbd = new CallbackData(this);
+ audioTrack = new AudioTrack(
+ mStreamType,
+ sampleRate,
+ format,
+ channelMask,
+ 0,
+ flags,
+ CallbackWrapper,
+ newcbd,
+ 0,
+ mSessionId);
+ if ((audioTrack == 0) || (audioTrack->initCheck() != NO_ERROR)) {
+ ALOGE("Unable to create audio track");
+ delete audioTrack;
+ delete newcbd;
+ return NO_INIT;
+ }
+ } else {
+ ALOGE("no callback supplied");
+ return NO_INIT;
+ }
+
+ if (mRecycledTrack) {
+ //usleep(500000);
+ // if we're not going to reuse the track, unblock and flush it
+ if (mCallbackData != NULL) {
+ mCallbackData->setOutput(NULL);
+ mCallbackData->endTrackSwitch();
+ }
+ mRecycledTrack->flush();
+ delete mRecycledTrack;
+ mRecycledTrack = NULL;
+ delete mCallbackData;
+ mCallbackData = NULL;
+ close();
+ }
+
+ ALOGV("setVolume");
+ mCallbackData = newcbd;
+ audioTrack->setVolume(mLeftVolume, mRightVolume);
+ mSampleRateHz = sampleRate;
+ mFlags = flags;
+ mTrack = audioTrack;
+ return NO_ERROR;
+ }
+#endif
+
// Check argument "bufferCount" against the mininum buffer count
if (bufferCount < mMinBufferCount) {
ALOGD("bufferCount (%d) is too small and increased to %d", bufferCount, mMinBufferCount);
@@ -1551,7 +1631,7 @@ void MediaPlayerService::AudioOutput::switchToNextOutput() {
ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size)
{
- LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+ //LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
//ALOGV("write(%p, %u)", buffer, size);
if (mTrack) {
@@ -1637,35 +1717,56 @@ status_t MediaPlayerService::AudioOutput::attachAuxEffect(int effectId)
void MediaPlayerService::AudioOutput::CallbackWrapper(
int event, void *cookie, void *info) {
//ALOGV("callbackwrapper");
- if (event != AudioTrack::EVENT_MORE_DATA) {
- return;
- }
-
- CallbackData *data = (CallbackData*)cookie;
- data->lock();
- AudioOutput *me = data->getOutput();
- AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
- if (me == NULL) {
- // no output set, likely because the track was scheduled to be reused
- // by another player, but the format turned out to be incompatible.
+#ifdef QCOM_HARDWARE
+ if (event == AudioTrack::EVENT_UNDERRUN) {
+ ALOGW("Event underrun");
+ CallbackData *data = (CallbackData*)cookie;
+ data->lock();
+ AudioOutput *me = data->getOutput();
+ if (me == NULL) {
+ // no output set, likely because the track was scheduled to be reused
+ // by another player, but the format turned out to be incompatible.
+ data->unlock();
+ return;
+ }
+ ALOGD("Callback!!!");
+ (*me->mCallback)(
+ me, NULL, (size_t)AudioTrack::EVENT_UNDERRUN, me->mCallbackCookie);
data->unlock();
- buffer->size = 0;
return;
}
+#endif
+ if (event == AudioTrack::EVENT_MORE_DATA) {
+ CallbackData *data = (CallbackData*)cookie;
+ data->lock();
+ AudioOutput *me = data->getOutput();
+ AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+ if (me == NULL) {
+ // no output set, likely because the track was scheduled to be reused
+ // by another player, but the format turned out to be incompatible.
+ data->unlock();
+ buffer->size = 0;
+ return;
+ }
+
+ size_t actualSize = (*me->mCallback)(
+ me, buffer->raw, buffer->size, me->mCallbackCookie);
- size_t actualSize = (*me->mCallback)(
- me, buffer->raw, buffer->size, me->mCallbackCookie);
+ if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
+ // We've reached EOS but the audio track is not stopped yet,
+ // keep playing silence.
- if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
- // We've reached EOS but the audio track is not stopped yet,
- // keep playing silence.
+ memset(buffer->raw, 0, buffer->size);
+ actualSize = buffer->size;
+ }
- memset(buffer->raw, 0, buffer->size);
- actualSize = buffer->size;
+ buffer->size = actualSize;
+ data->unlock();
}
- buffer->size = actualSize;
- data->unlock();
}
int MediaPlayerService::AudioOutput::getSessionId() const
@@ -1700,6 +1801,13 @@ status_t MediaPlayerService::AudioCache::getPosition(uint32_t *position) const
return NO_ERROR;
}
+#ifdef QCOM_HARDWARE
+ssize_t MediaPlayerService::AudioCache::sampleRate() const
+{
+ return mSampleRate;
+}
+#endif
+
status_t MediaPlayerService::AudioCache::getFramesWritten(uint32_t *written) const
{
if (written == 0) return BAD_VALUE;
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index fd648df..54df9d2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -114,6 +114,10 @@ class MediaPlayerService : public BnMediaPlayerService
void setNextOutput(const sp<AudioOutput>& nextOutput);
void switchToNextOutput();
virtual bool needsTrailingPadding() { return mNextOutput == NULL; }
+#ifdef QCOM_HARDWARE
+ virtual ssize_t sampleRate() const;
+ virtual status_t getTimeStamp(uint64_t *tstamp);
+#endif
private:
static void setMinBufferCount();
@@ -205,8 +209,10 @@ class MediaPlayerService : public BnMediaPlayerService
virtual void close() {}
void setAudioStreamType(audio_stream_type_t streamType) {}
void setVolume(float left, float right) {}
- virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
+#ifndef QCOM_HARDWARE
uint32_t sampleRate() const { return mSampleRate; }
+#endif
+ virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
audio_format_t format() const { return mFormat; }
size_t size() const { return mSize; }
status_t wait();
@@ -216,6 +222,9 @@ class MediaPlayerService : public BnMediaPlayerService
static void notify(void* cookie, int msg,
int ext1, int ext2, const Parcel *obj);
virtual status_t dump(int fd, const Vector<String16>& args) const;
+#ifdef QCOM_HARDWARE
+ virtual ssize_t sampleRate() const;
+#endif
private:
AudioCache();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 57b0ec2..aa25eff 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,6 +30,10 @@
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/AMRWriter.h>
#include <media/stagefright/AACWriter.h>
+#ifdef QCOM_HARDWARE
+#include <media/stagefright/ExtendedWriter.h>
+#include <media/stagefright/WAVEWriter.h>
+#endif
#include <media/stagefright/CameraSource.h>
#include <media/stagefright/CameraSourceTimeLapse.h>
#include <media/stagefright/MPEG2TSWriter.h>
@@ -49,6 +54,9 @@
#include <unistd.h>
#include <system/audio.h>
+#ifdef QCOM_HARDWARE
+#include <QCMediaDefs.h>
+#endif
#include "ARTPWriter.h"
@@ -159,6 +167,26 @@ status_t StagefrightRecorder::setAudioEncoder(audio_encoder ae) {
mAudioEncoder = ae;
}
+#ifdef QCOM_HARDWARE
+ // Use default values if appropriate setparam's weren't called.
+ if(mAudioEncoder == AUDIO_ENCODER_AAC) {
+ mSampleRate = mSampleRate ? mSampleRate : 48000;
+ mAudioChannels = mAudioChannels ? mAudioChannels : 2;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 156000;
+ } else if(mAudioEncoder == AUDIO_ENCODER_LPCM) {
+ mSampleRate = mSampleRate ? mSampleRate : 48000;
+ mAudioChannels = mAudioChannels ? mAudioChannels : 2;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 4608000;
+ } else if(mAudioEncoder == AUDIO_ENCODER_AMR_WB) {
+ mSampleRate = 16000;
+ mAudioChannels = 1;
+ mAudioBitRate = 23850;
+ } else {
+ mSampleRate = mSampleRate ? mSampleRate : 8000;
+ mAudioChannels = mAudioChannels ? mAudioChannels : 1;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 12200;
+ }
+#endif
return OK;
}
@@ -768,7 +796,15 @@ status_t StagefrightRecorder::start() {
case OUTPUT_FORMAT_MPEG2TS:
status = startMPEG2TSRecording();
break;
+#ifdef QCOM_HARDWARE
+ case OUTPUT_FORMAT_QCP:
+ status = startExtendedRecording( );
+ break;
+ case OUTPUT_FORMAT_WAVE:
+ status = startWAVERecording( );
+ break;
+#endif
default:
ALOGE("Unsupported output file format: %d", mOutputFormat);
status = UNKNOWN_ERROR;
@@ -809,6 +845,11 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
sp<MetaData> encMeta = new MetaData;
const char *mime;
switch (mAudioEncoder) {
+#ifdef QCOM_HARDWARE
+ case AUDIO_ENCODER_LPCM:
+ mime = MEDIA_MIMETYPE_AUDIO_RAW;
+ break;
+#endif
case AUDIO_ENCODER_AMR_NB:
case AUDIO_ENCODER_DEFAULT:
mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
@@ -828,6 +869,14 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
mime = MEDIA_MIMETYPE_AUDIO_AAC;
encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectELD);
break;
+#ifdef QCOM_HARDWARE
+ case AUDIO_ENCODER_EVRC:
+ mime = MEDIA_MIMETYPE_AUDIO_EVRC;
+ break;
+ case AUDIO_ENCODER_QCELP:
+ mime = MEDIA_MIMETYPE_AUDIO_QCELP;
+ break;
+#endif
default:
ALOGE("Unknown audio encoder: %d", mAudioEncoder);
@@ -852,6 +901,17 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
sp<MediaSource> audioEncoder =
OMXCodec::Create(client.interface(), encMeta,
true /* createEncoder */, audioSource);
+#ifdef QCOM_HARDWARE
+ // If encoder could not be created (as in LPCM), then
+ // use the AudioSource directly as the MediaSource.
+    if (audioEncoder == NULL && mAudioEncoder == AUDIO_ENCODER_LPCM) {
+ ALOGD("No encoder is needed, use the AudioSource directly as the MediaSource for LPCM format");
+ audioEncoder = audioSource;
+ }
+ if (mAudioSourceNode != NULL) {
+ mAudioSourceNode.clear();
+ }
+#endif
mAudioSourceNode = audioSource;
return audioEncoder;
@@ -888,13 +948,35 @@ status_t StagefrightRecorder::startAMRRecording() {
mAudioEncoder);
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ if (mSampleRate != 8000) {
+ ALOGE("Invalid sampling rate %d used for AMRNB recording",
+ mSampleRate);
+ return BAD_VALUE;
+ }
+#endif
} else { // mOutputFormat must be OUTPUT_FORMAT_AMR_WB
if (mAudioEncoder != AUDIO_ENCODER_AMR_WB) {
ALOGE("Invlaid encoder %d used for AMRWB recording",
mAudioEncoder);
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ if (mSampleRate != 16000) {
+ ALOGE("Invalid sample rate %d used for AMRWB recording",
+ mSampleRate);
+ return BAD_VALUE;
+ }
+#endif
+ }
+
+#ifdef QCOM_HARDWARE
+ if (mAudioChannels != 1) {
+ ALOGE("Invalid number of audio channels %d used for amr recording",
+ mAudioChannels);
+ return BAD_VALUE;
}
+#endif
mWriter = new AMRWriter(mOutputFd);
status_t status = startRawAudioRecording();
@@ -905,6 +987,24 @@ status_t StagefrightRecorder::startAMRRecording() {
return status;
}
+#ifdef QCOM_HARDWARE
+status_t StagefrightRecorder::startWAVERecording() {
+ CHECK(mOutputFormat == OUTPUT_FORMAT_WAVE);
+
+ CHECK(mAudioEncoder == AUDIO_ENCODER_LPCM);
+ CHECK(mAudioSource != AUDIO_SOURCE_CNT);
+
+ mWriter = new WAVEWriter(mOutputFd);
+ status_t status = startRawAudioRecording();
+ if (status != OK) {
+ mWriter.clear();
+ mWriter = NULL;
+ }
+
+ return status;
+}
+#endif
+
status_t StagefrightRecorder::startRawAudioRecording() {
if (mAudioSource >= AUDIO_SOURCE_CNT) {
ALOGE("Invalid audio source: %d", mAudioSource);
@@ -1450,6 +1550,9 @@ status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
case AUDIO_ENCODER_AAC:
case AUDIO_ENCODER_HE_AAC:
case AUDIO_ENCODER_AAC_ELD:
+#ifdef QCOM_HARDWARE
+ case AUDIO_ENCODER_LPCM:
+#endif
break;
default:
@@ -1611,7 +1714,12 @@ status_t StagefrightRecorder::stop() {
::close(mOutputFd);
mOutputFd = -1;
}
-
+#ifdef QCOM_HARDWARE
+ if (mAudioSourceNode != NULL) {
+ mAudioSourceNode.clear();
+ mAudioSourceNode = NULL;
+ }
+#endif
if (mStarted) {
mStarted = false;
@@ -1653,9 +1761,15 @@ status_t StagefrightRecorder::reset() {
mVideoHeight = 144;
mFrameRate = -1;
mVideoBitRate = 192000;
+#ifdef QCOM_HARDWARE
+ mSampleRate = 0;
+ mAudioChannels = 0;
+ mAudioBitRate = 0;
+#else
mSampleRate = 8000;
mAudioChannels = 1;
mAudioBitRate = 12200;
+#endif
mInterleaveDurationUs = 0;
mIFramesIntervalSec = 1;
mAudioSourceNode = 0;
@@ -1767,4 +1881,48 @@ status_t StagefrightRecorder::dump(
::write(fd, result.string(), result.size());
return OK;
}
+
+#ifdef QCOM_HARDWARE
+status_t StagefrightRecorder::startExtendedRecording() {
+ CHECK(mOutputFormat == OUTPUT_FORMAT_QCP);
+
+ if (mSampleRate != 8000) {
+ ALOGE("Invalid sampling rate %d used for recording",
+ mSampleRate);
+ return BAD_VALUE;
+ }
+ if (mAudioChannels != 1) {
+ ALOGE("Invalid number of audio channels %d used for recording",
+ mAudioChannels);
+ return BAD_VALUE;
+ }
+
+ if (mAudioSource >= AUDIO_SOURCE_CNT) {
+ ALOGE("Invalid audio source: %d", mAudioSource);
+ return BAD_VALUE;
+ }
+
+ sp<MediaSource> audioEncoder = createAudioSource();
+
+ if (audioEncoder == NULL) {
+ ALOGE("AudioEncoder NULL");
+ return UNKNOWN_ERROR;
+ }
+
+ mWriter = new ExtendedWriter(dup(mOutputFd));
+ mWriter->addSource(audioEncoder);
+
+ if (mMaxFileDurationUs != 0) {
+ mWriter->setMaxFileDuration(mMaxFileDurationUs);
+ }
+ if (mMaxFileSizeBytes != 0) {
+ mWriter->setMaxFileSize(mMaxFileSizeBytes);
+ }
+ mWriter->setListener(mListener);
+ mWriter->start();
+
+ return OK;
+}
+#endif
+
} // namespace android
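A hedged usage sketch of the new QCP recording path from a native client; OUTPUT_FORMAT_QCP and AUDIO_ENCODER_EVRC are presumably declared in the include/media/mediarecorder.h changes in this same commit, `fd` is a caller-supplied file descriptor, and error checking is omitted:

    sp<MediaRecorder> recorder = new MediaRecorder();
    recorder->setAudioSource(AUDIO_SOURCE_MIC);
    recorder->setOutputFormat(OUTPUT_FORMAT_QCP);   // routed to startExtendedRecording()
    recorder->setAudioEncoder(AUDIO_ENCODER_EVRC);  // or AUDIO_ENCODER_QCELP
    recorder->setOutputFile(fd, 0 /* offset */, 0 /* length */);
    recorder->prepare();
    recorder->start();
    // ... record ...
    recorder->stop();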
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index ec5ce7e..3f0b821 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -132,6 +133,9 @@ private:
status_t startMPEG4Recording();
status_t startAMRRecording();
status_t startAACRecording();
+#ifdef QCOM_HARDWARE
+ status_t startWAVERecording();
+#endif
status_t startRawAudioRecording();
status_t startRTPRecording();
status_t startMPEG2TSRecording();
@@ -187,6 +191,11 @@ private:
StagefrightRecorder(const StagefrightRecorder &);
StagefrightRecorder &operator=(const StagefrightRecorder &);
+
+#ifdef QCOM_HARDWARE
+ /* extension */
+ status_t startExtendedRecording();
+#endif
};
} // namespace android
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 7302692..35a5d05 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -1,6 +1,12 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
+ifeq ($(BOARD_USES_ALSA_AUDIO),true)
+ ifeq ($(call is-chipset-in-board-platform,msm8960),true)
+ LOCAL_CFLAGS += -DUSE_TUNNEL_MODE
+ endif
+endif
+
include frameworks/av/media/libstagefright/codecs/common/Config.mk
LOCAL_SRC_FILES:= \
@@ -63,7 +69,27 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/flac/include \
$(TOP)/external/tremolo \
- $(TOP)/external/openssl/include \
+ $(TOP)/external/openssl/include
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_SRC_FILES += \
+ ExtendedWriter.cpp \
+ QCMediaDefs.cpp \
+ QCOMXCodec.cpp \
+ WAVEWriter.cpp \
+ ExtendedExtractor.cpp
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/hardware/qcom/media/mm-core/inc
+
+ifeq ($(TARGET_QCOM_AUDIO_VARIANT),caf)
+LOCAL_CFLAGS += -DQCOM_ENHANCED_AUDIO
+LOCAL_SRC_FILES += \
+ LPAPlayerALSA.cpp \
+ TunnelPlayer.cpp
+endif
+endif
+
LOCAL_SHARED_LIBRARIES := \
libbinder \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 4208019..deb6b70 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -310,6 +310,13 @@ size_t AudioPlayer::AudioSinkCallback(
void *buffer, size_t size, void *cookie) {
AudioPlayer *me = (AudioPlayer *)cookie;
+#ifdef QCOM_ENHANCED_AUDIO
+ if (buffer == NULL) {
+        // A NULL buffer signals the EVENT_UNDERRUN notification used by the
+        // LPA/Tunnel sinks; it does not apply to AudioPlayer, so produce no data.
+        ALOGE("Underrun notification received; not handled by AudioPlayer (LPA/Tunnel only)");
+ return 0;
+ }
+#endif
return me->fillBuffer(buffer, size);
}
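Together with the CallbackWrapper change in MediaPlayerService.cpp, the hunk above defines one convention: an underrun is delivered to the sink callback with a NULL buffer and the event id riding in the size argument. A sketch of a sink callback that honours both cases; fillPcm() and handleUnderrun() are hypothetical helpers:

    size_t MySinkCallback(MediaPlayerBase::AudioSink *sink,
                          void *buffer, size_t size, void *cookie) {
        if (buffer == NULL) {
            // "size" actually carries AudioTrack::EVENT_UNDERRUN here
            handleUnderrun(cookie);            // hypothetical: pause/refill the feeder
            return 0;                          // no bytes produced
        }
        return fillPcm(buffer, size, cookie);  // hypothetical: copy PCM into buffer
    }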
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 48b6371..5cffad8 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -2,6 +2,10 @@
* Copyright (C) 2009 The Android Open Source Project
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@@ -40,6 +44,12 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/timedtext/TimedTextDriver.h>
#include <media/stagefright/AudioPlayer.h>
+#ifdef QCOM_ENHANCED_AUDIO
+#include <media/stagefright/LPAPlayer.h>
+#ifdef USE_TUNNEL_MODE
+#include <media/stagefright/TunnelPlayer.h>
+#endif
+#endif
#include <media/stagefright/DataSource.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaBuffer.h>
@@ -65,6 +75,9 @@ static int64_t kLowWaterMarkUs = 2000000ll; // 2secs
static int64_t kHighWaterMarkUs = 5000000ll; // 5secs
static const size_t kLowWaterMarkBytes = 40000;
static const size_t kHighWaterMarkBytes = 200000;
+#ifdef QCOM_ENHANCED_AUDIO
+int AwesomePlayer::mTunnelAliveAP = 0;
+#endif
struct AwesomeEvent : public TimedEventQueue::Event {
AwesomeEvent(
@@ -215,6 +228,9 @@ AwesomePlayer::AwesomePlayer()
mAudioStatusEventPending = false;
reset();
+#ifdef QCOM_ENHANCED_AUDIO
+ mIsTunnelAudio = false;
+#endif
}
AwesomePlayer::~AwesomePlayer() {
@@ -224,6 +240,17 @@ AwesomePlayer::~AwesomePlayer() {
reset();
+#ifdef QCOM_ENHANCED_AUDIO
+ // Disable Tunnel Mode Audio
+ if (mIsTunnelAudio) {
+ if(mTunnelAliveAP > 0) {
+ mTunnelAliveAP--;
+ ALOGV("mTunnelAliveAP = %d", mTunnelAliveAP);
+ }
+ }
+ mIsTunnelAudio = false;
+#endif
+
mClient.disconnect();
}
@@ -858,6 +885,9 @@ status_t AwesomePlayer::play() {
}
status_t AwesomePlayer::play_l() {
+#ifdef QCOM_ENHANCED_AUDIO
+ int tunnelObjectsAlive = 0;
+#endif
modifyFlags(SEEK_PREVIEW, CLEAR);
if (mFlags & PLAYING) {
@@ -885,6 +915,13 @@ status_t AwesomePlayer::play_l() {
if (mAudioSource != NULL) {
if (mAudioPlayer == NULL) {
if (mAudioSink != NULL) {
+#ifdef QCOM_ENHANCED_AUDIO
+ sp<MetaData> format = mAudioTrack->getFormat();
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+#endif
+
bool allowDeepBuffering;
int64_t cachedDurationUs;
bool eos;
@@ -896,8 +933,64 @@ status_t AwesomePlayer::play_l() {
} else {
allowDeepBuffering = false;
}
-
- mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
+#ifdef QCOM_ENHANCED_AUDIO
+#ifdef USE_TUNNEL_MODE
+ // Create tunnel player if tunnel mode is enabled
+ ALOGW("Trying to create tunnel player mIsTunnelAudio %d, \
+ LPAPlayer::objectsAlive %d, \
+ TunnelPlayer::mTunnelObjectsAlive = %d,\
+ (mAudioPlayer == NULL) %d",
+                             mIsTunnelAudio, LPAPlayer::objectsAlive,
+                             TunnelPlayer::mTunnelObjectsAlive, (mAudioPlayer == NULL));
+
+ if(mIsTunnelAudio && (mAudioPlayer == NULL) &&
+ (LPAPlayer::objectsAlive == 0) &&
+ (TunnelPlayer::mTunnelObjectsAlive == 0)) {
+ ALOGD("Tunnel player created for mime %s duration %lld\n",\
+ mime, mDurationUs);
+ bool initCheck = false;
+ if(mVideoSource != NULL) {
+ // The parameter true is to inform tunnel player that
+ // clip is audio video
+ mAudioPlayer = new TunnelPlayer(mAudioSink, initCheck,
+ this, true);
+ }
+ else {
+ mAudioPlayer = new TunnelPlayer(mAudioSink, initCheck,
+ this);
+ }
+ if(!initCheck) {
+ ALOGE("deleting Tunnel Player - initCheck failed");
+ delete mAudioPlayer;
+ mAudioPlayer = NULL;
+ }
+ }
+ tunnelObjectsAlive = (TunnelPlayer::mTunnelObjectsAlive);
+#endif
+ char lpaDecode[128];
+ property_get("lpa.decode",lpaDecode,"0");
+ if((strcmp("true",lpaDecode) == 0) && (mAudioPlayer == NULL) && tunnelObjectsAlive==0 )
+ {
+ ALOGV("LPAPlayer::getObjectsAlive() %d",LPAPlayer::objectsAlive);
+ if ( mDurationUs > 60000000
+ && (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) || !strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC))
+ && LPAPlayer::objectsAlive == 0 && mVideoSource == NULL) {
+ ALOGD("LPAPlayer created, LPA MODE detected mime %s duration %lld", mime, mDurationUs);
+ bool initCheck = false;
+ mAudioPlayer = new LPAPlayer(mAudioSink, initCheck, this);
+ if(!initCheck) {
+ delete mAudioPlayer;
+ mAudioPlayer = NULL;
+ }
+ }
+ }
+ if(mAudioPlayer == NULL) {
+ ALOGV("AudioPlayer created, Non-LPA mode mime %s duration %lld\n", mime, mDurationUs);
+#endif
+ mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
+#ifdef QCOM_ENHANCED_AUDIO
+ }
+#endif
mAudioPlayer->setSource(mAudioSource);
mTimeSource = mAudioPlayer;
@@ -915,9 +1008,14 @@ status_t AwesomePlayer::play_l() {
if (mVideoSource == NULL) {
// We don't want to post an error notification at this point,
// the error returned from MediaPlayer::start() will suffice.
-
- status_t err = startAudioPlayer_l(
- false /* sendErrorNotification */);
+ bool sendErrorNotification = false;
+#ifdef QCOM_ENHANCED_AUDIO
+ if(mIsTunnelAudio) {
+ // For tunnel Audio error has to be posted to the client
+ sendErrorNotification = true;
+ }
+#endif
+ status_t err = startAudioPlayer_l(sendErrorNotification);
if (err != OK) {
delete mAudioPlayer;
@@ -1387,14 +1485,92 @@ status_t AwesomePlayer::initAudioDecoder() {
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
-
+#ifdef QCOM_ENHANCED_AUDIO
+#ifdef USE_TUNNEL_MODE
+ char value[PROPERTY_VALUE_MAX];
+ char tunnelDecode[128];
+ property_get("tunnel.decode",tunnelDecode,"0");
+    // Enable tunnel mode for MP3 and AAC clips (but not AAC ADIF), and only
+    // if no other tunnel mode instance is running.
+ ALOGD("Tunnel Mime Type: %s, object alive = %d, mTunnelAliveAP = %d",\
+ mime, (TunnelPlayer::mTunnelObjectsAlive), mTunnelAliveAP);
+ if(((strcmp("true",tunnelDecode) == 0)||(atoi(tunnelDecode))) &&
+ (TunnelPlayer::mTunnelObjectsAlive == 0) &&
+ mTunnelAliveAP == 0 &&
+ ((!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) ||
+ (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC)))) {
+
+ if(mVideoSource != NULL) {
+ char tunnelAVDecode[128];
+ property_get("tunnel.audiovideo.decode",tunnelAVDecode,"0");
+ if(((strncmp("true", tunnelAVDecode, 4) == 0)||(atoi(tunnelAVDecode)))) {
+ ALOGD("Enable Tunnel Mode for A-V playback");
+ mIsTunnelAudio = true;
+ }
+ }
+ else {
+ ALOGI("Tunnel Mode Audio Enabled");
+ mIsTunnelAudio = true;
+ }
+ }
+ else
+ ALOGD("Normal Audio Playback");
+#endif
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW) ||
+ (mIsTunnelAudio && (mTunnelAliveAP == 0))) {
+ ALOGD("Set Audio Track as Audio Source");
+ if(mIsTunnelAudio) {
+ mTunnelAliveAP++;
+ }
+#else
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+#endif
mAudioSource = mAudioTrack;
} else {
+#ifdef QCOM_ENHANCED_AUDIO
+ // For LPA Playback use the decoder without OMX layer
+ char *matchComponentName = NULL;
+ int64_t durationUs;
+ uint32_t flags = 0;
+ char lpaDecode[128];
+ property_get("lpa.decode",lpaDecode,"0");
+ if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
+ Mutex::Autolock autoLock(mMiscStateLock);
+ if (mDurationUs < 0 || durationUs > mDurationUs) {
+ mDurationUs = durationUs;
+ }
+ }
+ if ( mDurationUs > 60000000
+ && (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) || !strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC))
+ && LPAPlayer::objectsAlive == 0 && mVideoSource == NULL && (strcmp("true",lpaDecode) == 0)) {
+ char nonOMXDecoder[128];
+ if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+ ALOGD("matchComponentName is set to MP3Decoder %lld, mime %s",mDurationUs,mime);
+ property_get("use.non-omx.mp3.decoder",nonOMXDecoder,"0");
+ if((strcmp("true",nonOMXDecoder) == 0)) {
+ matchComponentName = (char *) "MP3Decoder";
+ }
+ } else if((!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC))) {
+ ALOGD("matchComponentName is set to AACDecoder %lld, mime %s",mDurationUs,mime);
+ property_get("use.non-omx.aac.decoder",nonOMXDecoder,"0");
+ if((strcmp("true",nonOMXDecoder) == 0)) {
+ matchComponentName = (char *) "AACDecoder";
+ } else {
+ matchComponentName = (char *) "OMX.google.aac.decoder";
+ }
+ }
+ flags |= OMXCodec::kSoftwareCodecsOnly;
+ }
+ mAudioSource = OMXCodec::Create(
+ mClient.interface(), mAudioTrack->getFormat(),
+ false, // createEncoder
+ mAudioTrack, matchComponentName, flags,NULL);
+#else
mAudioSource = OMXCodec::Create(
mClient.interface(), mAudioTrack->getFormat(),
false, // createEncoder
mAudioTrack);
+#endif
}
if (mAudioSource != NULL) {
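Distilling the selection logic these AwesomePlayer hunks add, as read from the patch (an illustrative summary, not replacement code):

    // 1. tunnel.decode set, MP3/AAC source, no other tunnel or LPA instance alive,
    //    and either audio-only or tunnel.audiovideo.decode also set   -> TunnelPlayer
    // 2. lpa.decode set, MP3/AAC, audio-only, duration > 60 s,
    //    no other tunnel or LPA instance alive                        -> LPAPlayer
    // 3. anything else                                                -> stock AudioPlayer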
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 9d0eea2..4ab602f 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -33,6 +33,9 @@
#include "include/OggExtractor.h"
#include "include/WAVExtractor.h"
#include "include/WVMExtractor.h"
+#ifdef QCOM_HARDWARE
+#include "include/ExtendedExtractor.h"
+#endif
#include "matroska/MatroskaExtractor.h"
@@ -122,6 +125,9 @@ void DataSource::RegisterDefaultSniffers() {
RegisterSniffer(SniffAAC);
RegisterSniffer(SniffMPEG2PS);
RegisterSniffer(SniffWVM);
+#ifdef QCOM_HARDWARE
+ RegisterSniffer(SniffExtendedExtractor);
+#endif
char value[PROPERTY_VALUE_MAX];
if (property_get("drm.service.enabled", value, NULL)
diff --git a/media/libstagefright/ExtendedExtractor.cpp b/media/libstagefright/ExtendedExtractor.cpp
new file mode 100644
index 0000000..8e0d5d7
--- /dev/null
+++ b/media/libstagefright/ExtendedExtractor.cpp
@@ -0,0 +1,110 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ExtendedExtractor"
+#include <utils/Log.h>
+#include <dlfcn.h> // for dlopen/dlclose
+
+#include "include/ExtendedExtractor.h"
+
+static const char* EXTENDED_PARSER_LIB = "libExtendedExtractor.so";
+
+namespace android {
+
+void* ExtendedParserLib() {
+ static void* extendedParserLib = NULL;
+ static bool alreadyTriedToOpenParsers = false;
+
+ if(!alreadyTriedToOpenParsers) {
+ alreadyTriedToOpenParsers = true;
+
+ extendedParserLib = ::dlopen(EXTENDED_PARSER_LIB, RTLD_LAZY);
+
+ if(extendedParserLib == NULL) {
+ ALOGV("Failed to open EXTENDED_PARSER_LIB, dlerror = %s \n", dlerror());
+ }
+ }
+
+ return extendedParserLib;
+}
+
+MediaExtractor* ExtendedExtractor::CreateExtractor(const sp<DataSource> &source, const char *mime) {
+ static MediaExtractorFactory mediaFactoryFunction = NULL;
+ static bool alreadyTriedToFindFactoryFunction = false;
+
+ MediaExtractor* extractor = NULL;
+
+ if(!alreadyTriedToFindFactoryFunction) {
+
+ void *extendedParserLib = ExtendedParserLib();
+ if (extendedParserLib != NULL) {
+
+ mediaFactoryFunction = (MediaExtractorFactory) dlsym(extendedParserLib, MEDIA_CREATE_EXTRACTOR);
+ alreadyTriedToFindFactoryFunction = true;
+ }
+ }
+
+ if(mediaFactoryFunction==NULL) {
+ ALOGE(" dlsym for ExtendedExtractor factory function failed, dlerror = %s \n", dlerror());
+ return NULL;
+ }
+
+ extractor = mediaFactoryFunction(source, mime);
+ if(extractor==NULL) {
+ ALOGE(" ExtendedExtractor failed to instantiate extractor \n");
+ }
+
+ return extractor;
+}
+
+bool SniffExtendedExtractor(const sp<DataSource> &source, String8 *mimeType,
+ float *confidence,sp<AMessage> *meta) {
+ void *extendedParserLib = ExtendedParserLib();
+ bool retVal = false;
+ if (extendedParserLib != NULL) {
+ ExtendedExtractorSniffers extendedExtractorSniffers=
+ (ExtendedExtractorSniffers) dlsym(extendedParserLib, EXTENDED_EXTRACTOR_SNIFFERS);
+
+ if(extendedExtractorSniffers == NULL) {
+ ALOGE(" dlsym for extendedExtractorSniffers function failed, dlerror = %s \n", dlerror());
+ return retVal;
+ }
+
+ retVal = extendedExtractorSniffers(source, mimeType, confidence, meta);
+
+ if(!retVal) {
+ ALOGV("ExtendedExtractor:: ExtendedExtractorSniffers Failed");
+ }
+ }
+ return retVal;
+}
+
+} // namespace android
+
+
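For orientation, a sketch of what libExtendedExtractor.so is expected to export for the dlsym() lookups above to succeed. The real exported names are whatever MEDIA_CREATE_EXTRACTOR and EXTENDED_EXTRACTOR_SNIFFERS (defined in include/ExtendedExtractor.h, not shown in this diff) expand to; the function and class names below are placeholders:

    // inside libExtendedExtractor.so -- placeholder bodies only
    extern "C" MediaExtractor* createExtendedExtractor(
            const sp<DataSource> &source, const char *mime) {
        return new MyProprietaryExtractor(source, mime);   // hypothetical extractor
    }

    extern "C" bool extendedExtractorSniffers(
            const sp<DataSource> &source, String8 *mimeType,
            float *confidence, sp<AMessage> *meta) {
        return SniffMyFormat(source, mimeType, confidence, meta);  // hypothetical sniffer
    }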
diff --git a/media/libstagefright/ExtendedWriter.cpp b/media/libstagefright/ExtendedWriter.cpp
new file mode 100644
index 0000000..7c8b08e
--- /dev/null
+++ b/media/libstagefright/ExtendedWriter.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/ExtendedWriter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/mediarecorder.h>
+#include <system/audio.h>
+
+#include <sys/prctl.h>
+#include <sys/resource.h>
+
+#include <arpa/inet.h>
+#include <QCMediaDefs.h>
+
+#undef LOG_TAG
+#define LOG_TAG "ExtendedWriter"
+
+namespace android {
+
+ExtendedWriter::ExtendedWriter(const char *filename)
+ : mFile(fopen(filename, "wb")),
+ mInitCheck(mFile != NULL ? OK : NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false),
+ mOffset(0) {
+}
+
+ExtendedWriter::ExtendedWriter(int fd)
+ : mFile(fdopen(fd, "wb")),
+ mInitCheck(mFile != NULL ? OK : NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false),
+ mOffset(0) {
+}
+
+ExtendedWriter::~ExtendedWriter() {
+ if (mStarted) {
+ stop();
+ }
+
+ if (mFile != NULL) {
+ fclose(mFile);
+ mFile = NULL;
+ }
+}
+
+status_t ExtendedWriter::initCheck() const {
+ return mInitCheck;
+}
+
+status_t ExtendedWriter::addSource(const sp<MediaSource> &source) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource != NULL) {
+ ALOGE("A source already exists, return");
+ return UNKNOWN_ERROR;
+ }
+
+ sp<MetaData> meta = source->getFormat();
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ if ( !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
+ mFormat = AUDIO_FORMAT_QCELP;
+ } else if ( !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EVRC)) {
+ mFormat = AUDIO_FORMAT_EVRC;
+ }
+ else {
+ return UNKNOWN_ERROR;
+ }
+
+ int32_t channelCount;
+ int32_t sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
+ CHECK_EQ(channelCount, 1);
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ CHECK_EQ(sampleRate, 8000);
+
+ mSource = source;
+
+ return OK;
+}
+
+status_t ExtendedWriter::start(MetaData *params) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource == NULL) {
+ ALOGE("NULL Source");
+ return UNKNOWN_ERROR;
+ }
+
+ if (mStarted && mPaused) {
+ mPaused = false;
+ mResumed = true;
+ return OK;
+ } else if (mStarted) {
+ ALOGE("Already startd, return");
+ return OK;
+ }
+
+ //space for header;
+ size_t headerSize = sizeof( struct QCPEVRCHeader );
+ uint8_t * header = (uint8_t *)malloc(headerSize);
+ memset( header, '?', headerSize);
+ fwrite( header, 1, headerSize, mFile );
+ mOffset += headerSize;
+    free(header);  // allocated with malloc(), so release with free()
+
+ status_t err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ mReachedEOS = false;
+ mDone = false;
+
+ pthread_create(&mThread, &attr, ThreadWrapper, this);
+ pthread_attr_destroy(&attr);
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t ExtendedWriter::pause() {
+ if (!mStarted) {
+ return OK;
+ }
+ mPaused = true;
+ return OK;
+}
+
+status_t ExtendedWriter::stop() {
+ if (!mStarted) {
+ return OK;
+ }
+
+ mDone = true;
+
+ void *dummy;
+ pthread_join(mThread, &dummy);
+
+ status_t err = (status_t) dummy;
+ {
+ status_t status = mSource->stop();
+ if (err == OK &&
+ (status != OK && status != ERROR_END_OF_STREAM)) {
+ err = status;
+ }
+ }
+
+ mStarted = false;
+ return err;
+}
+
+bool ExtendedWriter::exceedsFileSizeLimit() {
+ if (mMaxFileSizeLimitBytes == 0) {
+ return false;
+ }
+ return mEstimatedSizeBytes >= mMaxFileSizeLimitBytes;
+}
+
+bool ExtendedWriter::exceedsFileDurationLimit() {
+ if (mMaxFileDurationLimitUs == 0) {
+ return false;
+ }
+ return mEstimatedDurationUs >= mMaxFileDurationLimitUs;
+}
+
+// static
+void *ExtendedWriter::ThreadWrapper(void *me) {
+ return (void *) static_cast<ExtendedWriter *>(me)->threadFunc();
+}
+
+status_t ExtendedWriter::threadFunc() {
+ mEstimatedDurationUs = 0;
+ mEstimatedSizeBytes = 0;
+ bool stoppedPrematurely = true;
+ int64_t previousPausedDurationUs = 0;
+ int64_t maxTimestampUs = 0;
+ status_t err = OK;
+
+ pid_t tid = gettid();
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+ prctl(PR_SET_NAME, (unsigned long)"ExtendedWriter", 0, 0, 0);
+ while (!mDone) {
+ MediaBuffer *buffer;
+ err = mSource->read(&buffer);
+
+ if (err != OK) {
+ break;
+ }
+
+ if (mPaused) {
+ buffer->release();
+ buffer = NULL;
+ continue;
+ }
+
+ mEstimatedSizeBytes += buffer->range_length();
+ if (exceedsFileSizeLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ break;
+ }
+
+ int64_t timestampUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ if (timestampUs > mEstimatedDurationUs) {
+ mEstimatedDurationUs = timestampUs;
+ }
+ if (mResumed) {
+ previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+ mResumed = false;
+ }
+ timestampUs -= previousPausedDurationUs;
+ ALOGV("time stamp: %lld, previous paused duration: %lld",
+ timestampUs, previousPausedDurationUs);
+ if (timestampUs > maxTimestampUs) {
+ maxTimestampUs = timestampUs;
+ }
+
+ if (exceedsFileDurationLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+ break;
+ }
+ ssize_t n = fwrite(
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ 1,
+ buffer->range_length(),
+ mFile);
+ mOffset += n;
+
+ if (n < (ssize_t)buffer->range_length()) {
+ buffer->release();
+ buffer = NULL;
+
+ break;
+ }
+
+ // XXX: How to tell it is stopped prematurely?
+ if (stoppedPrematurely) {
+ stoppedPrematurely = false;
+ }
+
+ buffer->release();
+ buffer = NULL;
+ }
+
+ if (stoppedPrematurely) {
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS, UNKNOWN_ERROR);
+ }
+
+ if ( mFormat == AUDIO_FORMAT_QCELP ) {
+ writeQCPHeader( );
+ }
+ else if ( mFormat == AUDIO_FORMAT_EVRC ) {
+ writeEVRCHeader( );
+ }
+
+ fflush(mFile);
+ fclose(mFile);
+ mFile = NULL;
+ mReachedEOS = true;
+ if (err == ERROR_END_OF_STREAM || (err == -ETIMEDOUT)) {
+ return OK;
+ }
+ return err;
+}
+
+bool ExtendedWriter::reachedEOS() {
+ return mReachedEOS;
+}
+
+status_t ExtendedWriter::writeQCPHeader() {
+ /* Common part */
+ struct QCPEVRCHeader header = {
+ {'R', 'I', 'F', 'F'}, 0, {'Q', 'L', 'C', 'M'}, /* Riff */
+ {'f', 'm', 't', ' '}, 150, 1, 0, 0, 0, 0,{0}, 0, {0},0,0,160,8000,16,0,{0},{0},{0}, /* Fmt */
+ {'v','r','a','t'}, 0, 0, 0, /* Vrat */
+ {'d','a','t','a'},0 /* Data */
+ };
+
+ fseeko(mFile, 0, SEEK_SET);
+ header.s_riff = (mOffset - 8);
+ header.data1 = (0x5E7F6D41);
+ header.data2 = (0xB115);
+ header.data3 = (0x11D0);
+ header.data4[0] = 0xBA;
+ header.data4[1] = 0x91;
+ header.data4[2] = 0x00;
+ header.data4[3] = 0x80;
+ header.data4[4] = 0x5F;
+ header.data4[5] = 0xB4;
+ header.data4[6] = 0xB9;
+ header.data4[7] = 0x7E;
+ header.ver = (0x0002);
+ memcpy(header.name, "Qcelp 13K", 9);
+ header.abps = (13000);
+ header.bytes_per_pkt = (35);
+ header.vr_num_of_rates = 5;
+ header.vr_bytes_per_pkt[0] = (0x0422);
+ header.vr_bytes_per_pkt[1] = (0x0310);
+ header.vr_bytes_per_pkt[2] = (0x0207);
+ header.vr_bytes_per_pkt[3] = (0x0103);
+ header.s_vrat = (0x00000008);
+ header.v_rate = (0x00000001);
+ header.size_in_pkts = (mOffset - sizeof( struct QCPEVRCHeader ))/ header.bytes_per_pkt;
+ header.s_data = mOffset - sizeof( struct QCPEVRCHeader );
+ fwrite( &header, 1, sizeof( struct QCPEVRCHeader ), mFile );
+ return OK;
+}
+
+status_t ExtendedWriter::writeEVRCHeader() {
+ /* Common part */
+ struct QCPEVRCHeader header = {
+ {'R', 'I', 'F', 'F'}, 0, {'Q', 'L', 'C', 'M'}, /* Riff */
+ {'f', 'm', 't', ' '}, 150, 1, 0, 0, 0, 0,{0}, 0, {0},0,0,160,8000,16,0,{0},{0},{0}, /* Fmt */
+ {'v','r','a','t'}, 0, 0, 0, /* Vrat */
+ {'d','a','t','a'},0 /* Data */
+ };
+
+ fseeko(mFile, 0, SEEK_SET);
+ header.s_riff = (mOffset - 8);
+ header.data1 = (0xe689d48d);
+ header.data2 = (0x9076);
+ header.data3 = (0x46b5);
+ header.data4[0] = 0x91;
+ header.data4[1] = 0xef;
+ header.data4[2] = 0x73;
+ header.data4[3] = 0x6a;
+ header.data4[4] = 0x51;
+ header.data4[5] = 0x00;
+ header.data4[6] = 0xce;
+ header.data4[7] = 0xb4;
+ header.ver = (0x0001);
+ memcpy(header.name, "TIA IS-127 Enhanced Variable Rate Codec, Speech Service Option 3", 64);
+ header.abps = (9600);
+ header.bytes_per_pkt = (23);
+ header.vr_num_of_rates = 4;
+ header.vr_bytes_per_pkt[0] = (0x0416);
+ header.vr_bytes_per_pkt[1] = (0x030a);
+ header.vr_bytes_per_pkt[2] = (0x0200);
+ header.vr_bytes_per_pkt[3] = (0x0102);
+ header.s_vrat = (0x00000008);
+ header.v_rate = (0x00000001);
+ header.size_in_pkts = (mOffset - sizeof( struct QCPEVRCHeader )) / header.bytes_per_pkt;
+ header.s_data = mOffset - sizeof( struct QCPEVRCHeader );
+ fwrite( &header, 1, sizeof( struct QCPEVRCHeader ), mFile );
+ return OK;
+}
+
+
+} // namespace android
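
For reference, the size bookkeeping that writeQCPHeader() and writeEVRCHeader() above perform can be summarized as follows. This is an illustrative sketch only, not part of the patch; the QCPEVRCHeader declaration lives in ExtendedWriter.h (not shown in this diff), so the field names are taken from their usage above.

    // A QCP file is a RIFF container: "RIFF" <s_riff> "QLCM", a "fmt " chunk
    // (whose size field is written as 150), a "vrat" chunk, then the "data"
    // chunk holding the raw vocoder frames written by the loop above.
    size_t payloadBytes = mOffset - sizeof(struct QCPEVRCHeader);   // bytes written after the header
    header.s_riff       = mOffset - 8;                              // RIFF size excludes the 8-byte "RIFF"+size prefix
    header.s_data       = payloadBytes;                             // size of the "data" chunk
    header.size_in_pkts = payloadBytes / header.bytes_per_pkt;      // 35 bytes/packet for QCELP 13K, 23 for EVRC
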
diff --git a/media/libstagefright/LPAPlayerALSA.cpp b/media/libstagefright/LPAPlayerALSA.cpp
new file mode 100644
index 0000000..db3a022
--- /dev/null
+++ b/media/libstagefright/LPAPlayerALSA.cpp
@@ -0,0 +1,791 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDDEBUG 0
+#define LOG_NDEBUG 0
+#define LOG_TAG "LPAPlayerALSA"
+
+#include <utils/Log.h>
+#include <utils/threads.h>
+
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+
+#include <media/stagefright/LPAPlayer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <hardware_legacy/power.h>
+
+#include <linux/unistd.h>
+
+#include "include/AwesomePlayer.h"
+#include <powermanager/PowerManager.h>
+
+static const char mName[] = "LPAPlayer";
+
+#define MEM_BUFFER_SIZE 262144
+#define MEM_BUFFER_COUNT 4
+
+#define PCM_FORMAT 2
+#define NUM_FDS 2
+namespace android {
+int LPAPlayer::objectsAlive = 0;
+
+LPAPlayer::LPAPlayer(
+ const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *observer)
+:AudioPlayer(audioSink,observer),
+mPositionTimeMediaUs(-1),
+mPositionTimeRealUs(-1),
+mInternalSeeking(false),
+mStarted(false),
+mA2DPEnabled(false),
+mSampleRate(0),
+mLatencyUs(0),
+mFrameSize(0),
+mNumFramesPlayed(0),
+mNumFramesPlayedSysTimeUs(0),
+mInputBuffer(NULL),
+mSeeking(false),
+mReachedEOS(false),
+mReachedOutputEOS(false),
+mFinalStatus(OK),
+mSeekTimeUs(0),
+mPauseTime(0),
+mIsFirstBuffer(false),
+mFirstBufferResult(OK),
+mFirstBuffer(NULL),
+mAudioSink(audioSink),
+mObserver(observer) {
+ ALOGV("LPAPlayer::LPAPlayer() ctor");
+ objectsAlive++;
+ numChannels =0;
+ mPaused = false;
+ mIsA2DPEnabled = false;
+ mAudioFlinger = NULL;
+ AudioFlingerClient = NULL;
+ /* Initialize Suspend/Resume related variables */
+ mQueue.start();
+ mQueueStarted = true;
+ mPauseEvent = new TimedEvent(this, &LPAPlayer::onPauseTimeOut);
+ mPauseEventPending = false;
+ getAudioFlinger();
+ ALOGV("Registering client with AudioFlinger");
+ //mAudioFlinger->registerClient(AudioFlingerClient);
+
+ mIsAudioRouted = false;
+
+ initCheck = true;
+
+}
+
+LPAPlayer::~LPAPlayer() {
+ ALOGV("LPAPlayer::~LPAPlayer()");
+ if (mQueueStarted) {
+ mQueue.stop();
+ }
+
+ reset();
+
+ //mAudioFlinger->deregisterClient(AudioFlingerClient);
+ objectsAlive--;
+}
+
+void LPAPlayer::getAudioFlinger() {
+ Mutex::Autolock _l(AudioFlingerLock);
+
+ if ( mAudioFlinger.get() == 0 ) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_flinger"));
+ if ( binder != 0 )
+ break;
+ ALOGW("AudioFlinger not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while ( true );
+ if ( AudioFlingerClient == NULL ) {
+ AudioFlingerClient = new AudioFlingerLPAdecodeClient(this);
+ }
+
+ binder->linkToDeath(AudioFlingerClient);
+ mAudioFlinger = interface_cast<IAudioFlinger>(binder);
+ }
+ ALOGE_IF(mAudioFlinger==0, "no AudioFlinger!?");
+}
+
+LPAPlayer::AudioFlingerLPAdecodeClient::AudioFlingerLPAdecodeClient(void *obj)
+{
+ ALOGV("LPAPlayer::AudioFlingerLPAdecodeClient::AudioFlingerLPAdecodeClient");
+ pBaseClass = (LPAPlayer*)obj;
+}
+
+void LPAPlayer::AudioFlingerLPAdecodeClient::binderDied(const wp<IBinder>& who) {
+ Mutex::Autolock _l(pBaseClass->AudioFlingerLock);
+
+ pBaseClass->mAudioFlinger.clear();
+ ALOGW("AudioFlinger server died!");
+}
+
+void LPAPlayer::AudioFlingerLPAdecodeClient::ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) {
+ ALOGV("ioConfigChanged() event %d", event);
+ /*
+ if ( event != AudioSystem::A2DP_OUTPUT_STATE &&
+ event != AudioSystem::EFFECT_CONFIG_CHANGED) {
+ return;
+ }
+
+ switch ( event ) {
+ case AudioSystem::A2DP_OUTPUT_STATE:
+ {
+ ALOGV("ioConfigChanged() A2DP_OUTPUT_STATE iohandle is %d with A2DPEnabled in %d", ioHandle, pBaseClass->mIsA2DPEnabled);
+ if ( -1 == ioHandle ) {
+ if ( pBaseClass->mIsA2DPEnabled ) {
+ pBaseClass->mIsA2DPEnabled = false;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+ ALOGV("ioConfigChanged:: A2DP Disabled");
+ }
+ } else {
+ if ( !pBaseClass->mIsA2DPEnabled ) {
+
+ pBaseClass->mIsA2DPEnabled = true;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+
+ ALOGV("ioConfigChanged:: A2DP Enabled");
+ }
+ }
+ }
+ break;
+ }
+ ALOGV("ioConfigChanged Out");
+ */
+}
+
+void LPAPlayer::handleA2DPSwitch() {
+ //TODO: Implement
+}
+
+void LPAPlayer::setSource(const sp<MediaSource> &source) {
+ CHECK(mSource == NULL);
+ ALOGV("Setting source from LPA Player");
+ mSource = source;
+}
+
+status_t LPAPlayer::start(bool sourceAlreadyStarted) {
+ CHECK(!mStarted);
+ CHECK(mSource != NULL);
+
+ ALOGV("start: sourceAlreadyStarted %d", sourceAlreadyStarted);
+    //Start the source if it has not already been started
+ status_t err;
+ if (!sourceAlreadyStarted) {
+ err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+ }
+
+    //Create the decoder and A2DP notification threads and initialize all the
+    //mutexes and condition variables
+ createThreads();
+ ALOGV("All Threads Created.");
+
+ // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+ // of playback, if there is one, getFormat below will retrieve the
+ // updated format, if there isn't, we'll stash away the valid buffer
+ // of data to be used on the first audio callback.
+
+ CHECK(mFirstBuffer == NULL);
+
+ MediaSource::ReadOptions options;
+ if (mSeeking) {
+ options.setSeekTo(mSeekTimeUs);
+ mSeeking = false;
+ }
+
+ mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
+ if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+ ALOGV("INFO_FORMAT_CHANGED!!!");
+ CHECK(mFirstBuffer == NULL);
+ mFirstBufferResult = OK;
+ mIsFirstBuffer = false;
+ } else {
+ mIsFirstBuffer = true;
+ }
+
+ sp<MetaData> format = mSource->getFormat();
+ const char *mime;
+
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+ CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+
+ success = format->findInt32(kKeySampleRate, &mSampleRate);
+ CHECK(success);
+
+ success = format->findInt32(kKeyChannelCount, &numChannels);
+ CHECK(success);
+
+ if(!format->findInt32(kKeyChannelMask, &mChannelMask)) {
+ // log only when there's a risk of ambiguity of channel mask selection
+ ALOGI_IF(numChannels > 2,
+ "source format didn't specify channel mask, using (%d) channel order", numChannels);
+ mChannelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ }
+ audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_LPA |
+ AUDIO_OUTPUT_FLAG_DIRECT);
+ ALOGV("mAudiosink->open() mSampleRate %d, numChannels %d, mChannelMask %d, flags %d",mSampleRate, numChannels, mChannelMask, flags);
+ err = mAudioSink->open(
+ mSampleRate, numChannels, mChannelMask, AUDIO_FORMAT_PCM_16_BIT,
+ DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ &LPAPlayer::AudioSinkCallback,
+ this,
+ (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags));
+
+ if (err != OK) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (!sourceAlreadyStarted) {
+ mSource->stop();
+ }
+
+ ALOGE("Opening a routing session failed");
+ return err;
+ }
+
+ mIsAudioRouted = true;
+ mStarted = true;
+ mAudioSink->start();
+ ALOGV("Waking up decoder thread");
+ pthread_cond_signal(&decoder_cv);
+
+ return OK;
+}
+
+status_t LPAPlayer::seekTo(int64_t time_us) {
+ Mutex::Autolock autoLock(mLock);
+ ALOGV("seekTo: time_us %lld", time_us);
+ if ( mReachedEOS ) {
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ }
+ mSeeking = true;
+ mSeekTimeUs = time_us;
+ mPauseTime = mSeekTimeUs;
+ ALOGV("In seekTo(), mSeekTimeUs %lld",mSeekTimeUs);
+ mAudioSink->flush();
+ pthread_cond_signal(&decoder_cv);
+ return OK;
+}
+
+void LPAPlayer::pause(bool playPendingSamples) {
+ CHECK(mStarted);
+ if (mPaused) {
+ return;
+ }
+ ALOGV("pause: playPendingSamples %d", playPendingSamples);
+ mPaused = true;
+ A2DPState state;
+ if (playPendingSamples) {
+ if (!mIsA2DPEnabled) {
+ if (!mPauseEventPending) {
+ ALOGV("Posting an event for Pause timeout");
+ mQueue.postEventWithDelay(mPauseEvent, LPA_PAUSE_TIMEOUT_USEC);
+ mPauseEventPending = true;
+ }
+ mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_DISABLED);
+ }
+ else {
+ mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_ENABLED);
+ }
+ if (mAudioSink.get() != NULL)
+ mAudioSink->pause();
+ } else {
+ if (!mIsA2DPEnabled) {
+ if(!mPauseEventPending) {
+ ALOGV("Posting an event for Pause timeout");
+ mQueue.postEventWithDelay(mPauseEvent, LPA_PAUSE_TIMEOUT_USEC);
+ mPauseEventPending = true;
+ }
+ mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_DISABLED);
+ } else {
+ mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_ENABLED);
+ }
+ if (mAudioSink.get() != NULL) {
+ ALOGV("AudioSink pause");
+ mAudioSink->pause();
+ }
+ }
+}
+
+void LPAPlayer::resume() {
+ ALOGV("resume: mPaused %d",mPaused);
+ Mutex::Autolock autoLock(mResumeLock);
+ if ( mPaused) {
+ CHECK(mStarted);
+ if (!mIsA2DPEnabled) {
+ if(mPauseEventPending) {
+                ALOGV("Resume(): Cancelling the pauseTimeout event");
+ mPauseEventPending = false;
+ mQueue.cancelEvent(mPauseEvent->eventID());
+ }
+
+ }
+
+ if (!mIsAudioRouted) {
+ audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_LPA |
+ AUDIO_OUTPUT_FLAG_DIRECT);
+ status_t err = mAudioSink->open(
+ mSampleRate, numChannels, mChannelMask, AUDIO_FORMAT_PCM_16_BIT,
+ DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ &LPAPlayer::AudioSinkCallback,
+ this,
+ (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags ));
+ if (err != NO_ERROR) {
+ ALOGE("Audio sink open failed.");
+ }
+ mIsAudioRouted = true;
+ }
+ mPaused = false;
+ mAudioSink->start();
+ pthread_cond_signal(&decoder_cv);
+ }
+}
+
+//static
+size_t LPAPlayer::AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *buffer, size_t size, void *cookie) {
+ if (buffer == NULL && size == AudioTrack::EVENT_UNDERRUN) {
+ LPAPlayer *me = (LPAPlayer *)cookie;
+ me->mReachedEOS = true;
+ me->mReachedOutputEOS = true;
+ ALOGV("postAudioEOS");
+ me->mObserver->postAudioEOS(0);
+ }
+ return 1;
+}
+
+void LPAPlayer::reset() {
+
+ ALOGV("Reset");
+    // Close the audio sink only after all the threads have exited, so nothing is still writing to it
+ mReachedEOS = true;
+
+ // make sure Decoder thread has exited
+ ALOGV("Closing all the threads");
+ requestAndWaitForDecoderThreadExit();
+ requestAndWaitForA2DPNotificationThreadExit();
+
+ ALOGV("Close the Sink");
+ if (mIsAudioRouted) {
+ mAudioSink->stop();
+ mAudioSink->close();
+ mAudioSink.clear();
+ }
+ // Make sure to release any buffer we hold onto so that the
+ // source is able to stop().
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (mInputBuffer != NULL) {
+ ALOGV("AudioPlayer releasing input buffer.");
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ mSource->stop();
+
+ // The following hack is necessary to ensure that the OMX
+ // component is completely released by the time we may try
+ // to instantiate it again.
+ wp<MediaSource> tmp = mSource;
+ mSource.clear();
+ while (tmp.promote() != NULL) {
+ usleep(1000);
+ }
+
+ mPositionTimeMediaUs = -1;
+ mPositionTimeRealUs = -1;
+ mSeeking = false;
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ mFinalStatus = OK;
+ mStarted = false;
+}
+
+
+bool LPAPlayer::isSeeking() {
+ Mutex::Autolock autoLock(mLock);
+ return mSeeking;
+}
+
+bool LPAPlayer::reachedEOS(status_t *finalStatus) {
+ *finalStatus = OK;
+ Mutex::Autolock autoLock(mLock);
+ *finalStatus = mFinalStatus;
+ return mReachedOutputEOS;
+}
+
+
+void *LPAPlayer::decoderThreadWrapper(void *me) {
+ static_cast<LPAPlayer *>(me)->decoderThreadEntry();
+ return NULL;
+}
+
+
+void LPAPlayer::decoderThreadEntry() {
+
+ pthread_mutex_lock(&decoder_mutex);
+
+ pid_t tid = gettid();
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+ prctl(PR_SET_NAME, (unsigned long)"LPA DecodeThread", 0, 0, 0);
+
+ ALOGV("decoderThreadEntry wait for signal \n");
+ if (!mStarted) {
+ pthread_cond_wait(&decoder_cv, &decoder_mutex);
+ }
+ ALOGV("decoderThreadEntry ready to work \n");
+ pthread_mutex_unlock(&decoder_mutex);
+ if (killDecoderThread) {
+ return;
+ }
+ void* local_buf = malloc(MEM_BUFFER_SIZE);
+ int bytesWritten = 0;
+ while (!killDecoderThread) {
+
+ if (mReachedEOS || mPaused || !mIsAudioRouted) {
+ pthread_mutex_lock(&decoder_mutex);
+ pthread_cond_wait(&decoder_cv, &decoder_mutex);
+ pthread_mutex_unlock(&decoder_mutex);
+ continue;
+ }
+
+ if (!mIsA2DPEnabled) {
+ ALOGV("FillBuffer: MemBuffer size %d", MEM_BUFFER_SIZE);
+ ALOGV("Fillbuffer started");
+ //TODO: Add memset
+ bytesWritten = fillBuffer(local_buf, MEM_BUFFER_SIZE);
+ ALOGV("FillBuffer completed bytesToWrite %d", bytesWritten);
+
+ if(!killDecoderThread) {
+ mAudioSink->write(local_buf, bytesWritten);
+ }
+ }
+ }
+
+ free(local_buf);
+
+ //TODO: Call fillbuffer with different size and write to mAudioSink()
+}
+
+void *LPAPlayer::A2DPNotificationThreadWrapper(void *me) {
+ static_cast<LPAPlayer *>(me)->A2DPNotificationThreadEntry();
+ return NULL;
+}
+
+void LPAPlayer::A2DPNotificationThreadEntry() {
+ while (1) {
+ pthread_mutex_lock(&a2dp_notification_mutex);
+ pthread_cond_wait(&a2dp_notification_cv, &a2dp_notification_mutex);
+ pthread_mutex_unlock(&a2dp_notification_mutex);
+ if (killA2DPNotificationThread) {
+ break;
+ }
+
+ ALOGV("A2DP notification has come mIsA2DPEnabled: %d", mIsA2DPEnabled);
+
+ if (mIsA2DPEnabled) {
+ //TODO:
+ }
+ else {
+ //TODO
+ }
+ }
+ a2dpNotificationThreadAlive = false;
+ ALOGV("A2DPNotificationThread is dying");
+
+}
+
+void LPAPlayer::createThreads() {
+
+ //Initialize all the Mutexes and Condition Variables
+ pthread_mutex_init(&decoder_mutex, NULL);
+ pthread_mutex_init(&a2dp_notification_mutex, NULL);
+ pthread_cond_init (&decoder_cv, NULL);
+ pthread_cond_init (&a2dp_notification_cv, NULL);
+
+    // Create the decoder and A2DP notification threads
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ killDecoderThread = false;
+ killA2DPNotificationThread = false;
+
+ decoderThreadAlive = true;
+ a2dpNotificationThreadAlive = true;
+
+ ALOGV("Creating decoder Thread");
+ pthread_create(&decoderThread, &attr, decoderThreadWrapper, this);
+
+ ALOGV("Creating A2DP Notification Thread");
+ pthread_create(&A2DPNotificationThread, &attr, A2DPNotificationThreadWrapper, this);
+
+ pthread_attr_destroy(&attr);
+}
+
+size_t LPAPlayer::fillBuffer(void *data, size_t size) {
+
+ if (mReachedEOS) {
+ return 0;
+ }
+
+ bool postSeekComplete = false;
+
+ size_t size_done = 0;
+ size_t size_remaining = size;
+ while (size_remaining > 0) {
+ MediaSource::ReadOptions options;
+
+ {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mSeeking) {
+ mInternalSeeking = false;
+ }
+ if (mSeeking || mInternalSeeking) {
+ if (mIsFirstBuffer) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+ mIsFirstBuffer = false;
+ }
+
+ options.setSeekTo(mSeekTimeUs);
+
+ if (mInputBuffer != NULL) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ size_remaining = size;
+ size_done = 0;
+
+ mSeeking = false;
+ if (mObserver && !mInternalSeeking) {
+ ALOGV("fillBuffer: Posting audio seek complete event");
+ postSeekComplete = true;
+ }
+ mInternalSeeking = false;
+ }
+ }
+
+ if (mInputBuffer == NULL) {
+ status_t err;
+
+ if (mIsFirstBuffer) {
+ mInputBuffer = mFirstBuffer;
+ mFirstBuffer = NULL;
+ err = mFirstBufferResult;
+
+ mIsFirstBuffer = false;
+ } else {
+ err = mSource->read(&mInputBuffer, &options);
+ }
+
+ CHECK((err == OK && mInputBuffer != NULL)
+ || (err != OK && mInputBuffer == NULL));
+
+ Mutex::Autolock autoLock(mLock);
+
+ if (err != OK) {
+ mReachedEOS = true;
+ mFinalStatus = err;
+ break;
+ }
+
+ CHECK(mInputBuffer->meta_data()->findInt64(
+ kKeyTime, &mPositionTimeMediaUs));
+ mFrameSize = mAudioSink->frameSize();
+ mPositionTimeRealUs =
+ ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+ / mSampleRate;
+ }
+ if (mInputBuffer->range_length() == 0) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+
+ continue;
+ }
+
+ size_t copy = size_remaining;
+ if (copy > mInputBuffer->range_length()) {
+ copy = mInputBuffer->range_length();
+ }
+
+ memcpy((char *)data + size_done,
+ (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
+ copy);
+
+ mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
+ mInputBuffer->range_length() - copy);
+
+ size_done += copy;
+ size_remaining -= copy;
+ }
+
+ if (postSeekComplete) {
+ mObserver->postAudioSeekComplete();
+ }
+
+ return size_done;
+}
+
+int64_t LPAPlayer::getRealTimeUs() {
+ Mutex::Autolock autoLock(mLock);
+ return getRealTimeUsLocked();
+}
+
+
+int64_t LPAPlayer::getRealTimeUsLocked(){
+ //Used for AV sync: irrelevant API for LPA.
+ return 0;
+}
+
+int64_t LPAPlayer::getTimeStamp(A2DPState state) {
+ uint64_t timestamp = 0;
+ switch (state) {
+ case A2DP_ENABLED:
+ case A2DP_DISCONNECT:
+ ALOGV("Get timestamp for A2DP");
+ break;
+ case A2DP_DISABLED:
+ case A2DP_CONNECT: {
+ mAudioSink->getTimeStamp(&timestamp);
+ break;
+ }
+ default:
+ break;
+ }
+ ALOGV("timestamp %lld ", timestamp);
+ return timestamp;
+}
+
+int64_t LPAPlayer::getMediaTimeUs() {
+ Mutex::Autolock autoLock(mLock);
+ ALOGV("getMediaTimeUs() mPaused %d mSeekTimeUs %lld mPauseTime %lld", mPaused, mSeekTimeUs, mPauseTime);
+ if (mPaused) {
+ return mPauseTime;
+ } else {
+ A2DPState state = mIsA2DPEnabled ? A2DP_ENABLED : A2DP_DISABLED;
+ return (mSeekTimeUs + getTimeStamp(state));
+ }
+}
+
+bool LPAPlayer::getMediaTimeMapping(
+ int64_t *realtime_us, int64_t *mediatime_us) {
+ Mutex::Autolock autoLock(mLock);
+
+ *realtime_us = mPositionTimeRealUs;
+ *mediatime_us = mPositionTimeMediaUs;
+
+ return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
+}
+
+void LPAPlayer::requestAndWaitForDecoderThreadExit() {
+
+ if (!decoderThreadAlive)
+ return;
+ killDecoderThread = true;
+
+ /* Flush the audio sink to unblock the decoder thread
+ if any write to audio HAL is blocked */
+ if (!mReachedOutputEOS && mIsAudioRouted)
+ mAudioSink->flush();
+
+ pthread_cond_signal(&decoder_cv);
+ pthread_join(decoderThread,NULL);
+ ALOGV("decoder thread killed");
+
+}
+
+void LPAPlayer::requestAndWaitForA2DPNotificationThreadExit() {
+ if (!a2dpNotificationThreadAlive)
+ return;
+ killA2DPNotificationThread = true;
+ pthread_cond_signal(&a2dp_notification_cv);
+ pthread_join(A2DPNotificationThread,NULL);
+ ALOGV("a2dp notification thread killed");
+}
+
+void LPAPlayer::onPauseTimeOut() {
+ ALOGV("onPauseTimeOut");
+ Mutex::Autolock autoLock(mResumeLock);
+ if (!mPauseEventPending) {
+ return;
+ }
+ mPauseEventPending = false;
+ if(!mIsA2DPEnabled) {
+ // 1.) Set seek flags
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ if(mSeeking == false) {
+ mSeekTimeUs += getTimeStamp(A2DP_DISABLED);
+ mInternalSeeking = true;
+ } else {
+            // Do not update the seek time if the user has already seeked
+            // to a new position; that seek still has to be posted back to
+            // the player, so do not set the mInternalSeeking flag.
+ ALOGV("do not update seek time %lld ", mSeekTimeUs);
+ }
+        ALOGV("new seek time = %lld ", mSeekTimeUs);
+ // 2.) Close routing Session
+ mAudioSink->close();
+ mIsAudioRouted = false;
+ }
+
+}
+
+} //namespace android
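
The playback-position bookkeeping in LPAPlayer is spread across seekTo(), pause(), resume(), onPauseTimeOut() and getMediaTimeUs(). The condensed sketch below is illustrative only and reuses the member names from the code above: the reported media time is always mSeekTimeUs plus the timestamp the DSP reports through the audio sink, and the pause timeout folds that timestamp into mSeekTimeUs before closing the session, so the position survives the teardown.

    // Illustrative condensation, not new code in the patch.
    int64_t mediaTimeSketchUs() {
        if (mPaused) {
            return mPauseTime;                        // frozen in pause()/seekTo()
        }
        A2DPState state = mIsA2DPEnabled ? A2DP_ENABLED : A2DP_DISABLED;
        return mSeekTimeUs + getTimeStamp(state);     // seek base + DSP-rendered time
    }

    void pauseTimeoutSketch() {                       // mirrors onPauseTimeOut() above
        mSeekTimeUs += getTimeStamp(A2DP_DISABLED);   // fold DSP progress into the seek base
        mInternalSeeking = true;                      // the resume path re-seeks silently
        mAudioSink->close();                          // release the LPA session to save power
        mIsAudioRouted = false;
    }
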
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index dc8e4a3..62ba826 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -40,6 +40,9 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <utils/String8.h>
+#ifdef QCOM_HARDWARE
+#include <QCMediaDefs.h>
+#endif
namespace android {
@@ -257,6 +260,28 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('a', 'v', 'c', '1'):
return MEDIA_MIMETYPE_VIDEO_AVC;
+#ifdef QCOM_HARDWARE
+ case FOURCC('s', 'q', 'c', 'p'):
+ return MEDIA_MIMETYPE_AUDIO_QCELP;
+
+ case FOURCC('s', 'e', 'v', 'c'):
+ return MEDIA_MIMETYPE_AUDIO_EVRC;
+
+ case FOURCC('d', 't', 's', 'c'):
+ case FOURCC('d', 't', 's', 'h'):
+ case FOURCC('d', 't', 's', 'l'):
+ return MEDIA_MIMETYPE_AUDIO_DTS;
+
+ case FOURCC('d', 't', 's', 'e'):
+ return MEDIA_MIMETYPE_AUDIO_DTS_LBR;
+
+ case FOURCC('a', 'c', '-', '3'):
+ return MEDIA_MIMETYPE_AUDIO_AC3;
+
+ case FOURCC('e', 'c', '-', '3'):
+ return MEDIA_MIMETYPE_AUDIO_EAC3;
+#endif
+
default:
CHECK(!"should not be here.");
return NULL;
@@ -921,6 +946,17 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('m', 'p', '4', 'a'):
case FOURCC('s', 'a', 'm', 'r'):
case FOURCC('s', 'a', 'w', 'b'):
+#ifdef QCOM_HARDWARE
+ case FOURCC('.', 'm', 'p', '3'):
+ case FOURCC('s', 'e', 'v', 'c'):
+ case FOURCC('s', 'q', 'c', 'p'):
+ case FOURCC('d', 't', 's', 'c'):
+ case FOURCC('d', 't', 's', 'h'):
+ case FOURCC('d', 't', 's', 'l'):
+ case FOURCC('d', 't', 's', 'e'):
+ case FOURCC('a', 'c', '-', '3'):
+ case FOURCC('e', 'c', '-', '3'):
+#endif
{
uint8_t buffer[8 + 20];
if (chunk_data_size < (ssize_t)sizeof(buffer)) {
@@ -961,7 +997,16 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + sizeof(buffer);
+#ifdef QCOM_HARDWARE
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG, FourCC2MIME(chunk_type)) ||
+ !strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(chunk_type))) {
+                // An ESDS atom is not required for mp3, and an AMR-WB track
+                // with a corrupted damr atom can fail to play, so skip straight
+                // to the end of this chunk for both.
+ *offset = stop_offset;
+ } else
+#endif
+ *offset = data_offset + sizeof(buffer);
+
while (*offset < stop_offset) {
status_t err = parseChunk(offset, depth + 1);
if (err != OK) {
@@ -1219,6 +1264,18 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
break;
}
+#ifdef QCOM_HARDWARE
+ case FOURCC('d', 'd', 't', 's'):
+ case FOURCC('d', 'a', 'c', '3'):
+ case FOURCC('d', 'e', 'c', '3'):
+ {
+                //No information needs to be passed on here; just log and skip the chunk
+ ALOGV("ddts/dac3/dec3 pass from mpeg4 extractor");
+ *offset += chunk_size;
+ break;
+ }
+#endif
+
case FOURCC('a', 'v', 'c', 'C'):
{
char buffer[256];
@@ -1799,6 +1856,14 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
return ERROR_MALFORMED;
}
+#ifdef QCOM_HARDWARE
+ if (objectTypeIndication == 0xA0) {
+ // This isn't MPEG4 audio at all, it's EVRC
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EVRC);
+ return OK;
+ }
+#endif
+
if (objectTypeIndication == 0xe1) {
// This isn't MPEG4 audio at all, it's QCELP 14k...
mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
@@ -2313,6 +2378,9 @@ static bool LegacySniffMPEG4(
}
if (!memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
+#ifdef QCOM_HARDWARE
+ || !memcmp(header, "ftyp3g2a", 8) || !memcmp(header, "ftyp3g2b", 8) || !memcmp(header, "ftyp3g2c", 8)
+#endif
|| !memcmp(header, "ftyp3gr6", 8) || !memcmp(header, "ftyp3gs6", 8)
|| !memcmp(header, "ftyp3ge6", 8) || !memcmp(header, "ftyp3gg6", 8)
|| !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
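
The new case labels above are packed 32-bit sample-entry tags built with the FOURCC helper that MPEG4Extractor.cpp already defines earlier in the file (outside this hunk). An illustrative example:

    // 's'=0x73, 'q'=0x71, 'c'=0x63, 'p'=0x70  ->  0x73716370
    uint32_t tag = FOURCC('s', 'q', 'c', 'p');
    const char *mime = FourCC2MIME(tag);   // MEDIA_MIMETYPE_AUDIO_QCELP, per the mapping added above
    // The extra ftyp brands accepted by LegacySniffMPEG4 ("3g2a", "3g2b", "3g2c")
    // are the 3GPP2 container brands that typically carry these QCELP/EVRC sample entries.
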
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index b18c916..cca49fe 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -30,6 +30,9 @@
#include "include/WVMExtractor.h"
#include "include/FLACExtractor.h"
#include "include/AACExtractor.h"
+#ifdef QCOM_HARDWARE
+#include "include/ExtendedExtractor.h"
+#endif
#include "matroska/MatroskaExtractor.h"
@@ -132,6 +135,14 @@ sp<MediaExtractor> MediaExtractor::Create(
}
}
+#ifdef QCOM_HARDWARE
+ if(ret == NULL) {
+        //Create the extended extractor only if none of the default extractors matched
+ ALOGV("Using ExtendedExtractor\n");
+ ret = ExtendedExtractor::CreateExtractor(source, mime);
+ }
+#endif
+
return ret;
}
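
ExtendedExtractor itself is declared in include/ExtendedExtractor.h, which is added elsewhere in this commit and not shown in this section. Inferred from the call site above, the declaration is assumed to look roughly like this (a sketch, not the actual header):

    namespace android {
    class ExtendedExtractor {
    public:
        // Returns a proprietary parser for formats the default extractors
        // did not claim, or NULL if none matches.
        static sp<MediaExtractor> CreateExtractor(
                const sp<DataSource> &source, const char *mime);
    };
    }  // namespace android
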
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 560c1bb..4e6c395 100755..100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1,5 +1,9 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,6 +46,12 @@
#include <OMX_Audio.h>
#include <OMX_Component.h>
+#ifdef QCOM_HARDWARE
+#include <QCMediaDefs.h>
+#include <QCMetaData.h>
+#include <QOMX_AudioExtensions.h>
+#include <OMX_QCOMExtns.h>
+#endif
#include "include/avc_utils.h"
#ifdef USE_SAMSUNG_COLORFORMAT
@@ -271,6 +281,26 @@ uint32_t OMXCodec::getComponentQuirks(
index, "output-buffers-are-unreadable")) {
quirks |= kOutputBuffersAreUnreadable;
}
+#ifdef QCOM_HARDWARE
+ if (list->codecHasQuirk(
+ index, "requies-loaded-to-idle-after-allocation")) {
+ quirks |= kRequiresLoadedToIdleAfterAllocation;
+ }
+ if (list->codecHasQuirk(
+ index, "requires-global-flush")) {
+ quirks |= kRequiresGlobalFlush;
+ }
+ if (list->codecHasQuirk(
+ index, "requires-wma-pro-component")) {
+ quirks |= kRequiresWMAProComponent;
+ }
+ if (list->codecHasQuirk(
+ index, "defers-output-buffer-allocation")) {
+ quirks |= kDefersOutputBufferAllocation;
+ }
+
+ quirks |= QCOMXCodec::getQCComponentQuirks(list,index);
+#endif
return quirks;
}
@@ -352,7 +382,28 @@ sp<MediaSource> OMXCodec::Create(
return softwareCodec;
}
}
-
+#ifdef QCOM_HARDWARE
+ //quirks = getComponentQuirks(componentNameBase, createEncoder);
+ if(quirks & kRequiresWMAProComponent)
+ {
+ int32_t version;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ if(version==kTypeWMA)
+ {
+ componentName = "OMX.qcom.audio.decoder.wma";
+ }
+ else if(version==kTypeWMAPro)
+ {
+ componentName= "OMX.qcom.audio.decoder.wma10Pro";
+ }
+ else if(version==kTypeWMALossLess)
+ {
+ componentName= "OMX.qcom.audio.decoder.wmaLossLess";
+ }
+ }
+
+ QCOMXCodec::setASFQuirks(quirks, meta, componentName);
+#endif
ALOGV("Attempting to allocate OMX node '%s'", componentName);
if (!createEncoder
@@ -515,9 +566,25 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
addCodecSpecificData(data, size);
+#ifdef QCOM_HARDWARE
+ } else if (meta->findData(kKeyRawCodecSpecificData, &type, &data, &size)) {
+ ALOGV("OMXCodec::configureCodec found kKeyRawCodecSpecificData of size %d\n", size);
+ addCodecSpecificData(data, size);
+ } else {
+ QCOMXCodec::checkAndAddRawFormat(this,meta);
+#endif
}
}
+#ifdef QCOM_HARDWARE
+ status_t errRetVal = QCOMXCodec::configureDIVXCodec( meta, mMIME, mOMX, mNode,
+ (OMXCodec::mIsEncoder ?
+ kPortIndexOutput : kPortIndexInput));
+ if(OK != errRetVal) {
+ return errRetVal;
+ }
+#endif
+
int32_t bitRate = 0;
if (mIsEncoder) {
CHECK(meta->findInt32(kKeyBitRate, &bitRate));
@@ -545,6 +612,42 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CODEC_LOGE("setAACFormat() failed (err = %d)", err);
return err;
}
+
+#ifdef QCOM_HARDWARE
+ uint32_t type;
+ const void *data;
+ size_t size;
+
+ if (meta->findData(kKeyAacCodecSpecificData, &type, &data, &size)) {
+ ALOGV("OMXCodec:: configureCodec found kKeyAacCodecSpecificData of size %d\n", size);
+ addCodecSpecificData(data, size);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setAC3Format(numChannels, sampleRate);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EAC3, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setAC3Format(numChannels, sampleRate); //since AC3 and EAC3 use same format at present
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EVRC, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setEVRCFormat(numChannels, sampleRate, bitRate);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_QCELP, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setQCELPFormat(numChannels, sampleRate, bitRate);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_WMA, mMIME)) {
+ status_t err = setWMAFormat(meta);
+ if(err!=OK){
+ return err;
+ }
+#endif
} else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME)
|| !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) {
// These are PCM-like formats with a fixed sample rate but
@@ -562,10 +665,37 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+#ifdef QCOM_HARDWARE
+ } else {
+ status_t err = QCOMXCodec::setQCFormat(meta, mMIME, mOMX, mNode, this, mIsEncoder);
+
+ if(OK != err) {
+ return err;
+ }
+#endif
}
if (!strncasecmp(mMIME, "video/", 6)) {
+#ifdef QCOM_HARDWARE
+ if ((mFlags & kClientNeedsFramebuffer) && !strncmp(mComponentName, "OMX.qcom.", 9)) {
+ ALOGV("Enabling thumbnail mode.");
+ QOMX_ENABLETYPE enableType;
+ OMX_INDEXTYPE indexType;
+
+ status_t err = mOMX->getExtensionIndex(
+ mNode, OMX_QCOM_INDEX_PARAM_VIDEO_SYNCFRAMEDECODINGMODE, &indexType);
+
+ CHECK_EQ(err, (status_t)OK);
+
+ enableType.bEnable = OMX_TRUE;
+ err = mOMX->setParameter(
+ mNode, indexType, &enableType, sizeof(enableType));
+ CHECK_EQ(err, (status_t)OK);
+
+ ALOGV("Thumbnail mode enabled.");
+ }
+#endif
if (mIsEncoder) {
setVideoInputFormat(mMIME, meta);
} else {
@@ -833,8 +963,15 @@ void OMXCodec::setVideoInputFormat(
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
compressionFormat = OMX_VIDEO_CodingH263;
} else {
- ALOGE("Not a supported video mime type: %s", mime);
- CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ status_t err = QCOMXCodec::setQCVideoInputFormat(mime, &compressionFormat);
+ if(err != OK) {
+#endif
+ ALOGE("Not a supported video mime type: %s", mime);
+ CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ }
+#endif
}
OMX_COLOR_FORMATTYPE colorFormat;
@@ -1230,8 +1367,16 @@ status_t OMXCodec::setVideoOutputFormat(
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG2;
} else {
- ALOGE("Not a supported video mime type: %s", mime);
- CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ status_t err = QCOMXCodec::setQCVideoOutputFormat(mime,&compressionFormat);
+
+ if(err != OK) {
+#endif
+ ALOGE("Not a supported video mime type: %s", mime);
+ CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ }
+#endif
}
status_t err = setVideoPortFormatType(
@@ -1389,6 +1534,9 @@ OMXCodec::OMXCodec(
mState(LOADED),
mInitialBufferSubmit(true),
mSignalledEOS(false),
+#ifdef QCOM_HARDWARE
+ mFinalStatus(OK),
+#endif
mNoMoreOutputData(false),
mOutputPortSettingsHaveChanged(false),
mSeekTimeUs(-1),
@@ -1437,6 +1585,12 @@ void OMXCodec::setComponentRole(
"audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW,
"audio_decoder.g711alaw", "audio_encoder.g711alaw" },
+#ifdef QCOM_HARDWARE
+ { MEDIA_MIMETYPE_AUDIO_EVRC,
+ "audio_decoder.evrchw", "audio_encoder.evrc" },
+ { MEDIA_MIMETYPE_AUDIO_QCELP,
+ "audio_decoder,qcelp13Hw", "audio_encoder.qcelp13" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_AVC,
"video_decoder.avc", "video_encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4,
@@ -1449,6 +1603,16 @@ void OMXCodec::setComponentRole(
"audio_decoder.raw", "audio_encoder.raw" },
{ MEDIA_MIMETYPE_AUDIO_FLAC,
"audio_decoder.flac", "audio_encoder.flac" },
+#ifdef QCOM_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_DIVX,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", NULL },
+ { MEDIA_MIMETYPE_AUDIO_EAC3,
+ "audio_decoder.eac3", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX311,
+ "video_decoder.divx", NULL },
+#endif
};
static const size_t kNumMimeToRole =
@@ -1462,6 +1626,9 @@ void OMXCodec::setComponentRole(
}
if (i == kNumMimeToRole) {
+#ifdef QCOM_HARDWARE
+ QCOMXCodec::checkQCRole(omx, node, isEncoder, mime);
+#endif
return;
}
@@ -2627,11 +2794,22 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
CODEC_LOGV("FLUSH_DONE(%ld)", portIndex);
- CHECK_EQ((int)mPortStatus[portIndex], (int)SHUTTING_DOWN);
- mPortStatus[portIndex] = ENABLED;
+#ifdef QCOM_HARDWARE
+ if (portIndex == (OMX_U32) -1) {
+ CHECK_EQ((int)mPortStatus[kPortIndexInput], (int)SHUTTING_DOWN);
+ mPortStatus[kPortIndexInput] = ENABLED;
+ CHECK_EQ((int)mPortStatus[kPortIndexOutput], (int)SHUTTING_DOWN);
+ mPortStatus[kPortIndexOutput] = ENABLED;
+ } else {
+#endif
+ CHECK_EQ((int)mPortStatus[portIndex], (int)SHUTTING_DOWN);
+ mPortStatus[portIndex] = ENABLED;
- CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
- mPortBuffers[portIndex].size());
+ CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
+ mPortBuffers[portIndex].size());
+#ifdef QCOM_HARDWARE
+ }
+#endif
if (mSkipCutBuffer != NULL && mPortStatus[kPortIndexOutput] == ENABLED) {
mSkipCutBuffer->clear();
@@ -2896,21 +3074,30 @@ bool OMXCodec::flushPortAsync(OMX_U32 portIndex) {
CHECK(mState == EXECUTING || mState == RECONFIGURING
|| mState == EXECUTING_TO_IDLE);
- CODEC_LOGV("flushPortAsync(%ld): we own %d out of %d buffers already.",
- portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
- mPortBuffers[portIndex].size());
+#ifdef QCOM_HARDWARE
+ if (portIndex == (OMX_U32) -1 ) {
+ mPortStatus[kPortIndexInput] = SHUTTING_DOWN;
+ mPortStatus[kPortIndexOutput] = SHUTTING_DOWN;
+ } else {
+#endif
+ CODEC_LOGV("flushPortAsync(%ld): we own %d out of %d buffers already.",
+ portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
+ mPortBuffers[portIndex].size());
- CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
- mPortStatus[portIndex] = SHUTTING_DOWN;
+ CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
+ mPortStatus[portIndex] = SHUTTING_DOWN;
- if ((mQuirks & kRequiresFlushCompleteEmulation)
- && countBuffersWeOwn(mPortBuffers[portIndex])
- == mPortBuffers[portIndex].size()) {
- // No flush is necessary and this component fails to send a
- // flush-complete event in this case.
+ if ((mQuirks & kRequiresFlushCompleteEmulation)
+ && countBuffersWeOwn(mPortBuffers[portIndex])
+ == mPortBuffers[portIndex].size()) {
+ // No flush is necessary and this component fails to send a
+ // flush-complete event in this case.
- return false;
+ return false;
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
status_t err =
mOMX->sendCommand(mNode, OMX_CommandFlush, portIndex);
@@ -2950,16 +3137,27 @@ void OMXCodec::fillOutputBuffers() {
// end-of-output-stream. If we own all input buffers and also own
// all output buffers and we already signalled end-of-input-stream,
// the end-of-output-stream is implied.
- if (mSignalledEOS
+
+#ifdef QCOM_HARDWARE
+ // NOTE: Thumbnail mode needs a call to fillOutputBuffer in order
+ // to get the decoded frame from the component. Currently,
+ // thumbnail mode calls emptyBuffer with an EOS flag on its first
+    // frame and sets mSignalledEOS to true, so without this thumbnail-mode
+    // check, fillOutputBuffer would never be called.
+ if (!((mFlags & kClientNeedsFramebuffer) && !strncmp(mComponentName, "OMX.qcom.", 9))){
+#endif
+ if (mSignalledEOS
&& countBuffersWeOwn(mPortBuffers[kPortIndexInput])
== mPortBuffers[kPortIndexInput].size()
&& countBuffersWeOwn(mPortBuffers[kPortIndexOutput])
== mPortBuffers[kPortIndexOutput].size()) {
- mNoMoreOutputData = true;
- mBufferFilled.signal();
-
- return;
+ mNoMoreOutputData = true;
+ mBufferFilled.signal();
+ return;
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
for (size_t i = 0; i < buffers->size(); ++i) {
@@ -3283,6 +3481,20 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {
if (signalEOS) {
flags |= OMX_BUFFERFLAG_EOS;
+#ifdef QCOM_HARDWARE
+ } else if ((mFlags & kClientNeedsFramebuffer) && !strncmp(mComponentName, "OMX.qcom.", 9)) {
+ // Because we don't get an EOS after getting the first frame, we
+ // need to notify the component with OMX_BUFFERFLAG_EOS, set
+ // mNoMoreOutputData to false so fillOutputBuffer gets called on
+ // the first output buffer (see comment in fillOutputBuffer), and
+ // mSignalledEOS must be true so drainInputBuffer is not executed
+        // on extra frames. Setting mFinalStatus to ERROR_END_OF_STREAM as
+        // we don't want read() to return OK with a NULL buffer.
+ flags |= OMX_BUFFERFLAG_EOS;
+ mNoMoreOutputData = false;
+ mSignalledEOS = true;
+ mFinalStatus = ERROR_END_OF_STREAM;
+#endif
} else {
mNoMoreOutputData = false;
}
@@ -3381,6 +3593,14 @@ status_t OMXCodec::waitForBufferFilled_l() {
return mBufferFilled.wait(mLock);
}
status_t err = mBufferFilled.waitRelative(mLock, kBufferFilledEventTimeOutNs);
+#ifdef QCOM_HARDWARE
+ if ((err == -ETIMEDOUT) && (mPaused == true)){
+        // When audio playback is paused, the wait for a filled buffer may time
+        // out because no input data is available to decode. Treat the timeout
+        // as a valid case here.
+ err = OK;
+ }
+#endif
if (err != OK) {
CODEC_LOGE("Timed out waiting for output buffers: %d/%d",
countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
@@ -3605,6 +3825,182 @@ status_t OMXCodec::setAACFormat(
return OK;
}
+#ifdef QCOM_HARDWARE
+void OMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate) {
+
+ QOMX_AUDIO_PARAM_AC3TYPE profileAC3;
+ QOMX_AUDIO_PARAM_AC3PP profileAC3PP;
+ OMX_INDEXTYPE indexTypeAC3;
+ OMX_INDEXTYPE indexTypeAC3PP;
+ OMX_PARAM_PORTDEFINITIONTYPE portParam;
+
+ //configure input port
+ CODEC_LOGV("setAC3Format samplerate %d, numChannels %d", sampleRate, numChannels);
+ InitOMXParams(&portParam);
+ portParam.nPortIndex = 0;
+ status_t err = mOMX->getParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+
+ //configure output port
+ portParam.nPortIndex = 1;
+ err = mOMX->getParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mOMX->getExtensionIndex(mNode, OMX_QCOM_INDEX_PARAM_AC3TYPE, &indexTypeAC3);
+
+ InitOMXParams(&profileAC3);
+ profileAC3.nPortIndex = kPortIndexInput;
+ err = mOMX->getParameter(mNode, indexTypeAC3, &profileAC3, sizeof(profileAC3));
+ CHECK_EQ(err,(status_t)OK);
+
+ profileAC3.nSamplingRate = sampleRate;
+ profileAC3.nChannels = 2;
+ profileAC3.eChannelConfig = OMX_AUDIO_AC3_CHANNEL_CONFIG_2_0;
+
+ CODEC_LOGE("numChannels = %d, profileAC3.nChannels = %d", numChannels, profileAC3.nChannels);
+
+ err = mOMX->setParameter(mNode, indexTypeAC3, &profileAC3, sizeof(profileAC3));
+ CHECK_EQ(err,(status_t)OK);
+
+ //for output port
+ OMX_AUDIO_PARAM_PCMMODETYPE profilePcm;
+ InitOMXParams(&profilePcm);
+ profilePcm.nPortIndex = kPortIndexOutput;
+ err = mOMX->getParameter(mNode, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
+ CHECK_EQ(err, (status_t)OK);
+
+ profilePcm.nSamplingRate = sampleRate;
+ err = mOMX->setParameter(mNode, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
+ CHECK_EQ(err, (status_t)OK);
+ mOMX->getExtensionIndex(mNode, OMX_QCOM_INDEX_PARAM_AC3PP, &indexTypeAC3PP);
+
+ InitOMXParams(&profileAC3PP);
+ profileAC3PP.nPortIndex = kPortIndexInput;
+ err = mOMX->getParameter(mNode, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
+ CHECK_EQ(err, (status_t)OK);
+
+ int i;
+ int channel_routing[6];
+
+ for (i=0; i<6; i++) {
+ channel_routing[i] = -1;
+ }
+ for (i=0; i<6; i++) {
+ profileAC3PP.eChannelRouting[i] = (OMX_AUDIO_AC3_CHANNEL_ROUTING)channel_routing[i];
+ }
+
+ profileAC3PP.eChannelRouting[0] = OMX_AUDIO_AC3_CHANNEL_LEFT;
+ profileAC3PP.eChannelRouting[1] = OMX_AUDIO_AC3_CHANNEL_RIGHT;
+ err = mOMX->setParameter(mNode, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
+ CHECK_EQ(err, (status_t)OK);
+
+}
+
+
+status_t OMXCodec::setWMAFormat(const sp<MetaData> &meta)
+{
+ if (mIsEncoder) {
+ CODEC_LOGE("WMA encoding not supported");
+ return OK;
+ } else {
+ int32_t version;
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+ QOMX_AUDIO_PARAM_WMA10PROTYPE paramWMA10;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ int32_t numChannels;
+ int32_t bitRate;
+ int32_t sampleRate;
+ int32_t encodeOptions;
+ int32_t blockAlign;
+ int32_t bitspersample;
+ int32_t formattag;
+ int32_t advencopt1;
+ int32_t advencopt2;
+ int32_t VirtualPktSize;
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ CHECK(meta->findInt32(kKeyWMABitspersample, &bitspersample));
+ CHECK(meta->findInt32(kKeyWMAFormatTag, &formattag));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt1,&advencopt1));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt2,&advencopt2));
+ CHECK(meta->findInt32(kKeyWMAVirPktSize,&VirtualPktSize));
+ }
+ if(version==kTypeWMA) {
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = kPortIndexInput;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ InitOMXParams(&paramWMA10);
+ paramWMA10.nPortIndex = kPortIndexInput;
+ }
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ CHECK(meta->findInt32(kKeyBitRate, &bitRate));
+ CHECK(meta->findInt32(kKeyWMAEncodeOpt, &encodeOptions));
+ CHECK(meta->findInt32(kKeyWMABlockAlign, &blockAlign));
+        CODEC_LOGV("Channels: %d, SampleRate: %d, BitRate: %d, "
+                   "EncodeOptions: %d, blockAlign: %d", numChannels,
+ sampleRate, bitRate, encodeOptions, blockAlign);
+ if(sampleRate>48000 || numChannels>2)
+ {
+ ALOGE("Unsupported samplerate/channels");
+ return ERROR_UNSUPPORTED;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess)
+ {
+            CODEC_LOGV("Bitspersample: %d, wmaformattag: %d, "
+                       "advencopt1: %d, advencopt2: %d, VirtualPktSize: %d", bitspersample,
+ formattag, advencopt1, advencopt2, VirtualPktSize);
+ }
+ status_t err = OK;
+ OMX_INDEXTYPE index;
+ if(version==kTypeWMA) {
+ err = mOMX->getParameter(
+ mNode, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ mOMX->getExtensionIndex(mNode,"OMX.Qualcomm.index.audio.wma10Pro",&index);
+ err = mOMX->getParameter(
+ mNode, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ CHECK_EQ(err, (status_t)OK);
+ if(version==kTypeWMA) {
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nEncodeOptions = encodeOptions;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.nChannels = numChannels;
+ paramWMA10.nSamplingRate = sampleRate;
+ paramWMA10.nEncodeOptions = encodeOptions;
+ paramWMA10.nBitRate = bitRate;
+ paramWMA10.nBlockAlign = blockAlign;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.advancedEncodeOpt = advencopt1;
+ paramWMA10.advancedEncodeOpt2 = advencopt2;
+ paramWMA10.formatTag = formattag;
+ paramWMA10.validBitsPerSample = bitspersample;
+ paramWMA10.nVirtualPktSize = VirtualPktSize;
+ }
+ if(version==kTypeWMA) {
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ err = mOMX->setParameter(
+ mNode, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ return err;
+ }
+}
+#endif
+
void OMXCodec::setG711Format(int32_t numChannels) {
CHECK(!mIsEncoder);
setRawAudioFormat(kPortIndexInput, 8000, numChannels);
@@ -3846,19 +4242,32 @@ status_t OMXCodec::stopOmxComponent_l() {
CODEC_LOGV("This component requires a flush before transitioning "
"from EXECUTING to IDLE...");
- bool emulateInputFlushCompletion =
- !flushPortAsync(kPortIndexInput);
+#ifdef QCOM_HARDWARE
+ //DSP supports flushing of ports simultaneously.
+ //Flushing individual port is not supported.
+ if(mQuirks & kRequiresGlobalFlush) {
+ bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
+ if (emulateFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
+ }
+ } else {
+#endif
+ bool emulateInputFlushCompletion =
+ !flushPortAsync(kPortIndexInput);
- bool emulateOutputFlushCompletion =
- !flushPortAsync(kPortIndexOutput);
+ bool emulateOutputFlushCompletion =
+ !flushPortAsync(kPortIndexOutput);
- if (emulateInputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexInput);
- }
+ if (emulateInputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexInput);
+ }
- if (emulateOutputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ if (emulateOutputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
} else {
mPortStatus[kPortIndexInput] = SHUTTING_DOWN;
mPortStatus[kPortIndexOutput] = SHUTTING_DOWN;
@@ -3966,16 +4375,39 @@ status_t OMXCodec::read(
CHECK_EQ((int)mState, (int)EXECUTING);
- bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
- bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
+#ifdef QCOM_HARDWARE
+ //DSP supports flushing of ports simultaneously. Flushing individual port is not supported.
- if (emulateInputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexInput);
- }
+ if(mQuirks & kRequiresGlobalFlush) {
+ bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
+ if (emulateFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
+ }
+ } else {
+
+ //DSP supports flushing of ports simultaneously.
+ //Flushing individual port is not supported.
+ if(mQuirks & kRequiresGlobalFlush) {
+ bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
+ if (emulateFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
+ }
+ } else {
+#endif
+ bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
+ bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
- if (emulateOutputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ if (emulateInputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexInput);
+ }
+
+ if (emulateOutputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ }
+#ifdef QCOM_HARDWARE
+ }
}
+#endif
while (mSeekTimeUs >= 0) {
if ((err = waitForBufferFilled_l()) != OK) {
@@ -4553,9 +4985,46 @@ void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
mOutputFormat->setInt32(kKeyChannelCount, numChannels);
mOutputFormat->setInt32(kKeySampleRate, sampleRate);
mOutputFormat->setInt32(kKeyBitRate, bitRate);
+#ifdef QCOM_HARDWARE
+ } else if (audio_def->eEncoding == OMX_AUDIO_CodingQCELP13 ) {
+ mOutputFormat->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
+ int32_t numChannels, sampleRate, bitRate;
+ inputFormat->findInt32(kKeyChannelCount, &numChannels);
+ inputFormat->findInt32(kKeySampleRate, &sampleRate);
+ inputFormat->findInt32(kKeyBitRate, &bitRate);
+ mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+ mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+ mOutputFormat->setInt32(kKeyBitRate, bitRate);
+ } else if (audio_def->eEncoding == OMX_AUDIO_CodingEVRC ) {
+ mOutputFormat->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EVRC);
+ int32_t numChannels, sampleRate, bitRate;
+ inputFormat->findInt32(kKeyChannelCount, &numChannels);
+ inputFormat->findInt32(kKeySampleRate, &sampleRate);
+ inputFormat->findInt32(kKeyBitRate, &bitRate);
+ mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+ mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+ mOutputFormat->setInt32(kKeyBitRate, bitRate);
} else {
- CHECK(!"Should not be here. Unknown audio encoding.");
+ AString mimeType;
+ if(OK == QCOMXCodec::checkQCFormats(audio_def->eEncoding, &mimeType)) {
+ mOutputFormat->setCString(
+ kKeyMIMEType, mimeType.c_str());
+ int32_t numChannels, sampleRate, bitRate;
+ inputFormat->findInt32(kKeyChannelCount, &numChannels);
+ inputFormat->findInt32(kKeySampleRate, &sampleRate);
+ inputFormat->findInt32(kKeyBitRate, &bitRate);
+ mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+ mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+ mOutputFormat->setInt32(kKeyBitRate, bitRate);
+#endif
+ } else {
+ CHECK(!"Should not be here. Unknown audio encoding.");
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
break;
}
@@ -4756,6 +5225,108 @@ status_t QueryCodecs(
return QueryCodecs(omx, mimeType, queryDecoders, false /*hwCodecOnly*/, results);
}
+#ifdef QCOM_HARDWARE
+void OMXCodec::setEVRCFormat(int32_t numChannels, int32_t sampleRate, int32_t bitRate) {
+ if (mIsEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingEVRC) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingEVRC;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_EVRCTYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = kPortIndexOutput;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else{
+ ALOGI("EVRC decoder \n");
+ }
+}
+
+void OMXCodec::setQCELPFormat(int32_t numChannels, int32_t sampleRate, int32_t bitRate) {
+ if (mIsEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingQCELP13) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingQCELP13;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_QCELP13TYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = kPortIndexOutput;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else{
+ ALOGI("QCELP decoder \n");
+ }
+}
+#endif
+
// These are supposed be equivalent to the logic in
// "audio_channel_out_mask_from_count".
status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]) {
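
Because the kRequiresGlobalFlush changes above are interleaved with #ifdef QCOM_HARDWARE blocks across onCmdComplete(), flushPortAsync(), stopOmxComponent_l() and read(), the consolidated control flow is easier to see in one place. The sketch below is illustrative only; kPortIndexBoth is assumed to be the (OMX_U32)-1 sentinel that the onCmdComplete() change above checks for, with its definition in OMXCodec.h outside this diff.

    // Illustrative consolidation of the global-flush path, not new code.
    void flushForSeekOrStopSketch() {
        if (mQuirks & kRequiresGlobalFlush) {
            // The DSP can only flush input and output together, so a single
            // flush command is issued for both ports; onCmdComplete(OMX_CommandFlush, -1)
            // then re-enables both ports at once.
            bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
            if (emulateFlushCompletion) {
                onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
            }
        } else {
            // Stock behaviour: flush each port independently.
            bool emulateInputFlushCompletion  = !flushPortAsync(kPortIndexInput);
            bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
            if (emulateInputFlushCompletion)  onCmdComplete(OMX_CommandFlush, kPortIndexInput);
            if (emulateOutputFlushCompletion) onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
        }
    }
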
diff --git a/media/libstagefright/QCMediaDefs.cpp b/media/libstagefright/QCMediaDefs.cpp
new file mode 100644
index 0000000..ec2d04e
--- /dev/null
+++ b/media/libstagefright/QCMediaDefs.cpp
@@ -0,0 +1,55 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <QCMediaDefs.h>
+
+namespace android {
+
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+
+const char *MEDIA_MIMETYPE_VIDEO_WMV = "video/x-ms-wmv";
+const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
+const char *MEDIA_MIMETYPE_CONTAINER_ASF = "video/x-ms-asf";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX = "video/divx";
+const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
+const char *MEDIA_MIMETYPE_CONTAINER_AAC = "audio/aac";
+const char *MEDIA_MIMETYPE_CONTAINER_QCP = "audio/vnd.qcelp";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX311 = "video/divx311";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX4 = "video/divx4";
+
+const char *MEDIA_MIMETYPE_CONTAINER_MPEG2 = "video/mp2";
+
+const char *MEDIA_MIMETYPE_CONTAINER_3G2 = "video/3g2";
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/dts";
+
+const char *MEDIA_MIMETYPE_AUDIO_DTS_LBR = "audio/dts-lbr";
+const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb-plus";
+
+} // namespace android
+
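
The constants above are ordinary C strings exported from libstagefright, so callers identify a QC-specific track by plain string comparison against them. A minimal sketch, assuming QCMediaDefs.h is on the include path (the helper name below is illustrative and not part of this patch):

    #include <strings.h>
    #include <QCMediaDefs.h>

    // Returns true when the MIME string names one of the QC-specific audio formats above.
    static bool isQCAudioMime(const char *mime) {
        using namespace android;
        return !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EVRC)
            || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_WMA)
            || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)
            || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EAC3)
            || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_DTS);
    }
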
diff --git a/media/libstagefright/QCOMXCodec.cpp b/media/libstagefright/QCOMXCodec.cpp
new file mode 100644
index 0000000..1b24c8b
--- /dev/null
+++ b/media/libstagefright/QCOMXCodec.cpp
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "QCOMXCodec"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/QCOMXCodec.h>
+#include <media/stagefright/OMXCodec.h>
+#include <QCMetaData.h>
+#include <QCMediaDefs.h>
+#include <OMX_QCOMExtns.h>
+
+#include <OMX_Component.h>
+#include <QOMX_AudioExtensions.h>
+
+
+namespace android {
+
+uint32_t QCOMXCodec::getQCComponentQuirks(const MediaCodecList *list, size_t index) {
+ uint32_t quirks = 0;
+
+ if (list->codecHasQuirk(
+ index, "requires-wma-pro-component")) {
+ quirks |= kRequiresWMAProComponent;
+ }
+ return quirks;
+}
+
+void QCOMXCodec::setASFQuirks(uint32_t quirks, const sp<MetaData> &meta, const char* componentName) {
+ if(quirks & kRequiresWMAProComponent)
+ {
+ int32_t version;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ if(version==kTypeWMA) {
+ componentName = "OMX.qcom.audio.decoder.wma";
+ } else if(version==kTypeWMAPro) {
+ componentName= "OMX.qcom.audio.decoder.wma10Pro";
+ } else if(version==kTypeWMALossLess) {
+ componentName= "OMX.qcom.audio.decoder.wmaLossLess";
+ }
+ }
+}
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+
+status_t QCOMXCodec::configureDIVXCodec(const sp<MetaData> &meta, char* mime, sp<IOMX> OMXhandle, IOMX::node_id nodeID, int port_index) {
+ status_t err = OK;
+ if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ ALOGV("Setting the QOMX_VIDEO_PARAM_DIVXTYPE params ");
+ QOMX_VIDEO_PARAM_DIVXTYPE paramDivX;
+ InitOMXParams(&paramDivX);
+ paramDivX.nPortIndex = port_index;
+ int32_t DivxVersion = 0;
+ CHECK(meta->findInt32(kKeyDivXVersion,&DivxVersion));
+ ALOGV("Divx Version Type %d\n",DivxVersion);
+
+ if(DivxVersion == kTypeDivXVer_4) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat4;
+ } else if(DivxVersion == kTypeDivXVer_5) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat5;
+ } else if(DivxVersion == kTypeDivXVer_6) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat6;
+ } else if(DivxVersion == kTypeDivXVer_3_11 ) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat311;
+ } else {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormatUnused;
+ }
+ paramDivX.eProfile = (QOMX_VIDEO_DIVXPROFILETYPE)0; //Not used for now.
+
+ err = OMXhandle->setParameter(nodeID,
+ (OMX_INDEXTYPE)OMX_QcomIndexParamVideoDivx,
+ &paramDivX, sizeof(paramDivX));
+ }
+
+ return err;
+}
+
+void QCOMXCodec::checkAndAddRawFormat(OMXCodec *handle, const sp<MetaData> &meta){
+ uint32_t type;
+ const void *data;
+ size_t size;
+
+ if (meta->findData(kKeyRawCodecSpecificData, &type, &data, &size)) {
+ ALOGV("OMXCodec::configureCodec found kKeyRawCodecSpecificData of size %d\n", size);
+ handle->addCodecSpecificData(data, size);
+ }
+
+}
+
+status_t QCOMXCodec::setQCFormat(const sp<MetaData> &meta, char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, OMXCodec *handle, bool isEncoder ) {
+ ALOGV("setQCFormat -- called ");
+ status_t err = OK;
+ if ((!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mime)) ||
+ (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EAC3, mime))){
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setAC3Format(numChannels, sampleRate, OMXhandle, nodeID);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EVRC, mime)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setEVRCFormat(numChannels, sampleRate, OMXhandle, nodeID, handle,isEncoder );
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_QCELP, mime)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setQCELPFormat(numChannels, sampleRate, OMXhandle, nodeID, handle,isEncoder);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_WMA, mime)) {
+ err = setWMAFormat(meta, OMXhandle, nodeID, isEncoder);
+ }
+ return err;
+}
+
+
+void QCOMXCodec::setEVRCFormat(int32_t numChannels, int32_t sampleRate, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, OMXCodec *handle, bool isEncoder ) {
+ ALOGV("setEVRCFormat -- called ");
+ if (isEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ handle->setRawAudioFormat(OMXCodec::kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = OMXCodec::kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingEVRC) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = OMXCodec::kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingEVRC;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_EVRCTYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = OMXCodec::kPortIndexOutput;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else{
+ ALOGI("EVRC decoder \n");
+ }
+}
+
+
+void QCOMXCodec::setQCELPFormat(int32_t numChannels, int32_t sampleRate, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, OMXCodec *handle, bool isEncoder ) {
+ if (isEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ handle->setRawAudioFormat(OMXCodec::kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = OMXCodec::kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingQCELP13) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = OMXCodec::kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingQCELP13;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_QCELP13TYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = OMXCodec::kPortIndexOutput;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else {
+ ALOGI("QCELP decoder \n");
+ }
+}
+
+status_t QCOMXCodec::setWMAFormat(const sp<MetaData> &meta, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, bool isEncoder ) {
+ ALOGV("setWMAFormat Called");
+ if (isEncoder) {
+ ALOGE("WMA encoding not supported");
+ return OK;
+ } else {
+ int32_t version;
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+ QOMX_AUDIO_PARAM_WMA10PROTYPE paramWMA10;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ int32_t numChannels;
+ int32_t bitRate;
+ int32_t sampleRate;
+ int32_t encodeOptions;
+ int32_t blockAlign;
+ int32_t bitspersample;
+ int32_t formattag;
+ int32_t advencopt1;
+ int32_t advencopt2;
+ int32_t VirtualPktSize;
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ CHECK(meta->findInt32(kKeyWMABitspersample, &bitspersample));
+ CHECK(meta->findInt32(kKeyWMAFormatTag, &formattag));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt1,&advencopt1));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt2,&advencopt2));
+ CHECK(meta->findInt32(kKeyWMAVirPktSize,&VirtualPktSize));
+ }
+ if(version==kTypeWMA) {
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = OMXCodec::kPortIndexInput;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ InitOMXParams(&paramWMA10);
+ paramWMA10.nPortIndex = OMXCodec::kPortIndexInput;
+ }
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ CHECK(meta->findInt32(kKeyBitRate, &bitRate));
+ CHECK(meta->findInt32(kKeyWMAEncodeOpt, &encodeOptions));
+ CHECK(meta->findInt32(kKeyWMABlockAlign, &blockAlign));
+        ALOGV("Channels: %d, SampleRate: %d, BitRate: %d, "
+ "EncodeOptions: %d, blockAlign: %d", numChannels,
+ sampleRate, bitRate, encodeOptions, blockAlign);
+ if(sampleRate>48000 || numChannels>2)
+ {
+ ALOGE("Unsupported samplerate/channels");
+ return ERROR_UNSUPPORTED;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess)
+ {
+ ALOGV("Bitspersample: %d, wmaformattag: %d,"
+ "advencopt1: %d, advencopt2: %d VirtualPktSize %d", bitspersample,
+ formattag, advencopt1, advencopt2, VirtualPktSize);
+ }
+ status_t err = OK;
+ OMX_INDEXTYPE index;
+ if(version==kTypeWMA) {
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ OMXhandle->getExtensionIndex(nodeID,"OMX.Qualcomm.index.audio.wma10Pro",&index);
+ err = OMXhandle->getParameter(
+ nodeID, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ CHECK_EQ(err, (status_t)OK);
+ if(version==kTypeWMA) {
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nEncodeOptions = encodeOptions;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.nChannels = numChannels;
+ paramWMA10.nSamplingRate = sampleRate;
+ paramWMA10.nEncodeOptions = encodeOptions;
+ paramWMA10.nBitRate = bitRate;
+ paramWMA10.nBlockAlign = blockAlign;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.advancedEncodeOpt = advencopt1;
+ paramWMA10.advancedEncodeOpt2 = advencopt2;
+ paramWMA10.formatTag = formattag;
+ paramWMA10.validBitsPerSample = bitspersample;
+ paramWMA10.nVirtualPktSize = VirtualPktSize;
+ }
+ if(version==kTypeWMA) {
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ err = OMXhandle->setParameter(
+ nodeID, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ return err;
+ }
+ return OK;
+}
+
+
+void QCOMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID) {
+ QOMX_AUDIO_PARAM_AC3TYPE profileAC3;
+ QOMX_AUDIO_PARAM_AC3PP profileAC3PP;
+ OMX_INDEXTYPE indexTypeAC3;
+ OMX_INDEXTYPE indexTypeAC3PP;
+ OMX_PARAM_PORTDEFINITIONTYPE portParam;
+
+ //configure input port
+ ALOGV("setAC3Format samplerate %d, numChannels %d", sampleRate, numChannels);
+ InitOMXParams(&portParam);
+ portParam.nPortIndex = 0;
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+
+ //configure output port
+ portParam.nPortIndex = 1;
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+
+ err = OMXhandle->getExtensionIndex(nodeID, OMX_QCOM_INDEX_PARAM_AC3TYPE, &indexTypeAC3);
+
+ InitOMXParams(&profileAC3);
+ profileAC3.nPortIndex = OMXCodec::kPortIndexInput;
+ err = OMXhandle->getParameter(nodeID, indexTypeAC3, &profileAC3, sizeof(profileAC3));
+ CHECK_EQ(err,(status_t)OK);
+
+ profileAC3.nSamplingRate = sampleRate;
+ profileAC3.nChannels = 2;
+ profileAC3.eChannelConfig = OMX_AUDIO_AC3_CHANNEL_CONFIG_2_0;
+
+ ALOGV("numChannels = %d, profileAC3.nChannels = %d", numChannels, profileAC3.nChannels);
+
+ err = OMXhandle->setParameter(nodeID, indexTypeAC3, &profileAC3, sizeof(profileAC3));
+ CHECK_EQ(err,(status_t)OK);
+
+ //for output port
+ OMX_AUDIO_PARAM_PCMMODETYPE profilePcm;
+ InitOMXParams(&profilePcm);
+ profilePcm.nPortIndex = OMXCodec::kPortIndexOutput;
+ err = OMXhandle->getParameter(nodeID, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
+ CHECK_EQ(err, (status_t)OK);
+
+ profilePcm.nSamplingRate = sampleRate;
+ err = OMXhandle->setParameter(nodeID, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
+ CHECK_EQ(err, (status_t)OK);
+ OMXhandle->getExtensionIndex(nodeID, OMX_QCOM_INDEX_PARAM_AC3PP, &indexTypeAC3PP);
+
+ InitOMXParams(&profileAC3PP);
+ profileAC3PP.nPortIndex = OMXCodec::kPortIndexInput;
+ err = OMXhandle->getParameter(nodeID, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
+ CHECK_EQ(err, (status_t)OK);
+
+ int i;
+ int channel_routing[6];
+
+ for (i=0; i<6; i++) {
+ channel_routing[i] = -1;
+ }
+ for (i=0; i<6; i++) {
+ profileAC3PP.eChannelRouting[i] = (OMX_AUDIO_AC3_CHANNEL_ROUTING)channel_routing[i];
+ }
+
+ profileAC3PP.eChannelRouting[0] = OMX_AUDIO_AC3_CHANNEL_LEFT;
+ profileAC3PP.eChannelRouting[1] = OMX_AUDIO_AC3_CHANNEL_RIGHT;
+ err = OMXhandle->setParameter(nodeID, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
+ CHECK_EQ(err, (status_t)OK);
+}
+
+
+status_t QCOMXCodec::setQCVideoInputFormat(const char *mime, OMX_VIDEO_CODINGTYPE *compressionFormat) {
+ status_t retVal = OK;
+ if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime)){
+ *compressionFormat= (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime)){
+ *compressionFormat= (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)){
+ *compressionFormat= (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_WMV, mime)){
+ *compressionFormat = OMX_VIDEO_CodingWMV;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_CONTAINER_MPEG2, mime)){
+ *compressionFormat = OMX_VIDEO_CodingMPEG2;
+ } else {
+ retVal = BAD_VALUE;
+ }
+
+ return retVal;
+}
+
+status_t QCOMXCodec::setQCVideoOutputFormat(const char *mime, OMX_VIDEO_CODINGTYPE *compressionFormat) {
+ status_t retVal = OK;
+ if(!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if(!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if(!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_WMV, mime)){
+ *compressionFormat = OMX_VIDEO_CodingWMV;
+ } else {
+ retVal = BAD_VALUE;
+ }
+ return retVal;
+}
+
+
+void QCOMXCodec::checkQCRole( const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder, const char *mime){
+ ALOGV("checkQCRole Called");
+ struct MimeToRole {
+ const char *mime;
+ const char *decoderRole;
+ const char *encoderRole;
+ };
+
+ static const MimeToRole kQCMimeToRole[] = {
+ { MEDIA_MIMETYPE_AUDIO_EVRC,
+ "audio_decoder.evrchw", "audio_encoder.evrc" },
+ { MEDIA_MIMETYPE_AUDIO_QCELP,
+        "audio_decoder.qcelp13Hw", "audio_encoder.qcelp13" },
+ { MEDIA_MIMETYPE_VIDEO_DIVX,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX311,
+ "video_decoder.divx", NULL },
+ };
+
+ static const size_t kNumMimeToRole =
+ sizeof(kQCMimeToRole) / sizeof(kQCMimeToRole[0]);
+
+ size_t i;
+ for (i = 0; i < kNumMimeToRole; ++i) {
+ if (!strcasecmp(mime, kQCMimeToRole[i].mime)) {
+ break;
+ }
+ }
+
+ if (i == kNumMimeToRole) {
+ return;
+ }
+
+ const char *role =
+ isEncoder ? kQCMimeToRole[i].encoderRole
+ : kQCMimeToRole[i].decoderRole;
+
+ if (role != NULL) {
+ OMX_PARAM_COMPONENTROLETYPE roleParams;
+ InitOMXParams(&roleParams);
+
+ strncpy((char *)roleParams.cRole,
+ role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+ roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+ status_t err = omx->setParameter(
+ node, OMX_IndexParamStandardComponentRole,
+ &roleParams, sizeof(roleParams));
+
+ if (err != OK) {
+ ALOGW("Failed to set standard component role '%s'.", role);
+ }
+ }
+
+}
+
+status_t QCOMXCodec::checkQCFormats(int format, AString* meta){
+ ALOGV("checkQCFormats called");
+ status_t retVal = OK;
+ if (format == OMX_AUDIO_CodingQCELP13 ) {
+ *meta = MEDIA_MIMETYPE_AUDIO_QCELP;
+ } else if(format == OMX_AUDIO_CodingEVRC ) {
+ *meta = MEDIA_MIMETYPE_AUDIO_EVRC;
+ } else {
+ retVal = BAD_VALUE;
+ }
+ return retVal;
+}
+
+}
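
Every audio setter in QCOMXCodec above follows the same OMX get-modify-set sequence: fill nSize/nVersion with InitOMXParams, read the current parameter, change only the fields of interest, and write it back, checking the result of each call. A minimal sketch of that pattern, assuming a file-static InitOMXParams helper like the one above is in scope (the function name below is illustrative; it mirrors setEVRCFormat but returns errors instead of aborting via CHECK_EQ):

    // Generic get-modify-set round trip against one OMX parameter index.
    static status_t setEvrcChannels(const sp<IOMX> &omx, IOMX::node_id node,
                                    OMX_U32 portIndex, int32_t numChannels) {
        OMX_AUDIO_PARAM_EVRCTYPE profile;
        InitOMXParams(&profile);                     // fills nSize and nVersion
        profile.nPortIndex = portIndex;
        status_t err = omx->getParameter(node, OMX_IndexParamAudioEvrc,
                                         &profile, sizeof(profile));
        if (err != OK) {
            return err;                              // propagate instead of CHECK-ing
        }
        profile.nChannels = numChannels;             // touch only the field of interest
        return omx->setParameter(node, OMX_IndexParamAudioEvrc,
                                 &profile, sizeof(profile));
    }
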
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index b7cf96e..510252a 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,7 +43,12 @@ static bool FileHasAcceptableExtension(const char *extension) {
".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
- ".avi", ".mpeg", ".mpg"
+ ".avi", ".mpg",
+#ifndef QCOM_HARDWARE
+ ".mpeg"
+#else
+ ".qcp", ".awb", ".ac3", ".dts", ".wmv"
+#endif
};
static const size_t kNumValidExtensions =
sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
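
With the hunk above, builds with QCOM_HARDWARE defined trade ".mpeg" for the QC-specific container extensions in the scanner's whitelist. The lookup itself is a simple case-insensitive scan over the table; a reduced sketch of the same check, with the table trimmed to just the newly added entries:

    #include <strings.h>
    #include <stddef.h>

    static bool hasAcceptableExtension(const char *extension) {
        static const char *kValid[] = { ".qcp", ".awb", ".ac3", ".dts", ".wmv" };
        for (size_t i = 0; i < sizeof(kValid) / sizeof(kValid[0]); ++i) {
            if (!strcasecmp(extension, kValid[i])) {
                return true;
            }
        }
        return false;
    }
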
diff --git a/media/libstagefright/TunnelPlayer.cpp b/media/libstagefright/TunnelPlayer.cpp
new file mode 100644
index 0000000..34c260f
--- /dev/null
+++ b/media/libstagefright/TunnelPlayer.cpp
@@ -0,0 +1,782 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDDEBUG 0
+#define LOG_NDEBUG 0
+#define LOG_TAG "TunnelPlayer"
+#include <utils/Log.h>
+#include <utils/threads.h>
+
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+
+#include <media/stagefright/TunnelPlayer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <hardware_legacy/power.h>
+
+#include <linux/unistd.h>
+
+#include "include/AwesomePlayer.h"
+#include <powermanager/PowerManager.h>
+
+static const char mName[] = "TunnelPlayer";
+#define MEM_METADATA_SIZE 64
+#define MEM_BUFFER_SIZE (600*1024 - MEM_METADATA_SIZE)
+#define MEM_BUFFER_COUNT 4
+
+namespace android {
+int TunnelPlayer::mTunnelObjectsAlive = 0;
+
+TunnelPlayer::TunnelPlayer(
+ const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *observer, bool hasVideo)
+:AudioPlayer(audioSink,observer),
+mPositionTimeMediaUs(-1),
+mPositionTimeRealUs(-1),
+mInternalSeeking(false),
+mStarted(false),
+mA2DPEnabled(false),
+mSampleRate(0),
+mLatencyUs(0),
+mFrameSize(0),
+mNumFramesPlayed(0),
+mNumFramesPlayedSysTimeUs(0),
+mInputBuffer(NULL),
+mSeeking(false),
+mReachedEOS(false),
+mReachedOutputEOS(false),
+mFinalStatus(OK),
+mSeekTimeUs(0),
+mPauseTime(0),
+mIsFirstBuffer(false),
+mFirstBufferResult(OK),
+mFirstBuffer(NULL),
+mAudioSink(audioSink),
+mObserver(observer) {
+ ALOGD("TunnelPlayer::TunnelPlayer()");
+ mTunnelObjectsAlive++;
+ numChannels = 0;
+ mPaused = false;
+ mIsA2DPEnabled = false;
+ mAudioFlinger = NULL;
+ mAudioFlingerClient = NULL;
+ mFormat = AUDIO_FORMAT_MP3;
+ mQueue.start();
+ mQueueStarted = true;
+ mPauseEvent = new TunnelEvent(this, &TunnelPlayer::onPauseTimeOut);
+ mPauseEventPending = false;
+
+ //getAudioFlinger();
+ //ALOGD("Registering client with AudioFlinger");
+ //mAudioFlinger->registerClient(mAudioFlingerClient);
+
+ mSeekTimeUs = 0;
+
+ mHasVideo = hasVideo;
+ initCheck = true;
+
+ //mDeathRecipient = new PMDeathRecipient(this);
+}
+void TunnelPlayer::acquireWakeLock()
+{
+ /*Mutex::Autolock _l(pmLock);
+
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("Thread %s cannot connect to the power manager service", mName);
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+ if (mPowerManager != 0 && mWakeLockToken == 0) {
+ sp<IBinder> binder = new BBinder();
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder,
+ String16(mName));
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ }
+ ALOGV("acquireWakeLock() %s status %d", mName, status);
+ }*/
+}
+
+void TunnelPlayer::releaseWakeLock()
+{
+ /*Mutex::Autolock _l(pmLock);
+
+ if (mWakeLockToken != 0) {
+ ALOGV("releaseWakeLock() %s", mName);
+ if (mPowerManager != 0) {
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ }
+ mWakeLockToken.clear();
+ }*/
+}
+
+void TunnelPlayer::clearPowerManager()
+{
+ Mutex::Autolock _l(pmLock);
+ releaseWakeLock();
+ mPowerManager.clear();
+}
+
+void TunnelPlayer::PMDeathRecipient::binderDied(const wp<IBinder>& who)
+{
+ parentClass->clearPowerManager();
+ ALOGW("power manager service died !!!");
+}
+
+TunnelPlayer::~TunnelPlayer() {
+ ALOGD("TunnelPlayer::~TunnelPlayer()");
+ if (mQueueStarted) {
+ mQueue.stop();
+ }
+
+ reset();
+ //mAudioFlinger->deregisterClient(mAudioFlingerClient);
+ mTunnelObjectsAlive--;
+
+ releaseWakeLock();
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = mPowerManager->asBinder();
+ binder->unlinkToDeath(mDeathRecipient);
+ }
+
+
+}
+
+void TunnelPlayer::getAudioFlinger() {
+/* Mutex::Autolock _l(mAudioFlingerLock);
+
+ if ( mAudioFlinger.get() == 0 ) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_flinger"));
+ if ( binder != 0 )
+ break;
+ ALOGW("AudioFlinger not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while ( true );
+ if ( mAudioFlingerClient == NULL ) {
+ mAudioFlingerClient = new AudioFlingerTunnelDecodeClient(this);
+ }
+
+ binder->linkToDeath(mAudioFlingerClient);
+ mAudioFlinger = interface_cast<IAudioFlinger>(binder);
+ }
+ ALOGE_IF(mAudioFlinger==0, "no AudioFlinger!?");*/
+}
+
+/*TunnelPlayer::AudioFlingerTunnelDecodeClient::AudioFlingerTunnelDecodeClient(void *obj)
+{
+ ALOGD("TunnelPlayer::AudioFlingerTunnelDecodeClient - Constructor");
+ pBaseClass = (TunnelPlayer*)obj;
+}
+
+void TunnelPlayer::AudioFlingerTunnelDecodeClient::binderDied(const wp<IBinder>& who) {
+ Mutex::Autolock _l(pBaseClass->mAudioFlingerLock);
+
+ pBaseClass->mAudioFlinger.clear();
+ ALOGW("AudioFlinger server died!");
+}*/
+
+/*void TunnelPlayer::AudioFlingerTunnelDecodeClient::ioConfigChanged(int event, int ioHandle, void *param2) {
+ ALOGV("ioConfigChanged() event %d", event);
+
+
+ if (event != AudioSystem::A2DP_OUTPUT_STATE) {
+ return;
+ }
+
+ switch ( event ) {
+ case AudioSystem::A2DP_OUTPUT_STATE:
+ {
+ if ( -1 == ioHandle ) {
+ if ( pBaseClass->mIsA2DPEnabled ) {
+ pBaseClass->mIsA2DPEnabled = false;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+ ALOGV("ioConfigChanged:: A2DP Disabled");
+ }
+ } else {
+ if ( !pBaseClass->mIsA2DPEnabled ) {
+ pBaseClass->mIsA2DPEnabled = true;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+ ALOGV("ioConfigChanged:: A2DP Enabled");
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ ALOGV("ioConfigChanged Out");
+}*/
+
+/*void TunnelPlayer::handleA2DPSwitch() {
+ //TODO: Implement
+}
+*/
+
+void TunnelPlayer::setSource(const sp<MediaSource> &source) {
+ CHECK(mSource == NULL);
+ ALOGD("Setting source from Tunnel Player");
+ mSource = source;
+}
+
+status_t TunnelPlayer::start(bool sourceAlreadyStarted) {
+ CHECK(!mStarted);
+ CHECK(mSource != NULL);
+
+ ALOGD("start: sourceAlreadyStarted %d", sourceAlreadyStarted);
+ //Check if the source is started, start it
+ status_t err;
+ if (!sourceAlreadyStarted) {
+ err = mSource->start();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+    //Create the decoder thread and initialize all the
+    //mutexes and condition variables
+ createThreads();
+ ALOGV("All Threads Created.");
+
+ // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+ // of playback, if there is one, getFormat below will retrieve the
+ // updated format, if there isn't, we'll stash away the valid buffer
+ // of data to be used on the first audio callback.
+
+ CHECK(mFirstBuffer == NULL);
+
+ MediaSource::ReadOptions options;
+ if (mSeeking) {
+ options.setSeekTo(mSeekTimeUs);
+ mSeeking = false;
+ }
+
+ mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
+ if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+ ALOGV("INFO_FORMAT_CHANGED!!!");
+ CHECK(mFirstBuffer == NULL);
+ mFirstBufferResult = OK;
+ mIsFirstBuffer = false;
+ } else {
+ mIsFirstBuffer = true;
+ }
+
+ sp<MetaData> format = mSource->getFormat();
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ if (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC)) {
+ mFormat = AUDIO_FORMAT_AAC;
+ }
+ if (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
+ mFormat = AUDIO_FORMAT_AMR_WB;
+ ALOGV("TunnelPlayer::start AUDIO_FORMAT_AMR_WB");
+ }
+// if (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)) {
+// mFormat = AUDIO_FORMAT_AMR_WB_PLUS;
+// ALOGV("TunnelPlayer::start AUDIO_FORMAT_AMR_WB_PLUS");
+// }
+
+ CHECK(success);
+
+ success = format->findInt32(kKeySampleRate, &mSampleRate);
+ CHECK(success);
+
+ success = format->findInt32(kKeyChannelCount, &numChannels);
+ CHECK(success);
+
+ if(!format->findInt32(kKeyChannelMask, &mChannelMask)) {
+ // log only when there's a risk of ambiguity of channel mask selection
+ ALOGI_IF(numChannels > 2,
+ "source format didn't specify channel mask, using (%d) channel order", numChannels);
+ mChannelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ }
+ audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_TUNNEL |
+ AUDIO_OUTPUT_FLAG_DIRECT);
+ ALOGV("mAudiosink->open() mSampleRate %d, numChannels %d, mChannelMask %d, flags %d",mSampleRate, numChannels, mChannelMask, flags);
+ err = mAudioSink->open(
+ mSampleRate, numChannels, mChannelMask, mFormat,
+ DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ &TunnelPlayer::AudioSinkCallback,
+ this,
+ (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags));
+
+ if (err != OK) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (!sourceAlreadyStarted) {
+ mSource->stop();
+ }
+
+ ALOGE("Opening a routing session failed");
+ return err;
+ }
+
+ if (!mIsA2DPEnabled) {
+ acquireWakeLock();
+ }
+
+ mIsAudioRouted = true;
+ mStarted = true;
+ mAudioSink->start();
+ ALOGV("Waking up decoder thread");
+ pthread_cond_signal(&extractor_cv);
+
+ return OK;
+}
+
+status_t TunnelPlayer::seekTo(int64_t time_us) {
+
+ ALOGV("seekTo: time_us %lld", time_us);
+ if ( mReachedEOS ) {
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ }
+ mSeeking = true;
+ mSeekTimeUs = time_us;
+ ALOGV("In seekTo(), mSeekTimeUs %lld",mSeekTimeUs);
+ mAudioSink->flush();
+ pthread_cond_signal(&extractor_cv);
+ //TODO: Update the mPauseTime
+ return OK;
+}
+void TunnelPlayer::pause(bool playPendingSamples) {
+ CHECK(mStarted);
+ if (mPaused) {
+ return;
+ }
+ ALOGV("pause: playPendingSamples %d", playPendingSamples);
+ mPaused = true;
+ A2DPState state;
+ if(!mPauseEventPending) {
+ ALOGV("Posting an event for Pause timeout");
+ mQueue.postEventWithDelay(mPauseEvent, TUNNEL_PAUSE_TIMEOUT_USEC);
+ mPauseEventPending = true;
+ }
+ mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_DISABLED);
+ if (mAudioSink.get() != NULL) {
+ ALOGV("AudioSink pause");
+ mAudioSink->pause();
+ }
+}
+
+void TunnelPlayer::resume() {
+ ALOGV("resume: mPaused %d",mPaused);
+ if ( mPaused) {
+ CHECK(mStarted);
+ if (!mIsA2DPEnabled) {
+ if(mPauseEventPending) {
+                ALOGV("Resume(): Cancelling the pauseTimeout event");
+ mPauseEventPending = false;
+ mQueue.cancelEvent(mPauseEvent->eventID());
+ }
+
+ }
+ audio_format_t format;
+
+ if (!mIsAudioRouted) {
+ audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_TUNNEL |
+ AUDIO_OUTPUT_FLAG_DIRECT);
+ status_t err = mAudioSink->open(
+ mSampleRate, numChannels, mChannelMask, mFormat,
+ DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ &TunnelPlayer::AudioSinkCallback,
+ this,
+ (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags ));
+ if (err != NO_ERROR) {
+ ALOGE("Audio sink open failed.");
+ }
+ mIsAudioRouted = true;
+ }
+ mPaused = false;
+ mAudioSink->start();
+ pthread_cond_signal(&extractor_cv);
+ }
+}
+
+//static
+size_t TunnelPlayer::AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *buffer, size_t size, void *cookie) {
+ if (buffer == NULL && size == AudioTrack::EVENT_UNDERRUN) {
+ TunnelPlayer *me = (TunnelPlayer *)cookie;
+ me->mReachedEOS = true;
+ me->mReachedOutputEOS = true;
+ ALOGV("postAudioEOS");
+ me->mObserver->postAudioEOS(0);
+ }
+ return 1;
+}
+
+void TunnelPlayer::reset() {
+
+ mReachedEOS = true;
+
+ // make sure Decoder thread has exited
+ requestAndWaitForExtractorThreadExit();
+
+ // Close the audiosink after all the threads exited to make sure
+ mAudioSink->stop();
+ mAudioSink->close();
+ //TODO: Release Wake lock
+
+ // Make sure to release any buffer we hold onto so that the
+ // source is able to stop().
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (mInputBuffer != NULL) {
+ ALOGV("AudioPlayer releasing input buffer.");
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ mSource->stop();
+
+ // The following hack is necessary to ensure that the OMX
+ // component is completely released by the time we may try
+ // to instantiate it again.
+ wp<MediaSource> tmp = mSource;
+ mSource.clear();
+ while (tmp.promote() != NULL) {
+ usleep(1000);
+ }
+
+ mPositionTimeMediaUs = -1;
+ mPositionTimeRealUs = -1;
+ mSeeking = false;
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ mFinalStatus = OK;
+ mStarted = false;
+}
+
+
+bool TunnelPlayer::isSeeking() {
+ Mutex::Autolock autoLock(mLock);
+ return mSeeking;
+}
+
+bool TunnelPlayer::reachedEOS(status_t *finalStatus) {
+ *finalStatus = OK;
+ Mutex::Autolock autoLock(mLock);
+ *finalStatus = mFinalStatus;
+ return mReachedOutputEOS;
+}
+
+
+void *TunnelPlayer::extractorThreadWrapper(void *me) {
+ static_cast<TunnelPlayer *>(me)->extractorThreadEntry();
+ return NULL;
+}
+
+
+void TunnelPlayer::extractorThreadEntry() {
+
+ pthread_mutex_lock(&extractor_mutex);
+ uint32_t BufferSizeToUse = MEM_BUFFER_SIZE;
+
+ pid_t tid = gettid();
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+ prctl(PR_SET_NAME, (unsigned long)"Tunnel DecodeThread", 0, 0, 0);
+
+ ALOGV("extractorThreadEntry wait for signal \n");
+ if (!mStarted) {
+ pthread_cond_wait(&extractor_cv, &extractor_mutex);
+ }
+ ALOGV("extractorThreadEntry ready to work \n");
+ pthread_mutex_unlock(&extractor_mutex);
+ if (killExtractorThread) {
+ return;
+ }
+ if(mSource != NULL) {
+ sp<MetaData> format = mSource->getFormat();
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ }
+ void* local_buf = malloc(BufferSizeToUse);
+ int bytesWritten = 0;
+ while (!killExtractorThread) {
+
+ if (mReachedEOS || mPaused || !mIsAudioRouted) {
+ pthread_mutex_lock(&extractor_mutex);
+ pthread_cond_wait(&extractor_cv, &extractor_mutex);
+ pthread_mutex_unlock(&extractor_mutex);
+ continue;
+ }
+
+ if (!mIsA2DPEnabled) {
+ ALOGW("FillBuffer: MemBuffer size %d", BufferSizeToUse);
+ ALOGV("Fillbuffer started");
+ bytesWritten = fillBuffer(local_buf, BufferSizeToUse);
+ ALOGV("FillBuffer completed bytesToWrite %d", bytesWritten);
+ if(!killExtractorThread) {
+ mAudioSink->write(local_buf, bytesWritten);
+ if(mReachedEOS && bytesWritten)
+ mAudioSink->write(local_buf, 0);
+ }
+ }
+ }
+
+ free(local_buf);
+
+ //TODO: Call fillbuffer with different size and write to mAudioSink()
+}
+void TunnelPlayer::createThreads() {
+
+ //Initialize all the Mutexes and Condition Variables
+ pthread_mutex_init(&extractor_mutex, NULL);
+ pthread_cond_init (&extractor_cv, NULL);
+
+    // Create the decoder (extractor) thread
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ killExtractorThread = false;
+
+ extractorThreadAlive = true;
+
+ ALOGV("Creating decoder Thread");
+ pthread_create(&extractorThread, &attr, extractorThreadWrapper, this);
+
+ pthread_attr_destroy(&attr);
+}
+size_t TunnelPlayer::fillBuffer(void *data, size_t size) {
+
+ if (mReachedEOS) {
+ return 0;
+ }
+
+ bool postSeekComplete = false;
+
+ size_t size_done = 0;
+ size_t size_remaining = size;
+
+ while (size_remaining > 0) {
+ MediaSource::ReadOptions options;
+ {
+ Mutex::Autolock autoLock(mLock);
+ if(mSeeking) {
+ mInternalSeeking = false;
+ }
+
+ if (mSeeking || mInternalSeeking) {
+ if (mIsFirstBuffer) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+ mIsFirstBuffer = false;
+ }
+
+ MediaSource::ReadOptions::SeekMode seekMode;
+ seekMode = MediaSource::ReadOptions::SEEK_CLOSEST_SYNC;
+ options.setSeekTo(mSeekTimeUs, seekMode );
+ if (mInputBuffer != NULL) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ // This is to ignore the data already filled in the output buffer
+ size_done = 0;
+ size_remaining = size;
+
+ mSeeking = false;
+ if (mObserver && !mInternalSeeking) {
+ ALOGD("fillBuffer: Posting audio seek complete event");
+ postSeekComplete = true;
+ }
+ mInternalSeeking = false;
+ }
+ }
+ if (mInputBuffer == NULL) {
+ status_t err;
+
+ if (mIsFirstBuffer) {
+ mInputBuffer = mFirstBuffer;
+ mFirstBuffer = NULL;
+ err = mFirstBufferResult;
+
+ mIsFirstBuffer = false;
+ } else {
+ err = mSource->read(&mInputBuffer, &options);
+ }
+
+ CHECK((err == OK && mInputBuffer != NULL)
+ || (err != OK && mInputBuffer == NULL));
+ {
+ Mutex::Autolock autoLock(mLock);
+
+ if (err != OK) {
+ ALOGD("fill buffer - reached eos true");
+ mReachedEOS = true;
+ mFinalStatus = err;
+ break;
+ }
+ }
+
+ }
+ if (mInputBuffer->range_length() == 0) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ continue;
+ }
+
+ size_t copy = size_remaining;
+ if (copy > mInputBuffer->range_length()) {
+ copy = mInputBuffer->range_length();
+ }
+ memcpy((char *)data + size_done,
+ (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
+ copy);
+
+ mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
+ mInputBuffer->range_length() - copy);
+
+ size_done += copy;
+ size_remaining -= copy;
+ }
+ if(mReachedEOS)
+ memset((char *)data + size_done, 0x0, size_remaining);
+ ALOGV("fill buffer size_done = %d",size_done);
+
+ if (postSeekComplete) {
+ mObserver->postAudioSeekComplete();
+ }
+
+ return size_done;
+}
+
+int64_t TunnelPlayer::getRealTimeUs() {
+ Mutex::Autolock autoLock(mLock);
+ return getRealTimeUsLocked();
+}
+
+
+int64_t TunnelPlayer::getRealTimeUsLocked(){
+ //Used for AV sync: irrelevant API for Tunnel.
+ return 0;
+}
+
+int64_t TunnelPlayer::getTimeStamp(A2DPState state) {
+ uint64_t timestamp = 0;
+ switch (state) {
+ case A2DP_ENABLED:
+ case A2DP_DISCONNECT:
+ ALOGV("Get timestamp for A2DP");
+ break;
+ case A2DP_DISABLED:
+ case A2DP_CONNECT: {
+ mAudioSink->getTimeStamp(&timestamp);
+ break;
+ }
+ default:
+ break;
+ }
+ ALOGV("timestamp %lld ", timestamp);
+ return timestamp;
+}
+
+int64_t TunnelPlayer::getMediaTimeUs() {
+ Mutex::Autolock autoLock(mLock);
+ ALOGV("getMediaTimeUs() mPaused %d mSeekTimeUs %lld mPauseTime %lld", mPaused, mSeekTimeUs, mPauseTime);
+ if (mPaused) {
+ return mPauseTime;
+ } else {
+ A2DPState state = mIsA2DPEnabled ? A2DP_ENABLED : A2DP_DISABLED;
+ return (mSeekTimeUs + getTimeStamp(state));
+ }
+}
+
+bool TunnelPlayer::getMediaTimeMapping(
+ int64_t *realtime_us, int64_t *mediatime_us) {
+ Mutex::Autolock autoLock(mLock);
+
+ *realtime_us = mPositionTimeRealUs;
+ *mediatime_us = mPositionTimeMediaUs;
+
+ return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
+}
+
+void TunnelPlayer::requestAndWaitForExtractorThreadExit() {
+
+ if (!extractorThreadAlive)
+ return;
+ mAudioSink->flush();
+ killExtractorThread = true;
+ pthread_cond_signal(&extractor_cv);
+ pthread_join(extractorThread,NULL);
+ ALOGD("Extractor thread killed");
+}
+
+void TunnelPlayer::onPauseTimeOut() {
+ ALOGV("onPauseTimeOut");
+ if (!mPauseEventPending) {
+ return;
+ }
+ mPauseEventPending = false;
+ if(!mIsA2DPEnabled) {
+ // 1.) Set seek flags
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ mSeekTimeUs += getTimeStamp(A2DP_DISABLED);
+ mInternalSeeking = true;
+
+ // 2.) Close routing Session
+ mAudioSink->close();
+ mIsAudioRouted = false;
+
+ // 3.) Release Wake Lock
+ releaseWakeLock();
+ }
+
+}
+
+} //namespace android
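
TunnelPlayer hands all extraction work to a single worker thread that sleeps on extractor_cv whenever playback is stopped, paused, or unrouted, and is woken by start(), resume(), seekTo(), and the teardown path. A minimal sketch of that gate in isolation, assuming plain pthreads; the globals below are illustrative stand-ins for the class members, and the predicate loop guards against spurious wakeups, which the player itself handles by re-checking its state flags on each loop iteration:

    #include <pthread.h>

    static pthread_mutex_t gMutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  gCond  = PTHREAD_COND_INITIALIZER;
    static bool gRunnable = false;

    static void waitUntilRunnable() {            // worker side
        pthread_mutex_lock(&gMutex);
        while (!gRunnable) {
            pthread_cond_wait(&gCond, &gMutex);  // atomically unlocks and sleeps
        }
        pthread_mutex_unlock(&gMutex);
    }

    static void makeRunnable() {                 // control side (start/resume/seek)
        pthread_mutex_lock(&gMutex);
        gRunnable = true;
        pthread_cond_signal(&gCond);
        pthread_mutex_unlock(&gMutex);
    }
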
diff --git a/media/libstagefright/WAVEWriter.cpp b/media/libstagefright/WAVEWriter.cpp
new file mode 100644
index 0000000..9700fa7
--- /dev/null
+++ b/media/libstagefright/WAVEWriter.cpp
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WAVEWriter"
+#include <utils/Log.h>
+
+#include <media/stagefright/WAVEWriter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/mediarecorder.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace android {
+
+static struct wav_header hdr;
+
+
+WAVEWriter::WAVEWriter(const char *filename)
+ : mFd(-1),
+ mInitCheck(NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false) {
+
+ mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (mFd >= 0) {
+ mInitCheck = OK;
+ }
+}
+
+WAVEWriter::WAVEWriter(int fd)
+ : mFd(dup(fd)),
+ mInitCheck(mFd < 0? NO_INIT: OK),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false) {
+}
+
+WAVEWriter::~WAVEWriter() {
+ if (mStarted) {
+ stop();
+ }
+
+ if (mFd != -1) {
+ close(mFd);
+ mFd = -1;
+ }
+}
+
+status_t WAVEWriter::initCheck() const {
+ return mInitCheck;
+}
+
+status_t WAVEWriter::addSource(const sp<MediaSource> &source) {
+ uint32_t count;
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource != NULL) {
+ ALOGE("A source already exists, return");
+ return UNKNOWN_ERROR;
+ }
+
+ sp<MetaData> meta = source->getFormat();
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ int32_t channelCount;
+ int32_t sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+ memset(&hdr, 0, sizeof(struct wav_header));
+ hdr.riff_id = ID_RIFF;
+ hdr.riff_fmt = ID_WAVE;
+ hdr.fmt_id = ID_FMT;
+ hdr.fmt_sz = 16;
+ hdr.audio_format = FORMAT_PCM;
+ hdr.num_channels = channelCount;
+ hdr.sample_rate = sampleRate;
+ hdr.bits_per_sample = 16;
+ hdr.byte_rate = (sampleRate * channelCount * hdr.bits_per_sample) / 8;
+ hdr.block_align = ( hdr.bits_per_sample * channelCount ) / 8;
+ hdr.data_id = ID_DATA;
+ hdr.data_sz = 0;
+ hdr.riff_sz = hdr.data_sz + 44 - 8;
+
+ if (write(mFd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
+ ALOGE("Write header error, return ERROR_IO");
+ return -ERROR_IO;
+ }
+
+ mSource = source;
+
+ return OK;
+}
+
+status_t WAVEWriter::start(MetaData *params) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource == NULL) {
+ ALOGE("NULL Source");
+ return UNKNOWN_ERROR;
+ }
+
+ if (mStarted && mPaused) {
+ mPaused = false;
+ mResumed = true;
+ return OK;
+ } else if (mStarted) {
+        ALOGE("Already started, return");
+ return OK;
+ }
+
+ status_t err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ mReachedEOS = false;
+ mDone = false;
+
+ pthread_create(&mThread, &attr, ThreadWrapper, this);
+ pthread_attr_destroy(&attr);
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t WAVEWriter::pause() {
+ if (!mStarted) {
+ return OK;
+ }
+ mPaused = true;
+ return OK;
+}
+
+status_t WAVEWriter::stop() {
+ if (!mStarted) {
+ return OK;
+ }
+
+ mDone = true;
+
+ void *dummy;
+ pthread_join(mThread, &dummy);
+
+ status_t err = (status_t) dummy;
+ {
+ status_t status = mSource->stop();
+ if (err == OK &&
+ (status != OK && status != ERROR_END_OF_STREAM)) {
+ err = status;
+ }
+ }
+
+ mStarted = false;
+ return err;
+}
+
+bool WAVEWriter::exceedsFileSizeLimit() {
+ if (mMaxFileSizeLimitBytes == 0) {
+ return false;
+ }
+ return mEstimatedSizeBytes >= mMaxFileSizeLimitBytes;
+}
+
+bool WAVEWriter::exceedsFileDurationLimit() {
+ if (mMaxFileDurationLimitUs == 0) {
+ return false;
+ }
+ return mEstimatedDurationUs >= mMaxFileDurationLimitUs;
+}
+
+// static
+void *WAVEWriter::ThreadWrapper(void *me) {
+ return (void *) static_cast<WAVEWriter *>(me)->threadFunc();
+}
+
+status_t WAVEWriter::threadFunc() {
+ mEstimatedDurationUs = 0;
+ mEstimatedSizeBytes = 0;
+ bool stoppedPrematurely = true;
+ int64_t previousPausedDurationUs = 0;
+ int64_t maxTimestampUs = 0;
+ status_t err = OK;
+
+ prctl(PR_SET_NAME, (unsigned long)"WAVEWriter", 0, 0, 0);
+ hdr.data_sz = 0;
+ while (!mDone) {
+ MediaBuffer *buffer;
+ err = mSource->read(&buffer);
+
+ if (err != OK) {
+ break;
+ }
+
+ if (mPaused) {
+ buffer->release();
+ buffer = NULL;
+ continue;
+ }
+
+ mEstimatedSizeBytes += buffer->range_length();
+ if (exceedsFileSizeLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ break;
+ }
+
+ int64_t timestampUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ if (timestampUs > mEstimatedDurationUs) {
+ mEstimatedDurationUs = timestampUs;
+ }
+ if (mResumed) {
+ previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+ mResumed = false;
+ }
+ timestampUs -= previousPausedDurationUs;
+ ALOGV("time stamp: %lld, previous paused duration: %lld",
+ timestampUs, previousPausedDurationUs);
+ if (timestampUs > maxTimestampUs) {
+ maxTimestampUs = timestampUs;
+ }
+
+ if (exceedsFileDurationLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+ break;
+ }
+ ssize_t n = write(mFd,
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+
+ hdr.data_sz += (ssize_t)buffer->range_length();
+ hdr.riff_sz = hdr.data_sz + 44 - 8;
+
+ if (n < (ssize_t)buffer->range_length()) {
+ buffer->release();
+ buffer = NULL;
+
+ break;
+ }
+
+ if (stoppedPrematurely) {
+ stoppedPrematurely = false;
+ }
+
+ buffer->release();
+ buffer = NULL;
+ }
+
+ if (stoppedPrematurely) {
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS, UNKNOWN_ERROR);
+ }
+
+ lseek(mFd, 0, SEEK_SET);
+ write(mFd, &hdr, sizeof(hdr));
+ lseek(mFd, 0, SEEK_END);
+
+ close(mFd);
+ mFd = -1;
+ mReachedEOS = true;
+ if (err == ERROR_END_OF_STREAM) {
+ return OK;
+ }
+ return err;
+}
+
+bool WAVEWriter::reachedEOS() {
+ return mReachedEOS;
+}
+
+} // namespace android
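
The header fields filled in by addSource() and patched again at the end of threadFunc() come straight from the PCM parameters: byte_rate is sampleRate * channels * bitsPerSample / 8, block_align is the size of one sample frame, and riff_sz is data_sz plus the 44-byte header minus the 8 bytes taken by the RIFF id and size fields. Worked through for 44.1 kHz stereo 16-bit PCM (standalone sketch; the values are only an example):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t sampleRate    = 44100;
        const uint32_t channels      = 2;
        const uint32_t bitsPerSample = 16;

        const uint32_t byteRate   = (sampleRate * channels * bitsPerSample) / 8; // 176400 B/s
        const uint32_t blockAlign = (channels * bitsPerSample) / 8;              // 4 bytes per frame
        const uint32_t dataSz     = byteRate * 10;                               // say, 10 s of audio
        const uint32_t riffSz     = dataSz + 44 - 8;

        printf("byte_rate=%u block_align=%u riff_sz=%u\n", byteRate, blockAlign, riffSz);
        return 0;
    }
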
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 1422687..107c5da 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -200,6 +200,9 @@ private:
bool mWatchForAudioSeekComplete;
bool mWatchForAudioEOS;
+#ifdef QCOM_ENHANCED_AUDIO
+ static int mTunnelAliveAP;
+#endif
sp<TimedEventQueue::Event> mVideoEvent;
bool mVideoEventPending;
@@ -339,6 +342,11 @@ private:
size_t countTracks() const;
+#ifdef QCOM_ENHANCED_AUDIO
+ //Flag to check if tunnel mode audio is enabled
+ bool mIsTunnelAudio;
+#endif
+
AwesomePlayer(const AwesomePlayer &);
AwesomePlayer &operator=(const AwesomePlayer &);
};
diff --git a/media/libstagefright/include/ExtendedExtractor.h b/media/libstagefright/include/ExtendedExtractor.h
new file mode 100644
index 0000000..e7d8704
--- /dev/null
+++ b/media/libstagefright/include/ExtendedExtractor.h
@@ -0,0 +1,58 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EXTENDED_EXTRACTOR_
+#define EXTENDED_EXTRACTOR_
+
+#include <media/stagefright/DataSource.h>
+
+namespace android {
+
+class MediaExtractor;
+
+typedef MediaExtractor* (*MediaExtractorFactory)(const sp<DataSource> &source, const char* mime);
+
+static const char* MEDIA_CREATE_EXTRACTOR = "CreateExtractor";
+
+typedef bool (*ExtendedExtractorSniffers)(const sp<DataSource> &source, String8 *mimeType,
+ float *confidence,sp<AMessage> *meta);
+
+static const char* EXTENDED_EXTRACTOR_SNIFFERS = "SniffExtendedExtractor";
+
+class ExtendedExtractor
+{
+public:
+ static MediaExtractor* CreateExtractor(const sp<DataSource> &source, const char *mime);
+};
+
+bool SniffExtendedExtractor(const sp<DataSource> &source, String8 *mimeType,
+ float *confidence,sp<AMessage> *meta);
+
+} // namespace android
+
+#endif //EXTENDED_EXTRACTOR_
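
The header above only declares the plug-in contract: a factory with the MediaExtractorFactory signature exported under the symbol name "CreateExtractor", and a sniffer exported as "SniffExtendedExtractor". The code that actually resolves those symbols lives in ExtendedExtractor.cpp, which is outside this hunk; the sketch below only illustrates the dlopen/dlsym shape such a loader could take, assuming ExtendedExtractor.h is included, and the library name is an assumption:

    #include <dlfcn.h>

    // Hypothetical resolution of the factory declared in ExtendedExtractor.h.
    static android::MediaExtractorFactory loadParserFactory() {
        void *lib = dlopen("libExtendedExtractor.so", RTLD_NOW);  // assumed library name
        if (lib == NULL) {
            return NULL;
        }
        return reinterpret_cast<android::MediaExtractorFactory>(
                dlsym(lib, android::MEDIA_CREATE_EXTRACTOR));     // "CreateExtractor"
    }
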
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1ce47a3..c7fbb09 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1,6 +1,11 @@
/*
**
** Copyright 2007, The Android Open Source Project
+** Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+**
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -100,6 +105,11 @@
#define ALOGVV(a...) do { } while(0)
#endif
+#ifdef QCOM_HARDWARE
+#define DIRECT_TRACK_EOS 1
+static const char lockName[] = "DirectTrack";
+#endif
+
namespace android {
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
@@ -220,6 +230,14 @@ out:
return rc;
}
+static uint32_t getInputChannelCount(uint32_t channels) {
+#ifdef QCOM_HARDWARE
+ // only mono or stereo and 5.1 are supported for input sources
+ return popcount((channels) & (AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_MONO | AUDIO_CHANNEL_IN_5POINT1));
+#else
+ return popcount(channels);
+#endif
+}
// ----------------------------------------------------------------------------
AudioFlinger::AudioFlinger()
@@ -241,6 +259,11 @@ void AudioFlinger::onFirstRef()
Mutex::Autolock _l(mLock);
/* TODO: move all this work into an Init() function */
+#ifdef QCOM_HARDWARE
+ mLPASessionId = -2; // -2 is invalid session ID
+ mIsEffectConfigChanged = false;
+ mLPAEffectChain = NULL;
+#endif
char val_str[PROPERTY_VALUE_MAX] = { 0 };
if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
uint32_t int_val;
@@ -546,9 +569,218 @@ Exit:
return trackHandle;
}
+#ifdef QCOM_HARDWARE
+sp<IDirectTrack> AudioFlinger::createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient *client,
+ audio_stream_type_t streamType,
+ status_t *status)
+{
+ *status = NO_ERROR;
+ status_t lStatus = NO_ERROR;
+ sp<IDirectTrack> track = NULL;
+ DirectAudioTrack* directTrack = NULL;
+ Mutex::Autolock _l(mLock);
+
+ ALOGV("createDirectTrack() sessionId: %d sampleRate %d channelMask %d",
+ *sessionId, sampleRate, channelMask);
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc == NULL) {
+ ALOGE("Error: Invalid output (%d) to create direct audio track", output);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ desc->mStreamType = streamType;
+ if (desc->flag & AUDIO_OUTPUT_FLAG_LPA) {
+ if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
+ // Check if the session ID is already associated with a track
+ uint32_t sessions = t->hasAudioSession(*sessionId);
+
+                // check if an effect with the same session ID is waiting for a session to be created
+                ALOGV("check if an effect with the same session ID is waiting for a session to be created");
+ if ((mLPAEffectChain == NULL) && (sessions & PlaybackThread::EFFECT_SESSION)) {
+ // Clear reference to previous effect chain if any
+ t->mLock.lock();
+ ALOGV("getting the LPA effect chain and setting LPA flag to true.");
+ mLPAEffectChain = t->getEffectChain_l(*sessionId);
+ t->mLock.unlock();
+ }
+ }
+ mLPASessionId = *sessionId;
+ if (mLPAEffectChain != NULL) {
+ mLPAEffectChain->setLPAFlag(true);
+ // For LPA, the volume will be applied in DSP. No need for volume
+ // control in the Effect chain, so setting it to unity.
+ uint32_t volume = 0x1000000; // Equals to 1.0 in 8.24 format
+ mLPAEffectChain->setVolume_l(&volume,&volume);
+ } else {
+ ALOGW("There was no effectChain created for the sessionId(%d)", mLPASessionId);
+ }
+ mLPASampleRate = sampleRate;
+ mLPANumChannels = popcount(channelMask);
+ } else {
+ if(sessionId != NULL) {
+ ALOGE("Error: Invalid sessionID (%d) for direct audio track", *sessionId);
+ }
+ }
+ }
+ mLock.unlock();
+ directTrack = new DirectAudioTrack(this, output, desc, client, desc->flag);
+ desc->trackRefPtr = dynamic_cast<void *>(directTrack);
+ mLock.lock();
+ if (directTrack != 0) {
+ track = dynamic_cast<IDirectTrack *>(directTrack);
+ AudioEventObserver* obv = dynamic_cast<AudioEventObserver *>(directTrack);
+ ALOGE("setting observer mOutputDesc track %p, obv %p", track.get(), obv);
+ desc->stream->set_observer(desc->stream, reinterpret_cast<void *>(obv));
+ } else {
+ lStatus = BAD_VALUE;
+ }
+Exit:
+ if(lStatus) {
+ if (track != NULL) {
+ track.clear();
+ }
+ *status = lStatus;
+ }
+ return track;
+}
+
+void AudioFlinger::deleteEffectSession()
+{
+ Mutex::Autolock _l(mLock);
+ ALOGV("deleteSession");
+ // -2 is invalid session ID
+ mLPASessionId = -2;
+ if (mLPAEffectChain != NULL) {
+ mLPAEffectChain->setLPAFlag(false);
+ size_t i, numEffects = mLPAEffectChain->getNumEffects();
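+ // Reattach each effect to the chain's own buffers and reconfigure it now that
+ // the LPA session is gone.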
+ for(i = 0; i < numEffects; i++) {
+ sp<EffectModule> effect = mLPAEffectChain->getEffectFromIndex_l(i);
+ effect->setInBuffer(mLPAEffectChain->inBuffer());
+ if (i == numEffects-1) {
+ effect->setOutBuffer(mLPAEffectChain->outBuffer());
+ } else {
+ effect->setOutBuffer(mLPAEffectChain->inBuffer());
+ }
+ effect->configure();
+ }
+ mLPAEffectChain.clear();
+ mLPAEffectChain = NULL;
+ }
+}
+
+// TODO: Should we go ahead with this frameCount?
+#define DEFAULT_FRAME_COUNT 1200
+void AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer, int16_t *outBuffer, int size)
+{
+ ALOGV("applyEffectsOn: inBuf %p outBuf %p size %d token %p", inBuffer, outBuffer, size, token);
+ // This might be the first buffer to apply effects after effect config change
+ // should not skip effects processing
+ mIsEffectConfigChanged = false;
+
+ volatile size_t numEffects = 0;
+ if(mLPAEffectChain != NULL) {
+ numEffects = mLPAEffectChain->getNumEffects();
+ }
+
+ if( numEffects > 0) {
+ size_t i = 0;
+ int16_t *pIn = inBuffer;
+ int16_t *pOut = outBuffer;
+
+ int frameCount = size / (sizeof(int16_t) * mLPANumChannels);
+
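+ // Process in chunks of at most DEFAULT_FRAME_COUNT frames so that an effect
+ // configuration change can interrupt processing between chunks.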
+ while(frameCount > 0) {
+ if(mLPAEffectChain == NULL) {
+ ALOGV("LPA Effect Chain is removed - No effects processing !!");
+ numEffects = 0;
+ break;
+ }
+ mLPAEffectChain->lock();
+
+ numEffects = mLPAEffectChain->getNumEffects();
+ if(!numEffects) {
+ ALOGV("applyEffectsOn: All the effects are removed - nothing to process");
+ mLPAEffectChain->unlock();
+ break;
+ }
+
+ int outFrameCount = (frameCount > DEFAULT_FRAME_COUNT ? DEFAULT_FRAME_COUNT : frameCount);
+ bool isEffectEnabled = false;
+ for(i = 0; i < numEffects; i++) {
+ // If effect configuration is changed while applying effects do not process further
+ if(mIsEffectConfigChanged) {
+ mLPAEffectChain->unlock();
+ ALOGV("applyEffectsOn: mIsEffectConfigChanged is set - no further processing");
+ return;
+ }
+ sp<EffectModule> effect = mLPAEffectChain->getEffectFromIndex_l(i);
+ if(effect == NULL) {
+ ALOGE("getEffectFromIndex_l(%d) returned NULL ptr", i);
+ mLPAEffectChain->unlock();
+ return;
+ }
+ if(i == 0) {
+ // For the first effect, set separate input and output buffers
+ isEffectEnabled = effect->isProcessEnabled();
+ effect->setInBuffer(pIn);
+ effect->setOutBuffer(pOut);
+ } else {
+ // For the remaining effects, use the previous effect's output buffer as the input buffer
+ effect->setInBuffer(pOut);
+ effect->setOutBuffer(pOut);
+ }
+ // true indicates that it is being applied on LPA output
+ effect->configure(true, mLPASampleRate, mLPANumChannels, outFrameCount);
+ }
+
+ if(isEffectEnabled) {
+ // Clear the output buffer
+ memset(pOut, 0, (outFrameCount * mLPANumChannels * sizeof(int16_t)));
+ } else {
+ // Copy input buffer content to the output buffer
+ memcpy(pOut, pIn, (outFrameCount * mLPANumChannels * sizeof(int16_t)));
+ }
+
+ mLPAEffectChain->process_l();
+
+ mLPAEffectChain->unlock();
+
+ // Update input and output buffer pointers
+ pIn += (outFrameCount * mLPANumChannels);
+ pOut += (outFrameCount * mLPANumChannels);
+ frameCount -= outFrameCount;
+ }
+ }
+
+ if (!numEffects) {
+ ALOGV("applyEffectsOn: There are no effects to be applied");
+ if(inBuffer != outBuffer) {
+ // No effect applied so just copy input buffer to output buffer
+ memcpy(outBuffer, inBuffer, size);
+ }
+ }
+}
+#endif
+
uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
{
Mutex::Autolock _l(mLock);
+#ifdef QCOM_HARDWARE
+ if (!mDirectAudioTracks.isEmpty()) {
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc != NULL) {
+ return desc->stream->common.get_sample_rate(&desc->stream->common);
+ }
+ }
+#endif
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
ALOGW("sampleRate() unknown thread %d", output);
@@ -560,6 +792,12 @@ uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
int AudioFlinger::channelCount(audio_io_handle_t output) const
{
Mutex::Autolock _l(mLock);
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc != NULL) {
+ return desc->stream->common.get_channels(&desc->stream->common);
+ }
+#endif
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
ALOGW("channelCount() unknown thread %d", output);
@@ -582,6 +820,12 @@ audio_format_t AudioFlinger::format(audio_io_handle_t output) const
size_t AudioFlinger::frameCount(audio_io_handle_t output) const
{
Mutex::Autolock _l(mLock);
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc != NULL) {
+ return desc->stream->common.get_buffer_size(&desc->stream->common);
+ }
+#endif
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
ALOGW("frameCount() unknown thread %d", output);
@@ -785,12 +1029,35 @@ status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
}
AutoMutex lock(mLock);
+#ifdef QCOM_HARDWARE
+ ALOGV("setStreamVolume stream %d, output %d, value %f",stream, output, value);
+ AudioSessionDescriptor *desc = NULL;
+ if (!mDirectAudioTracks.isEmpty()) {
+ desc = mDirectAudioTracks.valueFor(output);
+ if (desc != NULL) {
+ ALOGV("setStreamVolume for mAudioTracks size %d desc %p",mDirectAudioTracks.size(),desc);
+ if (desc->mStreamType == stream) {
+ mStreamTypes[stream].volume = value;
+ desc->stream->set_volume(desc->stream,
+ desc->mVolumeLeft * mStreamTypes[stream].volume,
+ desc->mVolumeRight* mStreamTypes[stream].volume);
+ return NO_ERROR;
+ }
+ }
+ }
+#endif
PlaybackThread *thread = NULL;
if (output) {
thread = checkPlaybackThread_l(output);
if (thread == NULL) {
+#ifdef QCOM_HARDWARE
+ if (desc != NULL) {
+ return NO_ERROR;
+ }
+#endif
return BAD_VALUE;
}
+
}
mStreamTypes[stream].volume = value;
@@ -917,6 +1184,18 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
return final_result;
}
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = NULL;
+ if (!mDirectAudioTracks.isEmpty()) {
+ desc = mDirectAudioTracks.valueFor(ioHandle);
+ if (desc != NULL) {
+ ALOGV("setParameters for mAudioTracks size %d desc %p",mDirectAudioTracks.size(),desc);
+ desc->stream->common.set_parameters(&desc->stream->common, keyValuePairs.string());
+ return NO_ERROR;
+ }
+ }
+#endif
+
// hold a strong ref on thread in case closeOutput() or closeInput() is called
// and the thread is exited once the lock is released
sp<ThreadBase> thread;
@@ -929,13 +1208,17 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
// indicate output device change to all input threads for pre processing
AudioParameter param = AudioParameter(keyValuePairs);
int value;
+ DefaultKeyedVector< int, sp<RecordThread> > recordThreads = mRecordThreads;
+ mLock.unlock();
if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
(value != 0)) {
- for (size_t i = 0; i < mRecordThreads.size(); i++) {
- mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
+ for (size_t i = 0; i < recordThreads.size(); i++) {
+ recordThreads.valueAt(i)->setParameters(keyValuePairs);
}
}
+ mLock.lock();
}
+ mLock.unlock();
}
if (thread != 0) {
return thread->setParameters(keyValuePairs);
@@ -1056,14 +1339,14 @@ void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
Mutex::Autolock _l(mLock);
- pid_t pid = IPCThreadState::self()->getCallingPid();
- if (mNotificationClients.indexOfKey(pid) < 0) {
+ sp<IBinder> binder = client->asBinder();
+ if (mNotificationClients.indexOfKey(binder) < 0) {
sp<NotificationClient> notificationClient = new NotificationClient(this,
client,
- pid);
- ALOGV("registerClient() client %p, pid %d", notificationClient.get(), pid);
+ binder);
+ ALOGV("registerClient() client %p, binder %d", notificationClient.get(), binder.get);
- mNotificationClients.add(pid, notificationClient);
+ mNotificationClients.add(binder, notificationClient);
sp<IBinder> binder = client->asBinder();
binder->linkToDeath(notificationClient);
@@ -1080,12 +1363,30 @@ void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
}
}
-void AudioFlinger::removeNotificationClient(pid_t pid)
+#ifdef QCOM_HARDWARE
+status_t AudioFlinger::deregisterClient(const sp<IAudioFlingerClient>& client)
{
+ ALOGV("deregisterClient() %p, tid %d, calling tid %d", client.get(), gettid(), IPCThreadState::self()->getCallingPid());
Mutex::Autolock _l(mLock);
- mNotificationClients.removeItem(pid);
+ sp<IBinder> binder = client->asBinder();
+ int index = mNotificationClients.indexOfKey(binder);
+ if (index >= 0) {
+ mNotificationClients.removeItemsAt(index);
+ return true;
+ }
+ return false;
+}
+#endif
+
+void AudioFlinger::removeNotificationClient(sp<IBinder> binder)
+{
+ Mutex::Autolock _l(mLock);
+
+ mNotificationClients.removeItem(binder);
+
+ int pid = IPCThreadState::self()->getCallingPid();
ALOGV("%d died, releasing its sessions", pid);
size_t num = mAudioSessionRefs.size();
bool removed = false;
@@ -1110,6 +1411,12 @@ void AudioFlinger::removeNotificationClient(pid_t pid)
// audioConfigChanged_l() must be called with AudioFlinger::mLock held
void AudioFlinger::audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2)
{
+#ifdef QCOM_HARDWARE
+ ALOGV("AudioFlinger::audioConfigChanged_l: event %d", event);
+ if (event == AudioSystem::EFFECT_CONFIG_CHANGED) {
+ mIsEffectConfigChanged = true;
+ }
+#endif
size_t size = mNotificationClients.size();
for (size_t i = 0; i < size; i++) {
mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioHandle,
@@ -1212,6 +1519,15 @@ status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
return status;
}
+#ifdef QCOM_HARDWARE
+void AudioFlinger::ThreadBase::effectConfigChanged() {
+ mAudioFlinger->mLock.lock();
+ ALOGV("New effect is being added to LPA chain, Notifying LPA Direct Track");
+ mAudioFlinger->audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
+ mAudioFlinger->mLock.unlock();
+}
+#endif
+
void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
{
Mutex::Autolock _l(mLock);
@@ -2660,7 +2976,10 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// only process effects if we're going to write
if (sleepTime == 0) {
for (size_t i = 0; i < effectChains.size(); i ++) {
- effectChains[i]->process_l();
+#ifdef QCOM_HARDWARE
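+ // The LPA effect chain is processed in applyEffectsOn(), not by this thread.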
+ if (effectChains[i] != mAudioFlinger->mLPAEffectChain)
+#endif
+ effectChains[i]->process_l();
}
}
@@ -5785,8 +6104,8 @@ void AudioFlinger::Client::releaseTimedTrack()
AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<IAudioFlingerClient>& client,
- pid_t pid)
- : mAudioFlinger(audioFlinger), mPid(pid), mAudioFlingerClient(client)
+ sp<IBinder> binder)
+ : mAudioFlinger(audioFlinger), mBinder(binder), mAudioFlingerClient(client)
{
}
@@ -5797,9 +6116,309 @@ AudioFlinger::NotificationClient::~NotificationClient()
void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
{
sp<NotificationClient> keep(this);
- mAudioFlinger->removeNotificationClient(mPid);
+ mAudioFlinger->removeNotificationClient(mBinder);
+}
+
+// ----------------------------------------------------------------------------
+#ifdef QCOM_HARDWARE
+AudioFlinger::DirectAudioTrack::DirectAudioTrack(const sp<AudioFlinger>& audioFlinger,
+ int output, AudioSessionDescriptor *outputDesc,
+ IDirectTrackClient* client, audio_output_flags_t outflag)
+ : BnDirectTrack(), mIsPaused(false), mAudioFlinger(audioFlinger), mOutput(output), mOutputDesc(outputDesc),
+ mClient(client), mEffectConfigChanged(false), mKillEffectsThread(false), mFlag(outflag)
+{
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ createEffectThread();
+
+ mAudioFlingerClient = new AudioFlingerDirectTrackClient(this);
+ mAudioFlinger->registerClient(mAudioFlingerClient);
+
+ allocateBufPool();
+ }
+ mDeathRecipient = new PMDeathRecipient(this);
+ acquireWakeLock();
+}
+
+AudioFlinger::DirectAudioTrack::~DirectAudioTrack() {
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ requestAndWaitForEffectsThreadExit();
+ mAudioFlinger->deregisterClient(mAudioFlingerClient);
+ mAudioFlinger->deleteEffectSession();
+ deallocateBufPool();
+ }
+ releaseWakeLock();
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = mPowerManager->asBinder();
+ binder->unlinkToDeath(mDeathRecipient);
+ }
+ AudioSystem::releaseOutput(mOutput);
+}
+
+status_t AudioFlinger::DirectAudioTrack::start() {
+ if(mIsPaused) {
+ mIsPaused = false;
+ mOutputDesc->stream->start(mOutputDesc->stream);
+ }
+ mOutputDesc->mActive = true;
+ AudioSystem::startOutput(mOutput, (audio_stream_type_t)mOutputDesc->mStreamType);
+ return NO_ERROR;
+}
+
+void AudioFlinger::DirectAudioTrack::stop() {
+ mOutputDesc->mActive = false;
+ mOutputDesc->stream->stop(mOutputDesc->stream);
+ AudioSystem::stopOutput(mOutput, (audio_stream_type_t)mOutputDesc->mStreamType);
+}
+
+void AudioFlinger::DirectAudioTrack::pause() {
+ if(!mIsPaused) {
+ mIsPaused = true;
+ mOutputDesc->stream->pause(mOutputDesc->stream);
+ mOutputDesc->mActive = false;
+ AudioSystem::stopOutput(mOutput, (audio_stream_type_t)mOutputDesc->mStreamType);
+ }
+}
+
+ssize_t AudioFlinger::DirectAudioTrack::write(const void *buffer, size_t size) {
+ ALOGV("Writing to AudioSessionOut");
+ int isAvail = 0;
+ mOutputDesc->stream->is_buffer_available(mOutputDesc->stream, &isAvail);
+ if (!isAvail) {
+ return 0;
+ }
+
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ mEffectLock.lock();
+ List<BufferInfo>::iterator it = mEffectsPool.begin();
+ BufferInfo buf = *it;
+ mEffectsPool.erase(it);
+ memcpy((char *) buf.localBuf, (char *)buffer, size);
+ buf.bytesToWrite = size;
+ mEffectsPool.push_back(buf);
+ mAudioFlinger->applyEffectsOn(static_cast<void *>(this), (int16_t*)buf.localBuf,(int16_t*)buffer,(int)size);
+ mEffectLock.unlock();
+ }
+ return mOutputDesc->stream->write(mOutputDesc->stream, buffer, size);
+}
+
+void AudioFlinger::DirectAudioTrack::flush() {
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ mEffectsPool.clear();
+ mEffectsPool = mBufPool;
+ }
+ mOutputDesc->stream->flush(mOutputDesc->stream);
+}
+
+void AudioFlinger::DirectAudioTrack::mute(bool muted) {
+}
+
+void AudioFlinger::DirectAudioTrack::setVolume(float left, float right) {
+ mOutputDesc->mVolumeLeft = 1.0;
+ mOutputDesc->mVolumeRight = 1.0;
+}
+
+int64_t AudioFlinger::DirectAudioTrack::getTimeStamp() {
+ int64_t time;
+ mOutputDesc->stream->get_next_write_timestamp(mOutputDesc->stream, &time);
+ ALOGV("Timestamp %lld",time);
+ return time;
+}
+
+void AudioFlinger::DirectAudioTrack::postEOS(int64_t delayUs) {
+ ALOGV("Notify Audio Track of EOS event");
+ mClient->notify(DIRECT_TRACK_EOS);
+}
+
+void AudioFlinger::DirectAudioTrack::allocateBufPool() {
+ void *dsp_buf = NULL;
+ void *local_buf = NULL;
+
+ //1. get the ion buffer information
+ struct buf_info* buf = NULL;
+ mOutputDesc->stream->get_buffer_info(mOutputDesc->stream, &buf);
+ ALOGV("get buffer info %p",buf);
+ if (!buf) {
+ ALOGV("buffer is NULL");
+ return;
+ }
+ int nSize = buf->bufsize;
+ int bufferCount = buf->nBufs;
+
+ //2. allocate the buffer pool, allocate local buffers
+ for (int i = 0; i < bufferCount; i++) {
+ dsp_buf = (void *)buf->buffers[i];
+ local_buf = malloc(nSize);
+ memset(local_buf, 0, nSize);
+ // Store this information for internal mapping / maintenance
+ BufferInfo buf(local_buf, dsp_buf, nSize);
+ buf.bytesToWrite = 0;
+ mBufPool.push_back(buf);
+ mEffectsPool.push_back(buf);
+
+ ALOGV("The MEM that is allocated buffer is %x, size %d",(unsigned int)dsp_buf,nSize);
+ }
+ free(buf);
+}
+
+void AudioFlinger::DirectAudioTrack::deallocateBufPool() {
+
+ //1. Deallocate the local memory
+ //2. Remove all the buffers from bufpool
+ while (!mBufPool.empty()) {
+ List<BufferInfo>::iterator it = mBufPool.begin();
+ BufferInfo &memBuffer = *it;
+ // free the local buffer corresponding to mem buffer
+ if (memBuffer.localBuf) {
+ free(memBuffer.localBuf);
+ memBuffer.localBuf = NULL;
+ }
+ ALOGV("Removing from bufpool");
+ mBufPool.erase(it);
+ }
+}
+
+status_t AudioFlinger::DirectAudioTrack::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnDirectTrack::onTransact(code, data, reply, flags);
+}
+
+void *AudioFlinger::DirectAudioTrack::EffectsThreadWrapper(void *me) {
+ static_cast<DirectAudioTrack *>(me)->EffectsThreadEntry();
+ return NULL;
+}
+
+void AudioFlinger::DirectAudioTrack::EffectsThreadEntry() {
+ while(1) {
+ mEffectLock.lock();
+ if (!mEffectConfigChanged && !mKillEffectsThread) {
+ mEffectCv.wait(mEffectLock);
+ }
+
+ if(mKillEffectsThread) {
+ mEffectLock.unlock();
+ break;
+ }
+
+ if (mEffectConfigChanged) {
+ mEffectConfigChanged = false;
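+ // Re-apply the effect chain to every buffer in the pool, using the
+ // unprocessed PCM saved in localBuf.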
+ for ( List<BufferInfo>::iterator it = mEffectsPool.begin();
+ it != mEffectsPool.end(); it++) {
+ ALOGV("Apply effects on the buffer dspbuf %p, mEffectsPool.size() %d",it->dspBuf,mEffectsPool.size());
+ mAudioFlinger->applyEffectsOn(static_cast<void *>(this),
+ (int16_t *)it->localBuf,
+ (int16_t *)it->dspBuf,
+ it->bytesToWrite);
+ if (mEffectConfigChanged) {
+ break;
+ }
+ }
+
+ }
+ mEffectLock.unlock();
+ }
+ ALOGV("Effects thread is dead");
+ mEffectsThreadAlive = false;
+}
+
+void AudioFlinger::DirectAudioTrack::requestAndWaitForEffectsThreadExit() {
+ if (!mEffectsThreadAlive)
+ return;
+ mKillEffectsThread = true;
+ mEffectCv.signal();
+ pthread_join(mEffectsThread,NULL);
+ ALOGV("effects thread killed");
+}
+
+void AudioFlinger::DirectAudioTrack::createEffectThread() {
+ //Create the effects thread
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+ mEffectsThreadAlive = true;
+ ALOGV("Creating Effects Thread");
+ pthread_create(&mEffectsThread, &attr, EffectsThreadWrapper, this);
+}
+AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient::AudioFlingerDirectTrackClient(void *obj)
+{
+ ALOGV("AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient");
+ pBaseClass = (DirectAudioTrack*)obj;
+}
+
+void AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient::binderDied(const wp<IBinder>& who) {
+ pBaseClass->mAudioFlinger.clear();
+ ALOGW("AudioFlinger server died!");
+}
+
+void AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient
+ ::ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) {
+ ALOGV("ioConfigChanged() event %d", event);
+ if (event == AudioSystem::EFFECT_CONFIG_CHANGED) {
+ ALOGV("Received notification for change in effect module");
+ // Seek to current media time - flush the decoded buffers with the driver
+ pBaseClass->mEffectConfigChanged = true;
+ // Signal effects thread to re-apply effects
+ ALOGV("Signalling Effects Thread");
+ pBaseClass->mEffectCv.signal();
+
+ }
+ ALOGV("ioConfigChanged Out");
+}
+
+void AudioFlinger::DirectAudioTrack::acquireWakeLock()
+{
+ Mutex::Autolock _l(pmLock);
+
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("Thread %s cannot connect to the power manager service", lockName);
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+ if (mPowerManager != 0 && mWakeLockToken == 0) {
+ sp<IBinder> binder = new BBinder();
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder,
+ String16(lockName));
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ }
+ ALOGV("acquireWakeLock() status %d", status);
+ }
}
+void AudioFlinger::DirectAudioTrack::releaseWakeLock()
+{
+ Mutex::Autolock _l(pmLock);
+
+ if (mWakeLockToken != 0) {
+ ALOGV("releaseWakeLock()");
+ if (mPowerManager != 0) {
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ }
+ mWakeLockToken.clear();
+ }
+}
+
+void AudioFlinger::DirectAudioTrack::clearPowerManager()
+{
+ Mutex::Autolock _l(pmLock);
+ releaseWakeLock();
+ mPowerManager.clear();
+}
+
+void AudioFlinger::DirectAudioTrack::PMDeathRecipient::binderDied(const wp<IBinder>& who)
+{
+ parentClass->clearPowerManager();
+ ALOGW("power manager service died !!!");
+}
+#endif
+
// ----------------------------------------------------------------------------
AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
@@ -6002,7 +6621,7 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
// mRsmpInIndex and mInputBytes set by readInputParameters()
- mReqChannelCount(popcount(channelMask)),
+ mReqChannelCount(getInputChannelCount(channelMask)),
mReqSampleRate(sampleRate)
// mBytesRead is only meaningful while active, and so is cleared in start()
// (but might be better to also clear here for dump?)
@@ -6617,7 +7236,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l()
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
- reqChannelCount = popcount(value);
+ reqChannelCount = getInputChannelCount(value);
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
@@ -6678,7 +7297,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l()
reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) &&
- popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
+ getInputChannelCount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
(reqChannelCount <= FCC_2)) {
status = NO_ERROR;
}
@@ -6749,7 +7368,7 @@ void AudioFlinger::RecordThread::readInputParameters()
mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
- mChannelCount = (uint16_t)popcount(mChannelMask);
+ mChannelCount = (uint16_t)getInputChannelCount(mChannelMask);
mFormat = mInput->stream->common.get_format(&mInput->stream->common);
mFrameSize = audio_stream_frame_size(&mInput->stream->common);
mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
@@ -7023,7 +7642,18 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
if (status == NO_ERROR && outStream != NULL) {
AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
-
+#ifdef QCOM_HARDWARE
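+ // LPA and Tunnel outputs are written directly to the HAL stream and tracked in
+ // mDirectAudioTracks; no PlaybackThread is created for them.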
+ if (flags & AUDIO_OUTPUT_FLAG_LPA || flags & AUDIO_OUTPUT_FLAG_TUNNEL ) {
+ AudioSessionDescriptor *desc = new AudioSessionDescriptor(hwDevHal, outStream, flags);
+ desc->mActive = true;
+ //TODO: no stream type
+ //desc->mStreamType = streamType;
+ desc->mVolumeLeft = 1.0;
+ desc->mVolumeRight = 1.0;
+ desc->device = *pDevices;
+ mDirectAudioTracks.add(id, desc);
+ } else
+#endif
if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
(config.format != AUDIO_FORMAT_PCM_16_BIT) ||
(config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
@@ -7033,16 +7663,30 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
thread = new MixerThread(this, output, id, *pDevices);
ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
}
- mPlaybackThreads.add(id, thread);
+#ifdef QCOM_HARDWARE
+ if (thread != NULL)
+#endif
+ mPlaybackThreads.add(id, thread);
if (pSamplingRate != NULL) *pSamplingRate = config.sample_rate;
if (pFormat != NULL) *pFormat = config.format;
if (pChannelMask != NULL) *pChannelMask = config.channel_mask;
- if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
-
- // notify client processes of the new output creation
- thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
-
+#ifdef QCOM_HARDWARE
+ if (thread != NULL) {
+#endif
+ if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
+ // notify client processes of the new output creation
+ thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
+#ifdef QCOM_HARDWARE
+ }
+ else {
+ if (pLatencyMs != NULL) *pLatencyMs = 0;
+ if ((flags & AUDIO_OUTPUT_FLAG_LPA) || (flags & AUDIO_OUTPUT_FLAG_TUNNEL)) {
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(id);
+ if (pLatencyMs != NULL && desc != NULL) *pLatencyMs = desc->stream->get_latency(desc->stream);
+ }
+ }
+#endif
// the first primary output opened designates the primary hw device
if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
ALOGI("Using module %d has the primary audio interface", module);
@@ -7089,6 +7733,21 @@ status_t AudioFlinger::closeOutput_nonvirtual(audio_io_handle_t output)
{
// keep strong reference on the playback thread so that
// it is not destroyed while exit() is executed
+#ifdef QCOM_HARDWARE
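+ // Direct (LPA/Tunnel) outputs have no PlaybackThread; put the HAL stream in
+ // standby, close it, and remove the session descriptor.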
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if (desc) {
+ ALOGV("Closing DirectTrack output %d", output);
+ desc->mActive = false;
+ desc->stream->common.standby(&desc->stream->common);
+ desc->hwDev->close_output_stream(desc->hwDev, desc->stream);
+ desc->trackRefPtr = NULL;
+ mDirectAudioTracks.removeItem(output);
+ audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
+ delete desc;
+ return NO_ERROR;
+ }
+#endif
+
sp<PlaybackThread> thread;
{
Mutex::Autolock _l(mLock);
@@ -7210,7 +7869,7 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
if (status == BAD_VALUE &&
reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
(config.sample_rate <= 2 * reqSamplingRate) &&
- (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
+ (getInputChannelCount(config.channel_mask) <= FCC_2) && (getInputChannelCount(reqChannels) <= FCC_2)) {
ALOGV("openInput() reopening with proposed sampling rate and channel mask");
inStream = NULL;
#ifndef ICS_AUDIO_BLOB
@@ -7833,6 +8492,21 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
addEffectChain_l(chain);
chain->setStrategy(getStrategyForSession_l(sessionId));
chainCreated = true;
+#ifdef QCOM_HARDWARE
+ if(sessionId == mAudioFlinger->mLPASessionId) {
+ // Clear reference to previous effect chain if any
+ if(mAudioFlinger->mLPAEffectChain.get()) {
+ mAudioFlinger->mLPAEffectChain.clear();
+ }
+ ALOGV("New EffectChain is created for LPA session ID %d", sessionId);
+ mAudioFlinger->mLPAEffectChain = chain;
+ chain->setLPAFlag(true);
+ // For LPA, the volume will be applied in DSP. No need for volume
+ // control in the Effect chain, so setting it to unity.
+ uint32_t volume = 0x1000000; // Equals to 1.0 in 8.24 format
+ chain->setVolume_l(&volume,&volume);
+ }
+#endif
} else {
effect = chain->getEffectFromDesc_l(desc);
}
@@ -7863,6 +8537,11 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
effect->setDevice(mInDevice);
effect->setMode(mAudioFlinger->getMode());
effect->setAudioSource(mAudioSource);
+#ifdef QCOM_HARDWARE
+ if(chain == mAudioFlinger->mLPAEffectChain) {
+ effect->setLPAFlag(true);
+ }
+#endif
}
// create effect handle and connect it to effect module
handle = new EffectHandle(effect, client, effectClient, priority);
@@ -7969,7 +8648,10 @@ void AudioFlinger::ThreadBase::lockEffectChains_l(
{
effectChains = mEffectChains;
for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->lock();
+#ifdef QCOM_HARDWARE
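+ // The LPA chain is locked and unlocked by applyEffectsOn(); skip it here.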
+ if (mEffectChains[i] != mAudioFlinger->mLPAEffectChain)
+#endif
+ mEffectChains[i]->lock();
}
}
@@ -7977,7 +8659,10 @@ void AudioFlinger::ThreadBase::unlockEffectChains(
const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
{
for (size_t i = 0; i < effectChains.size(); i++) {
- effectChains[i]->unlock();
+#ifdef QCOM_HARDWARE
+ if (effectChains[i] != mAudioFlinger->mLPAEffectChain)
+#endif
+ effectChains[i]->unlock();
}
}
@@ -8209,6 +8894,9 @@ AudioFlinger::EffectModule::EffectModule(ThreadBase *thread,
// mMaxDisableWaitCnt is set by configure() and not used before then
// mDisableWaitCnt is set by process() and updateState() and not used before then
mSuspended(false)
+#ifdef QCOM_HARDWARE
+ ,mIsForLPA(false)
+#endif
{
ALOGV("Constructor %p", this);
int lStatus;
@@ -8334,6 +9022,9 @@ AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l()
size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast)
{
+#ifdef QCOM_HARDWARE
+ setEnabled(false);
+#endif
ALOGV("disconnect() %p handle %p", this, handle);
// keep a strong reference on this EffectModule to avoid calling the
// destructor before we exit
@@ -8440,8 +9131,19 @@ void AudioFlinger::EffectModule::reset_l()
(*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
}
+#ifndef QCOM_HARDWARE
status_t AudioFlinger::EffectModule::configure()
{
+#else
+status_t AudioFlinger::EffectModule::configure(bool isForLPA, int sampleRate, int channelCount, int frameCount)
+{
+ uint32_t channels;
+
+ // Acquire lock here to make sure that any other thread does not delete
+ // the effect handle and release the effect module.
+ Mutex::Autolock _l(mLock);
+#endif
+
if (mEffectInterface == NULL) {
return NO_INIT;
}
@@ -8453,6 +9155,23 @@ status_t AudioFlinger::EffectModule::configure()
// TODO: handle configuration of effects replacing track process
audio_channel_mask_t channelMask = thread->channelMask();
+#ifdef QCOM_HARDWARE
+ mIsForLPA = isForLPA;
+ if(isForLPA) {
+ if (channelCount == 1) {
+ channels = AUDIO_CHANNEL_OUT_MONO;
+ } else {
+ channels = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ ALOGV("%s: LPA ON - channels %d", __func__, channels);
+ } else {
+ if (thread->channelCount() == 1) {
+ channels = AUDIO_CHANNEL_OUT_MONO;
+ } else {
+ channels = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ }
+#endif
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
@@ -8462,7 +9181,13 @@ status_t AudioFlinger::EffectModule::configure()
mConfig.outputCfg.channels = channelMask;
mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
- mConfig.inputCfg.samplingRate = thread->sampleRate();
+#ifdef QCOM_HARDWARE
+ if(isForLPA){
+ mConfig.inputCfg.samplingRate = sampleRate;
+ ALOGV("%s: LPA ON - sampleRate %d", __func__, sampleRate);
+ } else
+#endif
+ mConfig.inputCfg.samplingRate = thread->sampleRate();
mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
mConfig.inputCfg.bufferProvider.cookie = NULL;
mConfig.inputCfg.bufferProvider.getBuffer = NULL;
@@ -8487,7 +9212,13 @@ status_t AudioFlinger::EffectModule::configure()
}
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
- mConfig.inputCfg.buffer.frameCount = thread->frameCount();
+#ifdef QCOM_HARDWARE
+ if(isForLPA) {
+ mConfig.inputCfg.buffer.frameCount = frameCount;
+ ALOGV("%s: LPA ON - frameCount %d", __func__, frameCount);
+ } else
+#endif
+ mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
ALOGV("configure() %p thread %p buffer %p framecount %d",
@@ -8667,10 +9398,15 @@ status_t AudioFlinger::EffectModule::setEnabled(bool enabled)
// must be called with EffectModule::mLock held
status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
{
-
+#ifdef QCOM_HARDWARE
+ bool effectStateChanged = false;
+#endif
ALOGV("setEnabled %p enabled %d", this, enabled);
if (enabled != isEnabled()) {
+#ifdef QCOM_HARDWARE
+ effectStateChanged = true;
+#endif
status_t status = AudioSystem::setEffectEnabled(mId, enabled);
if (enabled && status != NO_ERROR) {
return status;
@@ -8708,6 +9444,16 @@ status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
}
}
}
+#ifdef QCOM_HARDWARE
+ /*
+ Send notification event to LPA Player when an effect for
+ LPA output is enabled or disabled.
+ */
+ if (effectStateChanged && mIsForLPA) {
+ sp<ThreadBase> thread = mThread.promote();
+ thread->effectConfigChanged();
+ }
+#endif
return NO_ERROR;
}
@@ -9157,6 +9903,18 @@ status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode,
return disable();
}
+#ifdef QCOM_HARDWARE
+ ALOGV("EffectHandle::command: isOnLPA %d", mEffect->isOnLPA());
+ if(mEffect->isOnLPA() &&
+ ((cmdCode == EFFECT_CMD_SET_PARAM) || (cmdCode == EFFECT_CMD_SET_PARAM_DEFERRED) ||
+ (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) || (cmdCode == EFFECT_CMD_SET_DEVICE) ||
+ (cmdCode == EFFECT_CMD_SET_VOLUME) || (cmdCode == EFFECT_CMD_SET_AUDIO_MODE)) ) {
+ // Notify Direct track for the change in Effect module
+ // TODO: check if it is required to send mLPAHandle
+ ALOGV("Notifying Direct Track for the change in effect config");
+ mClient->audioFlinger()->audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
+ }
+#endif
return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
}
@@ -9225,6 +9983,9 @@ AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
: mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
+#ifdef QCOM_HARDWARE
+ ,mIsForLPATrack(false)
+#endif
{
mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
if (thread == NULL) {
@@ -9269,6 +10030,20 @@ sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int
return 0;
}
+#ifdef QCOM_HARDWARE
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromIndex_l(int idx)
+{
+ sp<EffectModule> effect = NULL;
+ if(idx < 0 || idx >= mEffects.size()) {
+ ALOGE("EffectChain::getEffectFromIndex_l: invalid index %d", idx);
+ }
+ if(mEffects.size() > 0){
+ effect = mEffects[idx];
+ }
+ return effect;
+}
+#endif
+
// getEffectFromType_l() must be called with ThreadBase::mLock held
sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l(
const effect_uuid_t *type)
@@ -9335,7 +10110,11 @@ void AudioFlinger::EffectChain::process_l()
}
size_t size = mEffects.size();
+#ifdef QCOM_HARDWARE
+ if (doProcess || isForLPATrack()) {
+#else
if (doProcess) {
+#endif
for (size_t i = 0; i < size; i++) {
mEffects[i]->process();
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 49e2b2c..d2daae7 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -1,6 +1,9 @@
/*
**
** Copyright 2007, The Android Open Source Project
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -26,6 +29,10 @@
#include <media/IAudioFlinger.h>
#include <media/IAudioFlingerClient.h>
+#ifdef QCOM_HARDWARE
+#include <media/IDirectTrack.h>
+#include <media/IDirectTrackClient.h>
+#endif
#include <media/IAudioTrack.h>
#include <media/IAudioRecord.h>
#include <media/AudioSystem.h>
@@ -52,6 +59,7 @@
#include "AudioWatchdog.h"
#include <powermanager/IPowerManager.h>
+#include <utils/List.h>
namespace android {
@@ -99,6 +107,19 @@ public:
pid_t tid,
int *sessionId,
status_t *status);
+#ifdef QCOM_HARDWARE
+ virtual sp<IDirectTrack> createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient* client,
+ audio_stream_type_t streamType,
+ status_t *status);
+
+ virtual void deleteEffectSession();
+#endif
virtual sp<IAudioRecord> openRecord(
pid_t pid,
@@ -141,7 +162,9 @@ public:
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
virtual void registerClient(const sp<IAudioFlingerClient>& client);
-
+#ifdef QCOM_HARDWARE
+ virtual status_t deregisterClient(const sp<IAudioFlingerClient>& client);
+#endif
virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask) const;
@@ -216,6 +239,13 @@ public:
Parcel* reply,
uint32_t flags);
+#ifdef QCOM_HARDWARE
+ void applyEffectsOn(void *token,
+ int16_t *buffer1,
+ int16_t *buffer2,
+ int size);
+#endif
+
// end of IAudioFlinger interface
class SyncEvent;
@@ -314,7 +344,7 @@ private:
public:
NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<IAudioFlingerClient>& client,
- pid_t pid);
+ sp<IBinder> binder);
virtual ~NotificationClient();
sp<IAudioFlingerClient> audioFlingerClient() const { return mAudioFlingerClient; }
@@ -327,7 +357,7 @@ private:
NotificationClient& operator = (const NotificationClient&);
const sp<AudioFlinger> mAudioFlinger;
- const pid_t mPid;
+ sp<IBinder> mBinder;
const sp<IAudioFlingerClient> mAudioFlingerClient;
};
@@ -343,6 +373,9 @@ private:
class EffectModule;
class EffectHandle;
class EffectChain;
+#ifdef QCOM_HARDWARE
+ struct AudioSessionDescriptor;
+#endif
struct AudioStreamOut;
struct AudioStreamIn;
@@ -570,6 +603,9 @@ private:
virtual status_t setParameters(const String8& keyValuePairs);
virtual String8 getParameters(const String8& keys) = 0;
virtual void audioConfigChanged_l(int event, int param = 0) = 0;
+#ifdef QCOM_HARDWARE
+ void effectConfigChanged();
+#endif
void sendIoConfigEvent(int event, int param = 0);
void sendIoConfigEvent_l(int event, int param = 0);
void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
@@ -1400,6 +1436,115 @@ private:
sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId);
// server side of the client's IAudioTrack
+#ifdef QCOM_HARDWARE
+ class DirectAudioTrack : public android::BnDirectTrack,
+ public AudioEventObserver
+ {
+ public:
+ DirectAudioTrack(const sp<AudioFlinger>& audioFlinger,
+ int output, AudioSessionDescriptor *outputDesc,
+ IDirectTrackClient* client, audio_output_flags_t outflag);
+ virtual ~DirectAudioTrack();
+ virtual status_t start();
+ virtual void stop();
+ virtual void flush();
+ virtual void mute(bool);
+ virtual void pause();
+ virtual ssize_t write(const void *buffer, size_t bytes);
+ virtual void setVolume(float left, float right);
+ virtual int64_t getTimeStamp();
+ virtual void postEOS(int64_t delayUs);
+
+ virtual status_t onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+ private:
+
+ IDirectTrackClient* mClient;
+ AudioSessionDescriptor *mOutputDesc;
+ int mOutput;
+ bool mIsPaused;
+ audio_output_flags_t mFlag;
+
+ class BufferInfo {
+ public:
+ BufferInfo(void *buf1, void *buf2, int32_t nSize) :
+ localBuf(buf1), dspBuf(buf2), memBufsize(nSize)
+ {}
+
+ void *localBuf;
+ void *dspBuf;
+ uint32_t memBufsize;
+ uint32_t bytesToWrite;
+ };
+ List<BufferInfo> mBufPool;
+ List<BufferInfo> mEffectsPool;
+
+ void allocateBufPool();
+ void deallocateBufPool();
+
+ //******Effects*************
+ static void *EffectsThreadWrapper(void *me);
+ void EffectsThreadEntry();
+ // make sure the Effects thread also exited
+ void requestAndWaitForEffectsThreadExit();
+ void createEffectThread();
+ Condition mEffectCv;
+ Mutex mEffectLock;
+ pthread_t mEffectsThread;
+ bool mKillEffectsThread;
+ bool mEffectsThreadAlive;
+ bool mEffectConfigChanged;
+
+ // Structure to receive the Effect notification from the flinger.
+ class AudioFlingerDirectTrackClient: public IBinder::DeathRecipient, public BnAudioFlingerClient {
+ public:
+ AudioFlingerDirectTrackClient(void *obj);
+
+ DirectAudioTrack *pBaseClass;
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters up to date in the client process
+ virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+
+ friend class DirectAudioTrack;
+ };
+ // helper function to obtain AudioFlinger service handle
+ sp<AudioFlinger> mAudioFlinger;
+ sp<AudioFlingerDirectTrackClient> mAudioFlingerClient;
+
+ void clearPowerManager();
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(void *obj){parentClass = (DirectAudioTrack *)obj;}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ DirectAudioTrack *parentClass;
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ friend class DirectAudioTrack;
+ };
+
+ friend class PMDeathRecipient;
+
+ Mutex pmLock;
+ void acquireWakeLock();
+ void releaseWakeLock();
+
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ sp<PMDeathRecipient> mDeathRecipient;
+ };
+#endif
+
class TrackHandle : public android::BnAudioTrack {
public:
TrackHandle(const sp<PlaybackThread::Track>& track);
@@ -1424,7 +1569,7 @@ private:
};
void removeClient_l(pid_t pid);
- void removeNotificationClient(pid_t pid);
+ void removeNotificationClient(sp<IBinder> binder);
// record thread
@@ -1636,7 +1781,14 @@ private:
void *pReplyData);
void reset_l();
+#ifdef QCOM_HARDWARE
+ status_t configure(bool isForLPA = false,
+ int sampleRate = 0,
+ int channelCount = 0,
+ int frameCount = 0);
+#else
status_t configure();
+#endif
status_t init();
effect_state state() const {
return mState;
@@ -1683,7 +1835,10 @@ private:
bool purgeHandles();
void lock() { mLock.lock(); }
void unlock() { mLock.unlock(); }
-
+#ifdef QCOM_HARDWARE
+ bool isOnLPA() { return mIsForLPA;}
+ void setLPAFlag(bool isForLPA) {mIsForLPA = isForLPA; }
+#endif
void dump(int fd, const Vector<String16>& args);
protected:
@@ -1715,6 +1870,9 @@ mutable Mutex mLock; // mutex for process, commands and handl
// sending disable command.
uint32_t mDisableWaitCnt; // current process() calls count during disable period.
bool mSuspended; // effect is suspended: temporarily disabled by framework
+#ifdef QCOM_HARDWARE
+ bool mIsForLPA;
+#endif
};
// The EffectHandle class implements the IEffect interface. It provides resources
@@ -1823,12 +1981,18 @@ mutable Mutex mLock; // mutex for process, commands and handl
status_t addEffect_l(const sp<EffectModule>& handle);
size_t removeEffect_l(const sp<EffectModule>& handle);
+#ifdef QCOM_HARDWARE
+ size_t getNumEffects() { return mEffects.size(); }
+#endif
int sessionId() const { return mSessionId; }
void setSessionId(int sessionId) { mSessionId = sessionId; }
sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
sp<EffectModule> getEffectFromId_l(int id);
+#ifdef QCOM_HARDWARE
+ sp<EffectModule> getEffectFromIndex_l(int idx);
+#endif
sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type);
bool setVolume_l(uint32_t *left, uint32_t *right);
void setDevice_l(audio_devices_t device);
@@ -1874,6 +2038,10 @@ mutable Mutex mLock; // mutex for process, commands and handl
void clearInputBuffer();
void dump(int fd, const Vector<String16>& args);
+#ifdef QCOM_HARDWARE
+ bool isForLPATrack() {return mIsForLPATrack; }
+ void setLPAFlag(bool flag) {mIsForLPATrack = flag;}
+#endif
protected:
friend class AudioFlinger; // for mThread, mEffects
@@ -1922,6 +2090,9 @@ mutable Mutex mLock; // mutex for process, commands and handl
uint32_t mNewLeftVolume; // new volume on left channel
uint32_t mNewRightVolume; // new volume on right channel
uint32_t mStrategy; // strategy for this effect chain
+#ifdef QCOM_HARDWARE
+ bool mIsForLPATrack;
+#endif
// mSuspendedEffects lists all effects currently suspended in the chain.
// Use effect type UUID timelow field as key. There is no real risk of identical
// timeLow fields among effect type UUIDs.
@@ -1983,7 +2154,21 @@ mutable Mutex mLock; // mutex for process, commands and handl
AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in) :
audioHwDev(dev), stream(in) {}
};
-
+#ifdef QCOM_HARDWARE
+ struct AudioSessionDescriptor {
+ bool mActive;
+ int mStreamType;
+ float mVolumeLeft;
+ float mVolumeRight;
+ audio_hw_device_t *hwDev;
+ audio_stream_out_t *stream;
+ audio_output_flags_t flag;
+ void *trackRefPtr;
+ audio_devices_t device;
+ AudioSessionDescriptor(audio_hw_device_t *dev, audio_stream_out_t *out, audio_output_flags_t outflag) :
+ hwDev(dev), stream(out), flag(outflag) {}
+ };
+#endif
// for mAudioSessionRefs only
struct AudioSessionRef {
AudioSessionRef(int sessionid, pid_t pid) :
@@ -2043,14 +2228,25 @@ mutable Mutex mLock; // mutex for process, commands and handl
DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> > mRecordThreads;
- DefaultKeyedVector< pid_t, sp<NotificationClient> > mNotificationClients;
+ DefaultKeyedVector< sp<IBinder>, sp<NotificationClient> > mNotificationClients;
volatile int32_t mNextUniqueId; // updated by android_atomic_inc
audio_mode_t mMode;
bool mBtNrecIsOff;
-
+#ifdef QCOM_HARDWARE
+ DefaultKeyedVector<audio_io_handle_t, AudioSessionDescriptor *> mDirectAudioTracks;
+#endif
// protected by mLock
+#ifdef QCOM_HARDWARE
+ volatile bool mIsEffectConfigChanged;
+#endif
Vector<AudioSessionRef*> mAudioSessionRefs;
-
+#ifdef QCOM_HARDWARE
+ sp<EffectChain> mLPAEffectChain;
+ int mLPASessionId;
+ int mLPASampleRate;
+ int mLPANumChannels;
+ volatile bool mAllChainsLocked;
+#endif
float masterVolume_l() const;
bool masterMute_l() const;
audio_module_handle_t loadHwModule_l(const char *name);