author    Krishnankutty Kolathappilly <kkolat@codeaurora.org>   2012-08-22 19:24:48 -0700
committer Steve Kondik <shade@chemlab.org>                      2013-06-18 19:17:54 -0700
commit    1216ed8d00080619bb04c398bb0072945c58511b (patch)
tree      82b758f07572e1cd2c4af8b50fecd777937fbe1f
parent    eaa35b4fdd81af7dd6ad35a10d9eb27ff9758739 (diff)
Squashed commit of A/V changes from CodeAurora
* Brings us current with AU_LINUX_ANDROID_JB_2.5.04.02.02.040.367

Camera: Fix deadlock due to mLock in preview callback and takePicture
In the non-ZSL takePicture case we issue a streamoff for the preview stream, which waits for the preview callback thread to exit. By that time the lock has already been acquired by takePicture, so the preview callback cannot exit until it acquires the lock, and takePicture cannot continue until the preview callback returns. Fix: avoid taking mLock in the service when both the preview callback and the compressed callback are enabled.
Change-Id: I6c264928bf1540c7b51f1add65f9c3e968506e15
CRs-fixed: 479419

audioflinger: Fix the LPA-AudioEffects crash issue
- Issue: a crash is observed during LPA playback when effects are enabled and a wired headset is then unplugged and re-plugged.
- Root cause: when deleting the effect chain in deleteEffect, the EffectChain is unlocked after the chain is cleared, which accesses a lock that may already have been deleted.
- Fix: unlock the EffectChain first, then call clear.
CRs-Fixed: 491774
Change-Id: I518ff086c5ad71486cd29142563145137ebc15b6

libstagefright: Fix crash in sound recorder during device switch
- A crash is seen in the sound recorder during frequent insertion and removal of a wired headset.
- During a device switch the codec's input buffers can be too small to accommodate the buffer read from the source. OMXCodec does not read a fixed-size buffer from the source, so when the read exceeds the input buffer size the codec enters an error state, which leads to the crash.
- Increasing the input buffer size fixes this issue.
Change-Id: Id15378670880d0c3c0bd4408841b28be963549a0
CRs-Fixed: 488449

libstagefright: Fix FPS drop during A/V playback
Issues:
- The AAC decoder was not updating the timestamp when EOS is reached.
- The logic that smooths the real-time update in AudioPlayer uses system time, which produces a corrupt timestamp at EOS.
Fix:
- Update the timestamp in the AAC decoder when EOS is reached.
- Extrapolate real time using system time in AudioPlayer when EOS is reached, and cap the value at the real-time position if the extrapolated time exceeds it.
CRs-Fixed: 384183
Change-Id: Ice54501436431d2527fcd3d710d65d9732fcffdd

libstagefright: Reset buffer size value with SurfaceTexture
- OMXCodec explicitly sets the decoder output buffer size using the native window perform API (to accommodate extra data).
- This size is reset only when the SurfaceTexture is destroyed.
- Unless reset, this size will be assumed for all output buffers if the SurfaceTexture is re-used.
CRs-Fixed: 337660, 432309
Change-Id: I28aed12ad02adeac61caffbb00e3082640a5f6d4

audio: Add support for tunnel mode recording
Change-Id: I95cdfff729affd784141487521c9f2f714221d11

audio: Add support for non-PCM VoIP vocoders
- Non-PCM vocoders use AUDIO_SOURCE_VOICE_COMMUNICATION as the inputSource; add a check for the inputSource and configure the frame count accordingly.
Change-Id: Ia38da4f6ba0ee40c794d3c97325327cdb7dcb32a
CRs-Fixed: 467850

frameworks/av: Add metadata mode changes to LPAPlayer
- Seek to EOS caused playback to hang for 3 seconds before switching to the next clip.
- The LPA driver works on a fixed period size, so partial buffers were not handled.
- Add metadata mode support to LPAPlayer to handle partial frames.
CRs-Fixed: 458904
Change-Id: I8673756b54ae7bca18855d326c85ae1064652514

libstagefright: Add support for WMA in ACodec
- WMA support was missing from ACodec.
- For WMA content the full version information is not available when onAllocateComponent runs, so the component is created in onConfigureComponent instead. bitsPerSample is read as "bsps" from the AMessage when configuring the WMA10PRO and WMALOSSLESS formats.
CRs-Fixed: 453951
Change-Id: I98baa701dbf8a5c012f4be5e83831c0be2111dcc

libstagefright: Flush the pending buffers when EOS is received
For the case where the first frame in the buffer is EOS, decode the AAC config frame to update the sample rate and channel mode, then flush the buffer.
Change-Id: I0354802cdbf61ac1ba0fecbbdf616705806b0f4a
CRs-Fixed: 459334

audio: Fix The Linux Foundation copyright
- Fix the copyright format based on The Linux Foundation copyright template.
Change-Id: I100a5c86302d1a1a3d79543d95e242734daae746

media, audioflinger: Check for divide-by-zero possibilities and err
When an output stream is not available to AudioFlinger for any reason, sampleRate and frameCount are zero while creating a new AudioTrack, which can result in a divide-by-zero.
Change-Id: Ic13cb51facb8497e68ab596abb027b44f496b907
CRs-Fixed: 478480

frameworks/av: Fix ANR at the end of video recording
- During multi-hour stress tests, an ANR was observed when recording ends.
- When recording is stopped, the audio HAL receives an error from the driver and propagates it to AudioFlinger, but AudioFlinger does not forward the error status to the audio source to stop recording. The AudioRecord thread therefore keeps waiting for buffers, which results in the ANR.
- To avoid an indefinite wait, a 1-second buffer timeout is set in AudioSource; after the timeout, -ETIMEDOUT is returned to the recorder thread.
CRs-Fixed: 479968
Change-Id: I91aba6922086e711992d9d991dea9c35d33eaee9

audioflinger: Integrate SRS TruMedia
Change-Id: If61ae91556120ddd5f5ebcc6dbbfe6583c7df67d

audioflinger: Apply SRS effects when system tones are disabled in tunnel mode
For SRS post-processing in tunnel mode, the SRS APIs are called only from write. With the large buffering used for tunnel mode there are no further writes once EOS is received. With system tones enabled, the SRS APIs are still called during the parameter-change check in the normal mixer thread; with system tones disabled, SRS is not applied after EOS because there is no write and the mixer thread is not active. Fix the issue by adding an effects thread for SRS in tunnel mode. Also fix the compilation issue when ALOGV messages are enabled.
Change-Id: Ic7e62894840f786119dfe8ae471c5d24812917d7

audioflinger: Enhance LPA-effect logic to handle rapid config changes
- Issue: rapid config events cause pops/glitches and raw-data playback.
- Root cause 1 (raw data leaking to the DSP): applyEffectsOn() applies effects chunk by chunk in a loop; if the effects change during this time the loop exits, producing a buffer that is partly effects-processed and partly raw, and the raw data leaks to the DSP.
- Root cause 2: the effects thread works directly on the DSP buffers while the DSP is rendering from them, so that effect application is instantaneous; all effects in the chain update the DSP buffers one after another, which can create unpredictable rendering patterns. Root causes 1 and 2 combined appear to fragment the memory into regions with effects applied and regions with raw data.
- Fix 1: do not update DSP memory unless the effects have been applied completely to a buffer.
- Fix 2: the effect chain works on a temporary scratch buffer instead of DSP memory; once effects are applied completely to the scratch buffer, memcpy it to DSP memory so the DSP memory is updated in one shot (a sketch of this approach follows this commit message).
- Remove repetitive logs that clutter logcat when messages are enabled in audioflinger.
Change-Id: I9051e7b8531aa5c8cb3dcfafe0be3136a2cf0f9d
CRs-Fixed: 463880

frameworks/av: Update framecount and buffersize values
- The frame count should be calculated from the mMaxBufferSize returned by the HAL.
- Update the buffer size with the value returned by the HAL.
CRs-Fixed: 482744
Change-Id: I90dd9c3ebbbc8a9f1f2f92c5347ae9cb01719e13

audioflinger: Fix the LPA-AudioEffects deadlock issue
- Issue: a deadlock occurs when LPA clips are subjected to rapid "next" commands from a BT device while audio effects are simultaneously toggled on and off.
- Root cause: the flinger thread processing the LPAPlayer/DirectTrack "next" sometimes deadlocks with the thread handling effect configuration, as both contend for AudioFlinger::mLock and EffectModule::mLock.
- Fix 1: AudioFlinger::deleteEffectSession() takes mLPAEffectChain.mLock instead of AudioFlinger::mLock.
- Fix 2: ThreadBase::effectConfigChanged() does not acquire AudioFlinger::mLock.
Change-Id: I056c8297802f81644fa1371836db42bdbd3825fd
CRs-Fixed: 477511

libstagefright: Add support for High Frame Rate (HFR) encoding
- Based on the kKeyHfr key in the metadata, add HFR support in OMXCodec and MPEG4Writer.
- Assume normal-mode recording if kKeyHfr is absent.
- Increase the bit rate for HFR recording to reflect the corresponding increase in frame rate.
Change-Id: I0a69f8d9322a768677781d08dd910dc5772c5292

libstagefright: Support some user-defined properties
- Support a property to disable audio.
- Support a property to change the recorder profile mode.
- Support B-frame encoding.
Change-Id: I175decec83f6027cbd7988caf680f7fec2836f83
CRs-Fixed: 443327

libstagefright: Add support for H/W AAC decoder
- Currently only software AAC decoding is supported.
- Add H/W AAC decoding by including it in the list of available decoders; it is used only if the property 'media.aaccodectype' is set to 0.
Change-Id: I4bb9df1bd10bd8ee91e63dadd6c473fc4e29813a
CRs-Fixed: 449145

libstagefright: Move checks for creating a new extractor into ExtendedExtractor
- Move all the checks and the creation of the extended extractor into ExtendedExtractor.
- Restrict creation of a new extractor to the following conditions:
  o the default extractor is NULL
  o the default extractor reports video-only content or an unrecognized audio stream
  o the audio stream is AMR-WB (plus)
- This avoids unnecessarily creating two extractors and improves startup latency.
CRs-Fixed: 462087
Change-Id: Ia87eca73c4f81d37697fa85fd4f7c8cc8d406104

[StageFright] Enable 4-channel support
This patch enables 4-channel WAV audio support and fixes an invalid data size in the WAV header field when it exceeds the actual source size. It is needed to support WebAudio in WebKit, as some of the Chrome demos use 4-channel WAV audio with bogus header information.
Change-Id: I307026107ab4e4342b1c0d7bb64761a416fb2c65

audioflinger: Fix crash on LPA shutdown
* Decrement the refcount after unlocking the mutex.
Change-Id: Ic3210700e0aaf5e8df78f85f501621a455058e24

libstagefright: Accept vendor-specific NV12 color format from component
- Accept the OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar32m color format, which is NV12 with 32-aligned stride and slice.
- This differs from vanilla NV12, which is 16-aligned.
Change-Id: I6de2ec3a78215dbcc28a6006b746e3e0afe69c3c

libstagefright: Various fixes for avc_utils
- Skip the seq_scaling_matrix_present_flag assertion when checking for the interlaced property.
- Move the interlace check outside the if-block.
Change-Id: Ia5854110feb1c56ddc86b312d2ba2dbb73d37804
CRs-Fixed: 445527, 445692

libstagefright: Print stats at end of playback
- Print statistics to logcat before reset at the end of playback.
- Print statistics after each pause and seek.
Change-Id: I68edcc3153a04209e7382e4d3fba0bf734f3e33f
CRs-Fixed: 457926, 447109

frameworks/base: Fix playback of a specific MP4 clip failing with SYNCH_LOST_ERROR
- A specific MP4 clip could not be played.
- MP3 playback stops if the decoder errors out with SYNCH_LOST_ERROR.
- Ignore the frame with SYNCH_LOST_ERROR and play silence instead.
Change-Id: I6b94a83cf89e8bc6792d8ee3804042d629aa505b

Add checks before removing an active buffer in OMXNodeInstance
With this change, OMXNodeInstance removes a buffer from its active list only if OMX_FreeBuffer returns successfully.
Change-Id: I685b39ac7ba762a2fc1b64d7f6c1efd391513598

libstagefright: Add interlaced video support
- Add a call to set the output buffer size on the native window.
Change-Id: If4a67b3f877bef557c46bb67b29d1e7051553335

audio: Fix AMR-WB parameters being overwritten
- Overwrite the AMR-WB parameters with default values only when setParameters is not invoked.
CRs-Fixed: 456459
Change-Id: I3fa6b56101ca408ed5b5b82707c6dc75a9d9f17b

audio: Fix encoder parameters for the AMR-WB format
- The AMR-WB encoder only accepts a 16 kHz sample rate and a channel count of 1; always overwrite the AMR-WB sample rate and channel count with the default values.
- The AMR-WB encoder accepts bit rates from 6.6k to 23.85k; only overwrite the AMR-WB bit rate with the default (23.85k) if setParameters() is not invoked.
Change-Id: I75a96b54ef04bc59dab9074ec112071e62fd51aa
CRs-Fixed: 460931

stagefright: Add QCOM_BSP ifdefs for interlaced video handling
Change-Id: I856ae4a97f1bf13ab18d386b3486e742a4804b2a

Camera: Changes to support camcorder profiles.
Change-Id: I9c4bf14f273839fd36d5f52db0f215873e8291a0

av: Ifdef all the things!
Change-Id: If9dd6c6442e9d2ac9e55e48369f2da85f5f951f7

Camera: Add profiles for camcorder.
Change-Id: Icdaf1fae0018de1fb04f41125cfbe34a91b5eda7

libvideoeditor: Use vWidth and vHeight for buffer allocation
- The video editor detects crop information from the decoder; the crop width and height override the metadata width and height.
- The decoder can send crop information in which the crop width and height are smaller than the actual resolution.
- Use the actual metadata width and height when calculating the buffer size.
Change-Id: Id1d77c316e3892e6d51a00418052f256629f495f
CRs-Fixed: 452511

Add ifdefs around enhanced media types
Change-Id: I64b8853660ac4fe90ddb218b237f63b635cdb47b
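A minimal sketch of the scratch-buffer approach described in the "Enhance LPA-effect logic" entry above. The Effect interface and all names below are illustrative only and do not correspond to the actual AudioFlinger/EffectChain symbols touched by the diff.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Effect {
    virtual ~Effect() {}
    virtual void process(int16_t *buf, size_t frames) = 0;  // in-place 16-bit PCM
};

// Apply the whole chain to a private scratch copy, then publish the result to
// the DSP-visible buffer in a single memcpy, so the DSP never renders a buffer
// that is partly raw and partly effect-processed.
void applyEffectsOn(const int16_t *in, int16_t *dspBuffer, size_t frames,
                    const std::vector<Effect *> &chain) {
    std::vector<int16_t> scratch(in, in + frames);
    for (Effect *e : chain) {
        e->process(scratch.data(), frames);  // chain touches only the scratch buffer
    }
    std::memcpy(dspBuffer, scratch.data(), frames * sizeof(int16_t));  // one-shot DSP update
}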
-rw-r--r--  include/media/MediaPlayerInterface.h | 6
-rwxr-xr-x [-rw-r--r--]  include/media/MediaProfiles.h | 17
-rw-r--r--  include/media/stagefright/AudioSource.h | 14
-rw-r--r--  include/media/stagefright/MediaCodecList.h | 4
-rw-r--r--  include/media/stagefright/MediaDefs.h | 24
-rw-r--r--  include/media/stagefright/OMXCodec.h | 4
-rw-r--r--  include/media/stagefright/QCOMXCodec.h | 2
-rwxr-xr-x  libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp | 5
-rw-r--r--  media/libmedia/AudioRecord.cpp | 93
-rw-r--r--  media/libmedia/AudioTrack.cpp | 49
-rwxr-xr-x [-rw-r--r--]  media/libmedia/MediaProfiles.cpp | 28
-rw-r--r--  media/libmediaplayerservice/Android.mk | 2
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.h | 6
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 87
-rw-r--r--  media/libstagefright/ACodec.cpp | 220
-rwxr-xr-x  media/libstagefright/Android.mk | 3
-rw-r--r--  media/libstagefright/AudioPlayer.cpp | 13
-rw-r--r--  media/libstagefright/AudioSource.cpp | 195
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp | 58
-rwxr-xr-x  media/libstagefright/CameraSource.cpp | 10
-rw-r--r--  media/libstagefright/LPAPlayerALSA.cpp | 5
-rwxr-xr-x  media/libstagefright/MPEG4Writer.cpp | 14
-rw-r--r--  media/libstagefright/MediaCodecList.cpp | 12
-rw-r--r--  media/libstagefright/MediaExtractor.cpp | 53
-rw-r--r--  media/libstagefright/OMXCodec.cpp | 108
-rw-r--r--  media/libstagefright/QCOMXCodec.cpp | 13
-rw-r--r--  media/libstagefright/QCUtilityClass.cpp | 356
-rw-r--r--  media/libstagefright/SampleTable.cpp | 5
-rw-r--r--  media/libstagefright/Utils.cpp | 51
-rw-r--r--  media/libstagefright/WAVExtractor.cpp | 8
-rw-r--r--  media/libstagefright/avc_utils.cpp | 14
-rw-r--r--  media/libstagefright/codecs/aacdec/SoftAAC2.cpp | 9
-rw-r--r--  media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp | 2
-rw-r--r--  media/libstagefright/codecs/mp3dec/SoftMP3.cpp | 13
-rw-r--r--  media/libstagefright/include/AwesomePlayer.h | 1
-rw-r--r--  media/libstagefright/include/QCUtilityClass.h | 111
-rw-r--r--  media/libstagefright/omx/OMXNodeInstance.cpp | 13
-rw-r--r--  media/mediaserver/Android.mk | 4
-rw-r--r--  services/audioflinger/Android.mk | 8
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 405
-rw-r--r--  services/audioflinger/AudioFlinger.h | 19
-rw-r--r--  services/camera/libcameraservice/CameraClient.cpp | 6
43 files changed, 1859 insertions(+), 213 deletions(-)
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 5f9eb01..e17bbbe 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -1,8 +1,8 @@
/*
- * Copyright (C) 2007 The Android Open Source Project
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- * Not a Contribution, Apache license notifications and license are retained
- * for attribution purposes only.
+ * Not a Contribution.
+ *
+ * Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 0df9fd4..9ea3449 100644..100755
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -33,12 +33,11 @@ enum camcorder_quality {
CAMCORDER_QUALITY_480P = 4,
CAMCORDER_QUALITY_720P = 5,
CAMCORDER_QUALITY_1080P = 6,
- CAMCORDER_QUALITY_QVGA = 11,
- CAMCORDER_QUALITY_FWVGA = 7,
- CAMCORDER_QUALITY_WVGA = 8,
- CAMCORDER_QUALITY_VGA = 9,
- CAMCORDER_QUALITY_WQVGA = 10,
-
+ CAMCORDER_QUALITY_QVGA = 7,
+ CAMCORDER_QUALITY_FWVGA = 8,
+ CAMCORDER_QUALITY_WVGA = 9,
+ CAMCORDER_QUALITY_VGA = 10,
+ CAMCORDER_QUALITY_WQVGA = 11,
CAMCORDER_QUALITY_LIST_END = 11,
CAMCORDER_QUALITY_TIME_LAPSE_LIST_START = 1000,
@@ -50,7 +49,11 @@ enum camcorder_quality {
CAMCORDER_QUALITY_TIME_LAPSE_720P = 1005,
CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006,
CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
- CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1007,
+ CAMCORDER_QUALITY_TIME_LAPSE_FWVGA = 1008,
+ CAMCORDER_QUALITY_TIME_LAPSE_WVGA = 1009,
+ CAMCORDER_QUALITY_TIME_LAPSE_VGA = 1010,
+ CAMCORDER_QUALITY_TIME_LAPSE_WQVGA = 1011,
+ CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1011,
};
/**
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 4489254..33081cc 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -104,6 +105,19 @@ private:
AudioSource(const AudioSource &);
AudioSource &operator=(const AudioSource &);
+
+#ifdef QCOM_HARDWARE
+ //additions for tunnel source
+public:
+ AudioSource(
+ audio_source_t inputSource, const sp<MetaData>& meta );
+
+private:
+ audio_format_t mFormat;
+ String8 mMime;
+ int32_t mMaxBufferSize;
+ int64_t bufferDurationUs( ssize_t n );
+#endif
};
} // namespace android
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index dfb845b..8f2b624 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -99,6 +99,10 @@ private:
status_t addTypeFromAttributes(const char **attrs);
void addType(const char *name);
+#ifdef QCOM_HARDWARE
+ friend class QCUtilityClass;
+#endif
+
DISALLOW_EVIL_CONSTRUCTORS(MediaCodecList);
};
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index f63926c..db8d89e 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +32,6 @@ extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
@@ -43,6 +43,7 @@ extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
@@ -57,6 +58,27 @@ extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
+#ifdef QCOM_HARDWARE
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_LBR;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
+extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_3G2;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AAC;
+extern const char *MEDIA_MIMETYPE_CONTAINER_ASF;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCP;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX311;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX4;
+extern const char *MEDIA_MIMETYPE_VIDEO_WMV;
+#endif
+
} // namespace android
#endif // MEDIA_DEFS_H_
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index bdd35a4..630f5af 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
- * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010 - 2013, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -392,7 +392,9 @@ private:
#ifdef QCOM_HARDWARE
status_t setWMAFormat(const sp<MetaData> &inputFormat);
void setAC3Format(int32_t numChannels, int32_t sampleRate);
+ bool mNumBFrames;
#endif
+
};
struct CodecCapabilities {
diff --git a/include/media/stagefright/QCOMXCodec.h b/include/media/stagefright/QCOMXCodec.h
index ee6ea88..333487d 100644
--- a/include/media/stagefright/QCOMXCodec.h
+++ b/include/media/stagefright/QCOMXCodec.h
@@ -96,6 +96,8 @@ struct QCOMXCodec {
static void checkIfInterlaced(const uint8_t *ptr, const sp<MetaData> &meta);
+ static bool useHWAACDecoder(const char *mime);
+
};
}
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
index 21d3c30..07e20dc 100755
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
@@ -830,8 +830,13 @@ M4OSA_ERR VideoEditorVideoDecoder_configureFromMetadata(M4OSA_Context pContext,
pDecShellContext->mCropRect.top = cropTop;
pDecShellContext->mCropRect.bottom = cropBottom;
+#ifdef QCOM_HARDWARE
+ width = vWidth;
+ height = vHeight;
+#else
width = cropRight - cropLeft + 1;
height = cropBottom - cropTop + 1;
+#endif
ALOGV("VideoDecoder_configureFromMetadata : W=%d H=%d", width, height);
VIDEOEDITOR_CHECK((0 != width) && (0 != height), M4ERR_PARAMETER);
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 2725b5b..087a567 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -1,7 +1,7 @@
/*
**
** Copyright 2008, The Android Open Source Project
-** Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+** Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -136,6 +136,7 @@ status_t AudioRecord::set(
ALOGV("set(): sampleRate %d, channelMask %#x, frameCount %d",sampleRate, channelMask, frameCount);
AutoMutex lock(mLock);
+ status_t status;
if (mAudioRecord != 0) {
return INVALID_OPERATION;
@@ -185,12 +186,61 @@ status_t AudioRecord::set(
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ size_t inputBuffSizeInBytes = -1;
+ if (AudioSystem::getInputBufferSize(sampleRate, format, channelCount, &inputBuffSizeInBytes)
+ != NO_ERROR) {
+ ALOGE("AudioSystem could not query the input buffer size.");
+ return NO_INIT;
+ }
+ ALOGV("AudioRecord::set() inputBuffSizeInBytes = %d", inputBuffSizeInBytes );
+
+ if (inputBuffSizeInBytes == 0) {
+ ALOGE("Recording parameters are not supported: sampleRate %d, channelCount %d, format %d",
+ sampleRate, channelCount, format);
+ return BAD_VALUE;
+ }
+
+ // Change for Codec type
+ int frameSizeInBytes = 0;
+ if(inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION) {
+ if (audio_is_linear_pcm(format)) {
+ frameSizeInBytes = channelCount * (format == AUDIO_FORMAT_PCM_16_BIT ? sizeof(int16_t)
+: sizeof(int8_t));
+ } else {
+ frameSizeInBytes = channelCount *sizeof(int16_t);
+ }
+ } else {
+ if (format ==AUDIO_FORMAT_AMR_NB) {
+ frameSizeInBytes = channelCount * 32; // Full rate framesize
+ } else if (format ==AUDIO_FORMAT_EVRC) {
+ frameSizeInBytes = channelCount * 23; // Full rate framesize
+ } else if (format ==AUDIO_FORMAT_QCELP) {
+ frameSizeInBytes = channelCount * 35; // Full rate framesize
+ } else if (format ==AUDIO_FORMAT_AAC) {
+ frameSizeInBytes = 2048;
+ } else if ((format ==AUDIO_FORMAT_PCM_16_BIT) || (format ==AUDIO_FORMAT_PCM_8_BIT)) {
+ if (audio_is_linear_pcm(format)) {
+ frameSizeInBytes = channelCount * (format == AUDIO_FORMAT_PCM_16_BIT ? sizeof(int16_t) : sizeof(int8_t));
+ } else {
+ frameSizeInBytes = sizeof(int8_t);
+ }
+ } else if(format == AUDIO_FORMAT_AMR_WB) {
+ frameSizeInBytes = channelCount * 61;
+
+ }
+ }
+ // We use 2* size of input buffer for ping pong use of record buffer.
+ int minFrameCount = 2 * inputBuffSizeInBytes / frameSizeInBytes;
+#else
// validate framecount
int minFrameCount = 0;
- status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask);
+ status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask);
if (status != NO_ERROR) {
return status;
}
+#endif
+
ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);
if (frameCount == 0) {
@@ -203,6 +253,10 @@ status_t AudioRecord::set(
notificationFrames = frameCount/2;
}
+#ifdef QCOM_HARDWARE
+ //update mInputSource before openRecord_l
+ mInputSource = inputSource;
+#endif
// create the IAudioRecord
status = openRecord_l(sampleRate, format, channelMask,
frameCount, input);
@@ -233,7 +287,9 @@ status_t AudioRecord::set(
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
+#ifndef QCOM_HARDWARE
mInputSource = inputSource;
+#endif
mInput = input;
AudioSystem::acquireAudioSessionId(mSessionId);
@@ -269,11 +325,36 @@ uint32_t AudioRecord::frameCount() const
size_t AudioRecord::frameSize() const
{
- if (audio_is_linear_pcm(mFormat)) {
- return channelCount()*audio_bytes_per_sample(mFormat);
+#ifdef QCOM_HARDWARE
+ if(inputSource() == AUDIO_SOURCE_VOICE_COMMUNICATION) {
+ if (audio_is_linear_pcm(mFormat)) {
+ return channelCount()*audio_bytes_per_sample(mFormat);
+ } else {
+ return channelCount()*sizeof(int16_t);
+ }
} else {
- return sizeof(uint8_t);
+ if (format() ==AUDIO_FORMAT_AMR_NB) {
+ return channelCount() * 32; // Full rate framesize
+ } else if (format() == AUDIO_FORMAT_EVRC) {
+ return channelCount() * 23; // Full rate framesize
+ } else if (format() == AUDIO_FORMAT_QCELP) {
+ return channelCount() * 35; // Full rate framesize
+ } else if (format() == AUDIO_FORMAT_AAC) {
+ // Not actual framsize but for variable frame rate AAC encoding,
+ // buffer size is treated as a frame size
+ return 2048;
+ } else if(format() == AUDIO_FORMAT_AMR_WB) {
+ return channelCount() * 61;
+ }
+#endif
+ if (audio_is_linear_pcm(mFormat)) {
+ return channelCount()*audio_bytes_per_sample(mFormat);
+ } else {
+ return sizeof(uint8_t);
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
}
audio_source_t AudioRecord::inputSource() const
@@ -453,7 +534,7 @@ status_t AudioRecord::openRecord_l(
sampleRate, format,
channelMask,
frameCount,
- IAudioFlinger::TRACK_DEFAULT,
+ (int16_t)inputSource(),
tid,
&mSessionId,
&status);
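A self-contained sketch of the per-format frame-size selection that the AudioRecord::set() change above uses to size the ping-pong record buffer. The RecordFormat enum and helper name are hypothetical; the byte counts mirror the full-rate frame sizes in the diff.

#include <cstddef>
#include <cstdint>

enum class RecordFormat { PCM16, PCM8, AMR_NB, AMR_WB, EVRC, QCELP, AAC };

static size_t frameSizeBytes(RecordFormat fmt, int channels) {
    switch (fmt) {
        case RecordFormat::PCM16:  return channels * sizeof(int16_t);
        case RecordFormat::PCM8:   return channels * sizeof(int8_t);
        case RecordFormat::AMR_NB: return channels * 32;  // full-rate frame size
        case RecordFormat::AMR_WB: return channels * 61;
        case RecordFormat::EVRC:   return channels * 23;
        case RecordFormat::QCELP:  return channels * 35;
        case RecordFormat::AAC:    return 2048;           // whole buffer treated as one frame
    }
    return 0;
}

// Two input-buffer-sized halves give the ping-pong arrangement used above:
//   minFrameCount = 2 * inputBufferBytes / frameSizeBytes(fmt, channels);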
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index a2f4348..1025799 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -69,25 +69,31 @@ status_t AudioTrack::getMinFrameCount(
// audio_format_t format
// audio_channel_mask_t channelMask
// audio_output_flags_t flags
- int afSampleRate;
+ int afSampleRate = 0;
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
return NO_INIT;
}
- int afFrameCount;
+ int afFrameCount = 0;
if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
return NO_INIT;
}
- uint32_t afLatency;
+ uint32_t afLatency = 0;
if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
return NO_INIT;
}
+ if(!afSampleRate || !afFrameCount) {
+ ALOGW("samplerate or framecount 0");
+ return NO_INIT;
+ }
+
// Ensure that buffer depth covers at least audio hardware latency
uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
if (minBufCount < 2) minBufCount = 2;
*frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
- afFrameCount * minBufCount * sampleRate / afSampleRate;
+ afFrameCount * minBufCount * sampleRate / afSampleRate;
+
ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
*frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
return NO_ERROR;
@@ -455,11 +461,23 @@ uint32_t AudioTrack::frameCount() const
size_t AudioTrack::frameSize() const
{
- if (audio_is_linear_pcm(mFormat)) {
- return channelCount()*audio_bytes_per_sample(mFormat);
+#ifdef QCOM_HARDWARE
+ if ((audio_stream_type_t)mStreamType == AUDIO_STREAM_VOICE_CALL) {
+ if (audio_is_linear_pcm(mFormat)) {
+ return channelCount()*audio_bytes_per_sample(mFormat);
+ } else {
+ return channelCount()*sizeof(int16_t);
+ }
} else {
- return sizeof(uint8_t);
+#endif
+ if (audio_is_linear_pcm(mFormat)) {
+ return channelCount()*audio_bytes_per_sample(mFormat);
+ } else {
+ return sizeof(uint8_t);
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
}
sp<IMemory>& AudioTrack::sharedBuffer()
@@ -980,20 +998,26 @@ status_t AudioTrack::createTrack_l(
} else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
// FIXME move these calculations and associated checks to server
- int afSampleRate;
+ int afSampleRate = 0;
if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
return NO_INIT;
}
- int afFrameCount;
+ int afFrameCount = 0;
if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
return NO_INIT;
}
+ if(!afSampleRate && !afFrameCount) {
+ ALOGW("samplerate or framecount zero");
+ return NO_INIT;
+ }
+
// Ensure that buffer depth covers at least audio hardware latency
uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
if (minBufCount < 2) minBufCount = 2;
- int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+ uint32_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+
ALOGV("minFrameCount: %d, afFrameCount=%d, minBufCount=%d, sampleRate=%d, afSampleRate=%d"
", afLatency=%d",
minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
@@ -1088,7 +1112,10 @@ status_t AudioTrack::createTrack_l(
mCblk->waitTimeMs = 0;
mRemainingFrames = mNotificationFramesAct;
// FIXME don't believe this lie
- mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
+ if(sampleRate)
+ mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
+ else
+ mLatency = afLatency;
// If IAudioTrack is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
if (mCblk->frameCount > mFrameCount) {
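A hedged sketch of the guarded minimum-frame-count calculation reflected in the AudioTrack changes above; the function and variable names are illustrative, and an extra guard on the per-buffer period is added here only to keep the example division-safe.

#include <cstdint>

// Returns false when the output stream parameters are unavailable, mirroring
// the NO_INIT early-return added in the diff above.
bool minFrameCount(uint32_t afFrameCount, uint32_t afSampleRate,
                   uint32_t afLatencyMs, uint32_t sampleRate,
                   uint32_t *outFrames) {
    if (afSampleRate == 0 || afFrameCount == 0) {
        return false;
    }
    // Buffer depth must cover at least the hardware latency.
    uint32_t afPeriodMs = (1000 * afFrameCount) / afSampleRate;
    uint32_t minBufCount = afPeriodMs ? afLatencyMs / afPeriodMs : 2;
    if (minBufCount < 2) minBufCount = 2;
    *outFrames = (sampleRate == 0)
            ? afFrameCount * minBufCount
            : afFrameCount * minBufCount * sampleRate / afSampleRate;
    return true;
}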
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index fa536a6..e1299c2 100644..100755
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -87,31 +87,35 @@ const MediaProfiles::NameToTagMap MediaProfiles::sCamcorderQualityNameMap[] = {
{"timelapse720p", CAMCORDER_QUALITY_TIME_LAPSE_720P},
{"timelapse1080p", CAMCORDER_QUALITY_TIME_LAPSE_1080P},
{"timelapseqvga", CAMCORDER_QUALITY_TIME_LAPSE_QVGA},
+ {"timelapsevga", CAMCORDER_QUALITY_TIME_LAPSE_VGA},
+ {"timelapsewvga", CAMCORDER_QUALITY_TIME_LAPSE_WVGA},
+ {"timelapsefwvga", CAMCORDER_QUALITY_TIME_LAPSE_FWVGA},
+ {"timelapsewqvga", CAMCORDER_QUALITY_TIME_LAPSE_WQVGA},
};
/*static*/ void
MediaProfiles::logVideoCodec(const MediaProfiles::VideoCodec& codec)
{
- ALOGV("video codec:");
- ALOGV("codec = %d", codec.mCodec);
- ALOGV("bit rate: %d", codec.mBitRate);
- ALOGV("frame width: %d", codec.mFrameWidth);
- ALOGV("frame height: %d", codec.mFrameHeight);
- ALOGV("frame rate: %d", codec.mFrameRate);
+ALOGV("video codec:");
+ALOGV("codec = %d", codec.mCodec);
+ALOGV("bit rate: %d", codec.mBitRate);
+ALOGV("frame width: %d", codec.mFrameWidth);
+ALOGV("frame height: %d", codec.mFrameHeight);
+ALOGV("frame rate: %d", codec.mFrameRate);
}
/*static*/ void
MediaProfiles::logAudioCodec(const MediaProfiles::AudioCodec& codec)
{
- ALOGV("audio codec:");
- ALOGV("codec = %d", codec.mCodec);
- ALOGV("bit rate: %d", codec.mBitRate);
- ALOGV("sample rate: %d", codec.mSampleRate);
- ALOGV("number of channels: %d", codec.mChannels);
+ALOGV("audio codec:");
+ALOGV("codec = %d", codec.mCodec);
+ALOGV("bit rate: %d", codec.mBitRate);
+ALOGV("sample rate: %d", codec.mSampleRate);
+ALOGV("number of channels: %d", codec.mChannels);
}
/*static*/ void
-MediaProfiles::logVideoEncoderCap(const MediaProfiles::VideoEncoderCap& cap)
+ MediaProfiles::logVideoEncoderCap(const MediaProfiles::VideoEncoderCap& cap)
{
ALOGV("video encoder cap:");
ALOGV("codec = %d", cap.mCodec);
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 1cc21d6..d978e90 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -41,10 +41,12 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_STATIC_LIBRARIES := \
libstagefright_nuplayer \
libstagefright_rtsp \
+ libmedia_helper \
LOCAL_C_INCLUDES := \
$(call include-path-for, graphics corecg) \
$(TOP)/frameworks/av/media/libstagefright/include \
+ $(TOP)/frameworks/av/include/media \
$(TOP)/frameworks/av/media/libstagefright/rtsp \
$(TOP)/frameworks/av/media/libstagefright/wifi-display \
$(TOP)/frameworks/native/include/media/openmax \
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 972de37..d41c3b5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -1,9 +1,9 @@
/*
**
-** Copyright 2008, The Android Open Source Project
** Copyright (c) 2013, The Linux Foundation. All rights reserved.
-** Not a Contribution, Apache license notifications and license are retained
-** for attribution purposes only.
+** Not a Contribution.
+**
+** Copyright (C) 2008 The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 5e8d435..4d25277 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -21,7 +21,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "StagefrightRecorder"
#include <utils/Log.h>
-
+#include <media/AudioParameter.h>
#include "StagefrightRecorder.h"
#include <binder/IPCThreadState.h>
@@ -33,13 +33,6 @@
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/AMRWriter.h>
#include <media/stagefright/AACWriter.h>
-#ifdef QCOM_HARDWARE
-#include <media/stagefright/ExtendedWriter.h>
-#include <media/stagefright/WAVEWriter.h>
-#endif
-#ifdef QCOM_FM_ENABLED
-#include <media/stagefright/FMA2DPWriter.h>
-#endif
#include <media/stagefright/CameraSource.h>
#include <media/stagefright/CameraSourceTimeLapse.h>
#include <media/stagefright/MPEG2TSWriter.h>
@@ -53,18 +46,26 @@
#include <camera/ICamera.h>
#include <camera/CameraParameters.h>
#include <gui/Surface.h>
+#include <utils/String8.h>
#include <utils/Errors.h>
#include <sys/types.h>
#include <ctype.h>
#include <unistd.h>
-#include <system/audio.h>
#ifdef QCOM_HARDWARE
+#include <media/stagefright/ExtendedWriter.h>
+#include <media/stagefright/WAVEWriter.h>
#include <QCMediaDefs.h>
#include <cutils/properties.h>
+#include <QCUtilityClass.h>
+#endif
+#ifdef QCOM_FM_ENABLED
+#include <media/stagefright/FMA2DPWriter.h>
#endif
+#include <system/audio.h>
+
#include "ARTPWriter.h"
namespace android {
@@ -117,6 +118,12 @@ status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ if(QCUtilityClass::helper_StagefrightRecoder_checkIfAudioDisable()) {
+ return OK;
+ }
+#endif
+
if (as == AUDIO_SOURCE_DEFAULT) {
mAudioSource = AUDIO_SOURCE_MIC;
} else {
@@ -168,6 +175,12 @@ status_t StagefrightRecorder::setAudioEncoder(audio_encoder ae) {
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ if(QCUtilityClass::helper_StagefrightRecoder_checkIfAudioDisable()) {
+ return OK;
+ }
+#endif
+
if (ae == AUDIO_ENCODER_DEFAULT) {
mAudioEncoder = AUDIO_ENCODER_AMR_NB;
} else {
@@ -187,7 +200,7 @@ status_t StagefrightRecorder::setAudioEncoder(audio_encoder ae) {
} else if(mAudioEncoder == AUDIO_ENCODER_AMR_WB) {
mSampleRate = 16000;
mAudioChannels = 1;
- mAudioBitRate = 23850;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 23850;
} else {
mSampleRate = mSampleRate ? mSampleRate : 8000;
mAudioChannels = mAudioChannels ? mAudioChannels : 1;
@@ -856,6 +869,47 @@ status_t StagefrightRecorder::start() {
}
sp<MediaSource> StagefrightRecorder::createAudioSource() {
+#ifdef QCOM_ENHANCED_AUDIO
+ bool tunneledSource = false;
+ const char *tunnelMime;
+ {
+ AudioParameter param;
+ String8 key("tunneled-input-formats");
+ param.add( key, String8("get") );
+ String8 valueStr = AudioSystem::getParameters( 0, param.toString());
+ AudioParameter result(valueStr);
+ int value;
+ if ( mAudioEncoder == AUDIO_ENCODER_AMR_NB &&
+ result.getInt(String8("AMR"),value) == NO_ERROR ) {
+ tunneledSource = true;
+ tunnelMime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
+ }
+ else if ( mAudioEncoder == AUDIO_ENCODER_QCELP &&
+ result.getInt(String8("QCELP"),value) == NO_ERROR ) {
+ tunneledSource = true;
+ tunnelMime = MEDIA_MIMETYPE_AUDIO_QCELP;
+ }
+ else if ( mAudioEncoder == AUDIO_ENCODER_EVRC &&
+ result.getInt(String8("EVRC"),value) == NO_ERROR ) {
+ tunneledSource = true;
+ tunnelMime = MEDIA_MIMETYPE_AUDIO_EVRC;
+ }
+ }
+
+ if ( tunneledSource ) {
+ sp<AudioSource> audioSource = NULL;
+ sp<MetaData> meta = new MetaData;
+ meta->setInt32(kKeyChannelCount, mAudioChannels);
+ meta->setInt32(kKeySampleRate, mSampleRate);
+ meta->setInt32(kKeyBitRate, mAudioBitRate);
+ if (mAudioTimeScale > 0) {
+ meta->setInt32(kKeyTimeScale, mAudioTimeScale);
+ }
+ meta->setCString( kKeyMIMEType, tunnelMime );
+ audioSource = new AudioSource( mAudioSource, meta);
+ return audioSource->initCheck( ) == OK ? audioSource : NULL;
+ }
+#endif
sp<AudioSource> audioSource =
new AudioSource(
mAudioSource,
@@ -1550,6 +1604,19 @@ status_t StagefrightRecorder::setupVideoEncoder(
if (mVideoTimeScale > 0) {
enc_meta->setInt32(kKeyTimeScale, mVideoTimeScale);
}
+
+#ifdef QCOM_HARDWARE
+ status_t retVal =
+ QCUtilityClass::helper_StageFrightRecoder_hfr(meta,enc_meta, mMaxFileDurationUs,
+ mFrameRate, mVideoEncoder);
+ if(retVal != OK) {
+ return retVal;
+ }
+
+ QCUtilityClass::helper_StagefrightRecoder_setUserprofile(mVideoEncoder,
+ mVideoEncoderProfile);
+#endif
+
if (mVideoEncoderProfile != -1) {
enc_meta->setInt32(kKeyVideoProfile, mVideoEncoderProfile);
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index ddf95dc..a9e344d 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -32,6 +32,16 @@
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/MetaData.h>
+
+#ifdef QCOM_ENHANCED_AUDIO
+#include <QCMediaDefs.h>
+#include <QCMetaData.h>
+#include <QOMX_AudioExtensions.h>
+#include <OMX_QCOMExtns.h>
+#include "include/QCUtilityClass.h"
+#endif
+
#include <media/hardware/HardwareAPI.h>
#include <OMX_Component.h>
@@ -925,6 +935,100 @@ status_t ACodec::configureCodec(
mIsEncoder = encoder;
+#ifdef QCOM_ENHANCED_AUDIO
+ if (!strcasecmp(mime, "audio/x-ms-wma")) {
+ if (mIsEncoder) {
+ ALOGE("WMA encoding not supported");
+ return ERROR_UNSUPPORTED;
+ } else {
+ int32_t version;
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+ QOMX_AUDIO_PARAM_WMA10PROTYPE paramWMA10;
+ CHECK(msg->findInt32("WMA-Version", &version));
+ int32_t numChannels;
+ int32_t bitRate;
+ int32_t sampleRate;
+ int32_t encodeOptions;
+ int32_t blockAlign;
+ int32_t bitspersample;
+ int32_t formattag;
+ int32_t advencopt1;
+ int32_t advencopt2;
+ int32_t VirtualPktSize;
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ CHECK(msg->findInt32("bsps", &bitspersample));
+ CHECK(msg->findInt32("fmtt", &formattag));
+ CHECK(msg->findInt32("ade1",&advencopt1));
+ CHECK(msg->findInt32("ade2",&advencopt2));
+ CHECK(msg->findInt32("vpks",&VirtualPktSize));
+ }
+ if(version==kTypeWMA) {
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = kPortIndexInput;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ InitOMXParams(&paramWMA10);
+ paramWMA10.nPortIndex = kPortIndexInput;
+ }
+ CHECK(msg->findInt32("channel-count", &numChannels));
+ CHECK(msg->findInt32("sample-rate", &sampleRate));
+ CHECK(msg->findInt32("brte", &bitRate));
+ CHECK(msg->findInt32("eopt", &encodeOptions));
+ CHECK(msg->findInt32("blka", &blockAlign));
+ ALOGV("Channels: %d, SampleRate: %d, BitRate; %d"
+ "EncodeOptions: %d, blockAlign: %d", numChannels,
+ sampleRate, bitRate, encodeOptions, blockAlign);
+ if(sampleRate>48000 || numChannels>2){
+ ALOGE("Unsupported samplerate/channels");
+ return ERROR_UNSUPPORTED;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess){
+ ALOGV("Bitspersample: %d, wmaformattag: %d,"
+ "advencopt1: %d, advencopt2: %d VirtualPktSize %d", bitspersample,
+ formattag, advencopt1, advencopt2, VirtualPktSize);
+ }
+ status_t err = OK;
+ OMX_INDEXTYPE index;
+ if(version==kTypeWMA) {
+ err = mOMX->getParameter(
+ mNode,OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ mOMX->getExtensionIndex(mNode,"OMX.Qualcomm.index.audio.wma10Pro",&index);
+ err = mOMX->getParameter(
+ mNode, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ CHECK_EQ(err, (status_t)OK);
+ if(version==kTypeWMA) {
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nEncodeOptions = encodeOptions;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.nChannels = numChannels;
+ paramWMA10.nSamplingRate = sampleRate;
+ paramWMA10.nEncodeOptions = encodeOptions;
+ paramWMA10.nBitRate = bitRate;
+ paramWMA10.nBlockAlign = blockAlign;
+ paramWMA10.advancedEncodeOpt = advencopt1;
+ paramWMA10.advancedEncodeOpt2 = advencopt2;
+ paramWMA10.formatTag = formattag;
+ paramWMA10.validBitsPerSample = bitspersample;
+ paramWMA10.nVirtualPktSize = VirtualPktSize;
+ err = mOMX->setParameter(
+ mNode, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ CHECK_EQ(err, (status_t)OK);
+ int32_t maxInputSize1;
+ if (msg->findInt32("max-input-size", &maxInputSize1)) {
+ setMinBufferSize(kPortIndexInput, (size_t)maxInputSize1);
+ }
+ return err;
+ }
+ }
+#endif
+
status_t err = setComponentRole(encoder /* isEncoder */, mime);
if (err != OK) {
@@ -1482,7 +1586,8 @@ status_t ACodec::setSupportedOutputFormat() {
|| format.eColorFormat == OMX_COLOR_FormatCbYCrY
|| format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
|| format.eColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar
- || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka);
+ || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka
+ || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar32m );
return mOMX->setParameter(
mNode, OMX_IndexParamVideoPortFormat,
@@ -3146,9 +3251,33 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
AString mime;
+ uint32_t quirks = 0;
+#ifdef QCOM_ENHANCED_AUDIO
+ msg->findString("mime", &mime);
+
+ if(!(strcmp(mime.c_str(),"audio/x-ms-wma"))){
+ mCodec->mQuirks = quirks;
+ mCodec->mOMX = omx;
+
+ mCodec->mPortEOS[kPortIndexInput] =
+ mCodec->mPortEOS[kPortIndexOutput] = false;
+
+ mCodec->mInputEOSResult = OK;
+
+ {
+ sp<AMessage> notify = mCodec->mNotify->dup();
+ notify->setInt32("what", ACodec::kWhatComponentAllocated);
+ notify->setString("componentName", mCodec->mComponentName.c_str());
+ notify->post();
+ }
+
+ mCodec->changeState(mCodec->mLoadedState);
+
+ return true;
+ }
+#endif
AString componentName;
- uint32_t quirks = 0;
if (msg->findString("componentName", &componentName)) {
ssize_t index = matchingCodecs.add();
OMXCodec::CodecNameAndQuirks *entry = &matchingCodecs.editItemAt(index);
@@ -3173,7 +3302,6 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
0, // flags
&matchingCodecs);
}
-
sp<CodecObserver> observer = new CodecObserver;
IOMX::node_id node = NULL;
@@ -3327,11 +3455,93 @@ bool ACodec::LoadedState::onConfigureComponent(
const sp<AMessage> &msg) {
ALOGV("onConfigureComponent");
- CHECK(mCodec->mNode != NULL);
-
AString mime;
CHECK(msg->findString("mime", &mime));
+#ifdef QCOM_ENHANCED_AUDIO
+ if (!strcasecmp(mime.c_str(), "audio/x-ms-wma")){
+ OMXClient client;
+ CHECK_EQ(client.connect(), (status_t)OK);
+ sp<IOMX> omx = client.interface();
+ Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
+ AString componentName;
+ uint32_t quirks = 0;
+ int32_t version = -1;
+
+ msg->findInt32("WMA-Version", &version);
+ if(kTypeWMA == version){
+ componentName = "OMX.qcom.audio.decoder.wma";
+ } else if(kTypeWMAPro == version){
+ componentName = "OMX.qcom.audio.decoder.wma10Pro";
+ } else if(kTypeWMALossLess == version){
+ componentName = "OMX.qcom.audio.decoder.wmaLossLess";
+ } else {
+ mCodec->signalError(OMX_ErrorComponentNotFound);
+ return false;
+ }
+
+ int32_t encoder;
+ if (!msg->findInt32("encoder", &encoder)) {
+ encoder = false;
+ }
+
+ OMXCodec::findMatchingCodecs(
+ mime.c_str(),
+ encoder, // createEncoder
+ componentName.c_str(), // matchComponentName
+ 0, // flags
+ &matchingCodecs);
+
+ sp<CodecObserver> observer = new CodecObserver;
+ IOMX::node_id node = NULL;
+
+ for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
+ ++matchIndex) {
+ componentName = matchingCodecs.itemAt(matchIndex).mName.string();
+ quirks = matchingCodecs.itemAt(matchIndex).mQuirks;
+
+ pid_t tid = androidGetTid();
+ int prevPriority = androidGetThreadPriority(tid);
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
+ status_t err = omx->allocateNode(componentName.c_str(), observer, &node);
+ androidSetThreadPriority(tid, prevPriority);
+
+ if (err == OK) {
+ break;
+ }
+ node = NULL;
+ }
+ if (node == NULL) {
+ if (!mime.empty()) {
+ ALOGE("Unable to instantiate a decoder for type '%s'.",
+ mime.c_str());
+ } else {
+ ALOGE("Unable to instantiate decoder '%s'.", componentName.c_str());
+ }
+
+ mCodec->signalError(OMX_ErrorComponentNotFound);
+ return false;
+ }
+ sp<AMessage> notify = new AMessage(kWhatOMXMessage, mCodec->id());
+ observer->setNotificationMessage(notify);
+
+ mCodec->mComponentName = componentName;
+ mCodec->mFlags = 0;
+
+ if (componentName.endsWith(".secure")) {
+ mCodec->mFlags |= kFlagIsSecure;
+ }
+
+ mCodec->mQuirks = quirks;
+ mCodec->mOMX = omx;
+ mCodec->mNode = node;
+
+ mCodec->mPortEOS[kPortIndexInput] =
+ mCodec->mPortEOS[kPortIndexOutput] = false;
+ }
+#endif
+
+ CHECK(mCodec->mNode != NULL);
status_t err = mCodec->configureCodec(mime.c_str(), msg);
if (err != OK) {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 193291e..766cf5e 100755
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -92,7 +92,8 @@ LOCAL_SRC_FILES += \
QCMediaDefs.cpp \
QCOMXCodec.cpp \
WAVEWriter.cpp \
- ExtendedExtractor.cpp
+ ExtendedExtractor.cpp \
+ QCUtilityClass.cpp
LOCAL_C_INCLUDES += \
$(TOP)/hardware/qcom/media/mm-core/inc
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index deb6b70..80b64c2 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -500,11 +500,11 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) {
{
Mutex::Autolock autoLock(mLock);
mNumFramesPlayed += size_done / mFrameSize;
- mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
if (mReachedEOS) {
mPinnedTimeUs = mNumFramesPlayedSysTimeUs;
} else {
+ mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
mPinnedTimeUs = -1ll;
}
}
@@ -535,14 +535,21 @@ int64_t AudioPlayer::getRealTimeUsLocked() const {
// compensate using system time.
int64_t diffUs;
if (mPinnedTimeUs >= 0ll) {
- diffUs = mPinnedTimeUs;
+ if(mReachedEOS)
+ diffUs = ALooper::GetNowUs();
+ else
+ diffUs = mPinnedTimeUs;
+
} else {
diffUs = ALooper::GetNowUs();
}
diffUs -= mNumFramesPlayedSysTimeUs;
- return result + diffUs;
+ if(result + diffUs <= mPositionTimeRealUs)
+ return result + diffUs;
+ else
+ return mPositionTimeRealUs;
}
int64_t AudioPlayer::getMediaTimeUs() {
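A small sketch of the EOS clamping described in the "FPS drop" fix and applied in getRealTimeUsLocked() above: after EOS the position is extrapolated from system time but never allowed to run past the last known real-time position. Names here are illustrative, not the AudioPlayer members.

#include <algorithm>
#include <cstdint>

int64_t extrapolatedPositionUs(int64_t framesPlayedUs, int64_t nowUs,
                               int64_t framesPlayedSysTimeUs,
                               int64_t positionTimeRealUs) {
    // Extrapolate using the wall-clock time elapsed since the last frame update...
    int64_t extrapolated = framesPlayedUs + (nowUs - framesPlayedSysTimeUs);
    // ...but cap at the real-time position so the clock never overshoots.
    return std::min(extrapolated, positionTimeRealUs);
}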
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index bb2d415..2bb721c 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 The Android Open Source Project
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,8 +29,17 @@
#include <cutils/properties.h>
#include <stdlib.h>
-namespace android {
+#ifdef QCOM_ENHANCED_AUDIO
+#define AMR_FRAMESIZE 32
+#define QCELP_FRAMESIZE 35
+#define EVRC_FRAMESIZE 23
+#define AMR_WB_FRAMESIZE 61
+#endif
+namespace android {
+// Treat time out as an error if we have not received any output
+// buffers after 1 seconds
+const static int64_t WaitLockEventTimeOutNs = 1000000000LL;
static void AudioRecordCallbackFunction(int event, void *user, void *info) {
AudioSource *source = (AudioSource *) user;
switch (event) {
@@ -55,7 +64,13 @@ AudioSource::AudioSource(
mSampleRate(sampleRate),
mPrevSampleTimeUs(0),
mNumFramesReceived(0),
- mNumClientOwnedBuffers(0) {
+ mNumClientOwnedBuffers(0)
+#ifdef QCOM_ENHANCED_AUDIO
+ ,mFormat(AUDIO_FORMAT_PCM_16_BIT),
+ mMime(MEDIA_MIMETYPE_AUDIO_RAW)
+#endif
+{
+
ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
CHECK(channelCount == 1 || channelCount == 2 || channelCount == 6);
@@ -64,10 +79,23 @@ AudioSource::AudioSource(
sampleRate,
AUDIO_FORMAT_PCM_16_BIT,
audio_channel_in_mask_from_count(channelCount));
+
+#ifdef QCOM_ENHANCED_AUDIO
+ if ( NO_ERROR != AudioSystem::getInputBufferSize(
+ sampleRate, mFormat, channelCount, (size_t*)&mMaxBufferSize) ) {
+ mMaxBufferSize = kMaxBufferSize;
+ ALOGV("mMaxBufferSize = %d", mMaxBufferSize);
+ }
+#endif
+
if (status == OK) {
// make sure that the AudioRecord callback never returns more than the maximum
// buffer size
+#ifdef QCOM_ENHANCED_AUDIO
+ int frameCount = mMaxBufferSize / sizeof(int16_t) / channelCount;
+#else
int frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;
+#endif
// make sure that the AudioRecord total buffer size is large enough
int bufCount = 2;
@@ -78,7 +106,11 @@ AudioSource::AudioSource(
mRecord = new AudioRecord(
inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
audio_channel_in_mask_from_count(channelCount),
+#ifdef QCOM_ENHANCED_AUDIO
+ 4 * mMaxBufferSize / sizeof(int16_t), /* Enable ping-pong buffers */
+#else
bufCount * frameCount,
+#endif
AudioRecordCallbackFunction,
this,
frameCount);
@@ -99,6 +131,62 @@ AudioSource::AudioSource(
}
}
+#ifdef QCOM_ENHANCED_AUDIO
+AudioSource::AudioSource( audio_source_t inputSource, const sp<MetaData>& meta )
+ : mStarted(false),
+ mPrevSampleTimeUs(0),
+ mNumFramesReceived(0),
+ mNumClientOwnedBuffers(0),
+ mFormat(AUDIO_FORMAT_PCM_16_BIT),
+ mMime(MEDIA_MIMETYPE_AUDIO_RAW) {
+
+ const char * mime;
+ ALOGE("SK: in AudioSource : inputSource: %d", inputSource);
+ CHECK( meta->findCString( kKeyMIMEType, &mime ) );
+ mMime = mime;
+ int32_t sampleRate = 0; //these are the only supported values
+ int32_t channels = 0; //for the below tunnel formats
+ CHECK( meta->findInt32( kKeyChannelCount, &channels ) );
+ CHECK( meta->findInt32( kKeySampleRate, &sampleRate ) );
+ int32_t frameSize = -1;
+ mSampleRate = sampleRate;
+ if ( !strcasecmp( mime, MEDIA_MIMETYPE_AUDIO_AMR_NB ) ) {
+ mFormat = AUDIO_FORMAT_AMR_NB;
+ frameSize = AMR_FRAMESIZE;
+ mMaxBufferSize = AMR_FRAMESIZE*10;
+ }
+ else if ( !strcasecmp( mime, MEDIA_MIMETYPE_AUDIO_QCELP ) ) {
+ mFormat = AUDIO_FORMAT_QCELP;
+ frameSize = QCELP_FRAMESIZE;
+ mMaxBufferSize = QCELP_FRAMESIZE*10;
+ }
+ else if ( !strcasecmp( mime, MEDIA_MIMETYPE_AUDIO_EVRC ) ) {
+ mFormat = AUDIO_FORMAT_EVRC;
+ frameSize = EVRC_FRAMESIZE;
+ mMaxBufferSize = EVRC_FRAMESIZE*10;
+ }
+ else if ( !strcasecmp( mime, MEDIA_MIMETYPE_AUDIO_AMR_WB ) ) {
+ mFormat = AUDIO_FORMAT_AMR_WB;
+ frameSize = AMR_WB_FRAMESIZE;
+ mMaxBufferSize = AMR_WB_FRAMESIZE*10;
+ }
+ else {
+ CHECK(0);
+ }
+ mAutoRampStartUs = 0;
+ CHECK(channels == 1 || channels == 2);
+
+ mRecord = new AudioRecord(
+ inputSource, sampleRate, mFormat,
+ channels > 1? AUDIO_CHANNEL_IN_STEREO:
+ AUDIO_CHANNEL_IN_MONO,
+ 4*mMaxBufferSize/channels/frameSize,
+ AudioRecordCallbackFunction,
+ this);
+ mInitCheck = mRecord->initCheck();
+}
+#endif
+
AudioSource::~AudioSource() {
if (mStarted) {
reset();
@@ -184,10 +272,17 @@ sp<MetaData> AudioSource::getFormat() {
}
sp<MetaData> meta = new MetaData;
+#ifdef QCOM_ENHANCED_AUDIO
+ meta->setCString(kKeyMIMEType, mMime);
+ meta->setInt32(kKeySampleRate, mRecord->getSampleRate());
+ meta->setInt32(kKeyChannelCount, mRecord->channelCount());
+ meta->setInt32(kKeyMaxInputSize, mMaxBufferSize);
+#else
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
meta->setInt32(kKeySampleRate, mSampleRate);
meta->setInt32(kKeyChannelCount, mRecord->channelCount());
meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);
+#endif
return meta;
}
@@ -234,7 +329,9 @@ status_t AudioSource::read(
}
while (mStarted && mBuffersReceived.empty()) {
- mFrameAvailableCondition.wait(mLock);
+ status_t err = mFrameAvailableCondition.waitRelative(mLock,WaitLockEventTimeOutNs);
+ if(err == -ETIMEDOUT)
+ return (status_t)err;
}
if (!mStarted) {
return OK;
@@ -249,24 +346,33 @@ status_t AudioSource::read(
int64_t timeUs;
CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
int64_t elapsedTimeUs = timeUs - mStartTimeUs;
- if (elapsedTimeUs < mAutoRampStartUs) {
- memset((uint8_t *) buffer->data(), 0, buffer->range_length());
- } else if (elapsedTimeUs < mAutoRampStartUs + kAutoRampDurationUs) {
- int32_t autoRampDurationFrames =
- (kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL;
-
- int32_t autoRampStartFrames =
- (mAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;
-
- int32_t nFrames = mNumFramesReceived - autoRampStartFrames;
- rampVolume(nFrames, autoRampDurationFrames,
- (uint8_t *) buffer->data(), buffer->range_length());
+#ifdef QCOM_ENHANCED_AUDIO
+ if ( mFormat == AUDIO_FORMAT_PCM_16_BIT ) {
+#endif
+ if (elapsedTimeUs < mAutoRampStartUs) {
+ memset((uint8_t *) buffer->data(), 0, buffer->range_length());
+ } else if (elapsedTimeUs < mAutoRampStartUs + kAutoRampDurationUs) {
+ int32_t autoRampDurationFrames =
+ (kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL;
+
+ int32_t autoRampStartFrames =
+ (mAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;
+
+ int32_t nFrames = mNumFramesReceived - autoRampStartFrames;
+ rampVolume(nFrames, autoRampDurationFrames,
+ (uint8_t *) buffer->data(), buffer->range_length());
+ }
+#ifdef QCOM_ENHANCED_AUDIO
}
+#endif
// Track the max recording signal amplitude.
if (mTrackMaxAmplitude) {
- trackMaxAmplitude(
- (int16_t *) buffer->data(), buffer->range_length() >> 1);
+#ifdef QCOM_ENHANCED_AUDIO
+ if (mFormat == AUDIO_FORMAT_PCM_16_BIT)
+#endif
+ trackMaxAmplitude(
+ (int16_t *) buffer->data(), buffer->range_length() >> 1);
}
*out = buffer;
@@ -321,7 +427,9 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
}
CHECK_EQ(numLostBytes & 1, 0u);
+#ifndef QCOM_ENHANCED_AUDIO
CHECK_EQ(audioBuffer.size & 1, 0u);
+#endif
if (numLostBytes > 0) {
// Loss of audio frames should happen rarely; thus the LOGW should
// not cause a logging spam
@@ -359,19 +467,45 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
const size_t bufferSize = buffer->range_length();
const size_t frameSize = mRecord->frameSize();
+#ifdef QCOM_ENHANCED_AUDIO
+ int64_t timestampUs = mPrevSampleTimeUs;
+ int64_t recordDurationUs = 0;
+ if ( mFormat == AUDIO_FORMAT_PCM_16_BIT ){
+ recordDurationUs = ((1000000LL * (bufferSize / (2 * mRecord->channelCount()))) +
+ (mSampleRate >> 1)) / mSampleRate;
+ } else {
+ recordDurationUs = bufferDurationUs(bufferSize);
+ }
+ timestampUs += recordDurationUs;
+#else
const int64_t timestampUs =
mPrevSampleTimeUs +
((1000000LL * (bufferSize / frameSize)) +
(mSampleRate >> 1)) / mSampleRate;
+#endif
if (mNumFramesReceived == 0) {
buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs);
}
buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs);
- buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
+#ifdef QCOM_ENHANCED_AUDIO
+ if (mFormat == AUDIO_FORMAT_PCM_16_BIT) {
+#endif
+ buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
+#ifdef QCOM_ENHANCED_AUDIO
+ } else {
+ int64_t wallClockTimeUs = timeUs - mInitialReadTimeUs;
+ int64_t mediaTimeUs = mStartTimeUs + mPrevSampleTimeUs;
+ buffer->meta_data()->setInt64(kKeyDriftTime, mediaTimeUs - wallClockTimeUs);
+ }
+#endif
mPrevSampleTimeUs = timestampUs;
+#ifdef QCOM_ENHANCED_AUDIO
+ mNumFramesReceived += buffer->range_length() / sizeof(int16_t);
+#else
mNumFramesReceived += bufferSize / frameSize;
+#endif
mBuffersReceived.push_back(buffer);
mFrameAvailableCondition.signal();
}
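For the PCM path the timestamp increment is plain frame arithmetic with rounding: a 4096-byte buffer of 16-bit stereo, for example, is 4096 / (2 * 2) = 1024 frames, which at 48 kHz advances mPrevSampleTimeUs by (1000000*1024 + 24000) / 48000 ≈ 21333 us. For the tunnel formats the same increment comes from bufferDurationUs() below, which counts whole 20 ms vocoder frames instead.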
@@ -399,4 +533,27 @@ int16_t AudioSource::getMaxAmplitude() {
return value;
}
+#ifdef QCOM_ENHANCED_AUDIO
+int64_t AudioSource::bufferDurationUs(ssize_t n) {
+ int64_t dataDurationMs = 0;
+ if (mFormat == AUDIO_FORMAT_AMR_NB) {
+ dataDurationMs = (n/AMR_FRAMESIZE) * 20; //ms
+ }
+ else if (mFormat == AUDIO_FORMAT_EVRC) {
+ dataDurationMs = (n/EVRC_FRAMESIZE) * 20; //ms
+ }
+ else if (mFormat == AUDIO_FORMAT_QCELP) {
+ dataDurationMs = (n/QCELP_FRAMESIZE) * 20; //ms
+ }
+ else if (mFormat == AUDIO_FORMAT_AMR_WB) {
+ dataDurationMs = (n/AMR_WB_FRAMESIZE) * 20; //ms
+ }
+ else {
+ CHECK(0);
+ }
+
+ return dataDurationMs*1000LL;
+}
+#endif
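A rough worked example of the duration math (the FRAMESIZE constants are defined in the audio headers, so the value used here is an assumption): taking AMR_FRAMESIZE as 32 bytes, a 640-byte AMR-NB read is 20 full-rate frames, so bufferDurationUs(640) returns 20 * 20 ms = 400000 us.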
} // namespace android
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index c68b476..ad221d1 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -57,6 +57,9 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXCodec.h>
+#ifdef QCOM_HARDWARE
+#include "include/QCUtilityClass.h"
+#endif
#include <gui/ISurfaceTexture.h>
#include <gui/SurfaceTextureClient.h>
@@ -255,6 +258,46 @@ AwesomePlayer::~AwesomePlayer() {
mClient.disconnect();
}
+void AwesomePlayer::printStats() {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("persist.debug.sf.statistics", value, "0");
+ if (atoi(value) && mVideoSource != NULL) {
+ ALOGE("===========================\n"
+ " videoDimensions(%d x %d)\n"
+ " Total Video Frames Decoded(%lld)\n"
+ " Total Video Frames Rendered(%lld)\n"
+ " Total Playback Duration(%lld ms)\n"
+ " numVideoFramesDropped(%lld)\n"
+ " Average Frames Per Second(%.4f)\n"
+ " Last Seek To Time(%lld ms)\n"
+ " Last Paused Time(%lld ms)\n"
+ " First Frame Latency (%lld ms)\n"
+ " Number of times AV Sync Lost(%u)\n"
+ " Max Video Ahead Time Delta(%u)\n"
+ " Max Video Behind Time Delta(%u)\n"
+ " Max Time Sync Loss(%u)\n"
+ " EOS(%d)\n"
+ " PLAYING(%d)\n"
+ "===========================\n\n",
+ mStats.mVideoWidth,
+ mStats.mVideoHeight,
+ mStats.mNumVideoFramesDecoded,
+ mStats.mTotalFrames,
+ mStats.mTotalTimeUs/1000,
+ mStats.mNumVideoFramesDropped,
+ mStats.mTotalTimeUs > 0 ? ((double)(mStats.mTotalFrames)*1E6)/((double)mStats.mTotalTimeUs) : 0,
+ mStats.mLastSeekToTimeMs,
+ mStats.mLastPausedTimeMs,
+ mStats.mFirstFrameLatencyUs/1000,
+ mStats.mNumTimesSyncLoss,
+ -mStats.mMaxEarlyDelta/1000,
+ mStats.mMaxLateDelta/1000,
+ mStats.mMaxTimeSyncLoss/1000,
+ (mFlags & VIDEO_AT_EOS) > 0,
+ (mFlags & PLAYING) > 0);
+ }
+}
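The dump above is gated on the persist.debug.sf.statistics system property, which is re-read on every call, so on a debuggable build it can be toggled at runtime with something like "adb shell setprop persist.debug.sf.statistics 1" and turned off again without restarting mediaserver.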
+
void AwesomePlayer::cancelPlayerEvents(bool keepNotifications) {
mQueue.cancelEvent(mVideoEvent->eventID());
mVideoEventPending = false;
@@ -448,7 +491,11 @@ status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
&mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
stat->mMIME = mime.string();
}
- } else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) {
+ } else if (!haveAudio &&
+#ifdef QCOM_HARDWARE
+ !QCUtilityClass::helper_Awesomeplayer_checkIfAudioDisable() &&
+#endif
+ !strncasecmp(mime.string(), "audio/", 6)) {
setAudioSource(extractor->getTrack(i));
haveAudio = true;
mActiveAudioTrackIndex = i;
@@ -573,6 +620,8 @@ void AwesomePlayer::reset_l() {
mVideoRenderer.clear();
+ modifyFlags(PLAYING, CLEAR);
+ printStats();
if (mVideoSource != NULL) {
shutdownVideoDecoder_l();
}
@@ -1039,7 +1088,7 @@ status_t AwesomePlayer::play_l() {
// We don't want to post an error notification at this point,
// the error returned from MediaPlayer::start() will suffice.
bool sendErrorNotification = false;
-#ifdef IS_TUNNEL_MODE
+#ifdef USE_TUNNEL_MODE
if(mIsTunnelAudio) {
// For tunnel Audio error has to be posted to the client
sendErrorNotification = true;
@@ -1080,6 +1129,7 @@ status_t AwesomePlayer::play_l() {
if (mAudioSource != NULL && mVideoSource != NULL) {
postVideoLagEvent_l();
}
+ printStats();
}
if (mFlags & AT_EOS) {
@@ -1296,9 +1346,10 @@ status_t AwesomePlayer::pause_l(bool at_eos) {
Playback::PAUSE, 0);
}
- if(!(mFlags & AT_EOS)){
+ if(!(mFlags & VIDEO_AT_EOS)){
Mutex::Autolock autoLock(mStatsLock);
mStats.mLastPausedTimeMs = mVideoTimeUs/1000;
+ printStats();
}
uint32_t params = IMediaPlayerService::kBatteryDataTrackDecoder;
@@ -1832,6 +1883,7 @@ void AwesomePlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
mStats.mLastSeekToTimeMs = mSeekTimeUs/1000;
logFirstFrame();
}
+ printStats();
}
void AwesomePlayer::onVideoEvent() {
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 9f63ec0..f7d452b 100755
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -30,7 +30,10 @@
#include <gui/Surface.h>
#include <utils/String8.h>
#include <cutils/properties.h>
-
+#ifdef QCOM_HARDWARE
+#include "include/QCUtilityClass.h"
+#include <QCMetaData.h>
+#endif
#ifdef USE_TI_CUSTOM_DOMX
#include <OMX_TI_IVCommon.h>
#endif
@@ -569,6 +572,11 @@ status_t CameraSource::initWithCameraAccess(
mMeta->setInt32(kKeyStride, mVideoSize.width);
mMeta->setInt32(kKeySliceHeight, mVideoSize.height);
mMeta->setInt32(kKeyFrameRate, mVideoFrameRate);
+
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_CameraSource_hfr(params, mMeta);
+#endif
+
return OK;
}
diff --git a/media/libstagefright/LPAPlayerALSA.cpp b/media/libstagefright/LPAPlayerALSA.cpp
index 0aa419c..e2f30ed 100644
--- a/media/libstagefright/LPAPlayerALSA.cpp
+++ b/media/libstagefright/LPAPlayerALSA.cpp
@@ -51,10 +51,9 @@
static const char mName[] = "LPAPlayer";
-#define MEM_PADDING 64
-#define MEM_BUFFER_SIZE (256*1024)
+#define MEM_METADATA_SIZE 64
+#define MEM_BUFFER_SIZE ((256*1024) - MEM_METADATA_SIZE)
#define MEM_BUFFER_COUNT 4
-
#define PCM_FORMAT 2
#define NUM_FDS 2
#define LPA_BUFFER_TIME 1500000
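Renaming MEM_PADDING to MEM_METADATA_SIZE and shrinking MEM_BUFFER_SIZE to 256*1024 - 64 = 262080 bytes presumably keeps each data buffer plus its 64-byte metadata header inside the driver's original 256 KB allocation, which is what the LPAPlayer metadata-mode/partial-frame handling described in the commit message relies on.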
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 6d1f8c6..04b508b 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -40,6 +40,10 @@
#include "include/ESDS.h"
+#ifdef QCOM_HARDWARE
+#include "include/QCUtilityClass.h"
+#endif
+
namespace android {
static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
@@ -2161,6 +2165,12 @@ status_t MPEG4Writer::Track::threadEntry() {
meta_data->findInt32(kKeyIsSyncFrame, &isSync);
CHECK(meta_data->findInt64(kKeyTime, &timestampUs));
+#ifdef QCOM_HARDWARE
+ if(!mIsAudio) {
+ QCUtilityClass::helper_MPEG4Writer_hfr(mMeta, timestampUs);
+ }
+#endif
+
////////////////////////////////////////////////////////////////////////////////
if (mStszTableEntries->count() == 0) {
mFirstSampleTimeRealUs = systemTime() / 1000;
@@ -2190,6 +2200,10 @@ status_t MPEG4Writer::Track::threadEntry() {
*/
int64_t decodingTimeUs;
CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_MPEG4Writer_hfr(mMeta, decodingTimeUs);
+#endif
+
decodingTimeUs -= previousPausedDurationUs;
cttsOffsetTimeUs =
timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index d24337f..f815ec9 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -28,6 +28,10 @@
#include <libexpat/expat.h>
+#ifdef QCOM_HARDWARE
+#include "include/QCUtilityClass.h"
+#endif
+
namespace android {
static Mutex sInitMutex;
@@ -64,6 +68,14 @@ MediaCodecList::MediaCodecList()
addMediaCodec(
false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
+
+#ifdef QCOM_HARDWARE
+ Vector<AString> QcomAACQuirks;
+ QcomAACQuirks.push(AString("requires-allocate-on-input-ports"));
+ QcomAACQuirks.push(AString("requires-allocate-on-output-ports"));
+ QCUtilityClass::helper_addMediaCodec(mCodecInfos, mTypes, false, "OMX.qcom.audio.decoder.multiaac",
+ "audio/mp4a-latm", QCUtilityClass::helper_getCodecSpecificQuirks(mCodecQuirks, QcomAACQuirks));
+#endif
}
#if 0
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 1eb5c19..33e526a 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -35,6 +35,7 @@
#include "include/AACExtractor.h"
#ifdef QCOM_HARDWARE
#include "include/ExtendedExtractor.h"
+#include "include/QCUtilityClass.h"
#endif
#include "matroska/MatroskaExtractor.h"
@@ -60,7 +61,6 @@ uint32_t MediaExtractor::flags() const {
sp<MediaExtractor> MediaExtractor::Create(
const sp<DataSource> &source, const char *mime) {
sp<AMessage> meta;
- bool bCheckExtendedExtractor = false;
String8 tmp;
if (mime == NULL) {
@@ -107,9 +107,6 @@ sp<MediaExtractor> MediaExtractor::Create(
} else {
ret = new MPEG4Extractor(source);
}
-#ifdef QCOM_ENHANCED_AUDIO
- bCheckExtendedExtractor = true;
-#endif
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
ret = new MP3Extractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
@@ -147,49 +144,13 @@ sp<MediaExtractor> MediaExtractor::Create(
}
#ifdef QCOM_HARDWARE
- //If default extractor created and flag is not set to check extended extractor,
- // then pass default extractor.
- if (ret && (!bCheckExtendedExtractor) ) {
- ALOGD("returning default extractor");
- return ret;
- }
-
- //Create Extended Extractor only if default extractor are not selected
- ALOGV("Using ExtendedExtractor");
- sp<MediaExtractor> retextParser = ExtendedExtractor::CreateExtractor(source, mime);
- //if we came here, it means we do not have to use the default extractor, if created above.
- bool bUseDefaultExtractor = false;
-
- if(bCheckExtendedExtractor) {
- ALOGV("bCheckExtendedExtractor is true");
- //bCheckExtendedExtractor is true which means default extractor was found
- // but we want to give preference to extended extractor based on certain
- // codec type.Set bUseDefaultExtractor to true if extended extractor
- //does not return specific codec type that we are looking for.
- bUseDefaultExtractor = true;
- ALOGV(" bCheckExtendedExtractor is true..checking extended extractor");
- for (size_t i = 0; (retextParser!=NULL) && (i < retextParser->countTracks()); ++i) {
- sp<MetaData> meta = retextParser->getTrackMetaData(i);
- const char *mime;
- bool success = meta->findCString(kKeyMIMEType, &mime);
- if( (success == true) && !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)) {
- ALOGV("Discarding default extractor and using the extended one");
- //We found what we were looking for, set bUseDefaultExtractor to false;
- bUseDefaultExtractor = false;
- if(ret) {
- //delete the default extractor as we will be using extended extractor..
- delete ret;
- }
- break;
- }
- }
- }
- if( (retextParser != NULL) && (!bUseDefaultExtractor) ) {
- ALOGV("returning retextParser");
- return retextParser;
- }
-#endif
+ // ret is released inside the helper if the extended extractor replaces it
+ return QCUtilityClass::helper_MediaExtractor_CreateIfNeeded(ret,
+ source,
+ mime);
+#else
return ret;
+#endif
}
} // namespace android
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index b8b3ec4..26cae65 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -52,7 +52,9 @@
#include <QCMetaData.h>
#include <QOMX_AudioExtensions.h>
#include <OMX_QCOMExtns.h>
+#include "include/QCUtilityClass.h"
#endif
+
#include "include/avc_utils.h"
#ifdef USE_SAMSUNG_COLORFORMAT
@@ -415,7 +417,14 @@ sp<MediaSource> OMXCodec::Create(
CHECK(success);
Vector<CodecNameAndQuirks> matchingCodecs;
- findMatchingCodecs(
+
+#ifdef QCOM_HARDWARE
+ if (QCOMXCodec::useHWAACDecoder(mime)) {
+ findMatchingCodecs(mime, createEncoder,
+ "OMX.qcom.audio.decoder.multiaac", flags, &matchingCodecs);
+ } else
+#endif
+ findMatchingCodecs(
mime, createEncoder, matchComponentName, flags, &matchingCodecs);
if (matchingCodecs.isEmpty()) {
@@ -1054,6 +1063,11 @@ void OMXCodec::setVideoInputFormat(
CHECK(success);
CHECK(stride != 0);
+#ifdef QCOM_HARDWARE
+ int32_t newFrameRate = frameRate;
+ QCUtilityClass::helper_OMXCodec_hfr(meta, frameRate, bitRate, newFrameRate);
+#endif
+
OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
compressionFormat = OMX_VIDEO_CodingAVC;
@@ -1288,7 +1302,11 @@ status_t OMXCodec::setupH263EncoderParameters(const sp<MetaData>& meta) {
h263type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
- h263type.nPFrames = setPFramesSpacing(iFramesInterval, frameRate);
+ int32_t newFrameRate = frameRate;
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_OMXCodec_hfr(meta, frameRate, bitRate, newFrameRate);
+#endif
+ h263type.nPFrames = setPFramesSpacing(iFramesInterval, newFrameRate);
if (h263type.nPFrames == 0) {
h263type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
}
@@ -1339,7 +1357,11 @@ status_t OMXCodec::setupMPEG4EncoderParameters(const sp<MetaData>& meta) {
mpeg4type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
- mpeg4type.nPFrames = setPFramesSpacing(iFramesInterval, frameRate);
+ int32_t newFrameRate = frameRate;
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_OMXCodec_hfr(meta, frameRate, bitRate, newFrameRate);
+#endif
+ mpeg4type.nPFrames = setPFramesSpacing(iFramesInterval, newFrameRate);
if (mpeg4type.nPFrames == 0) {
mpeg4type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
}
@@ -1360,6 +1382,9 @@ status_t OMXCodec::setupMPEG4EncoderParameters(const sp<MetaData>& meta) {
mpeg4type.eProfile = static_cast<OMX_VIDEO_MPEG4PROFILETYPE>(profileLevel.mProfile);
mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(profileLevel.mLevel);
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_OMXCodec_setBFrames(mpeg4type, mNumBFrames);
+#endif
err = mOMX->setParameter(
mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
CHECK_EQ(err, (status_t)OK);
@@ -1397,7 +1422,12 @@ status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
h264type.eProfile = static_cast<OMX_VIDEO_AVCPROFILETYPE>(profileLevel.mProfile);
h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(profileLevel.mLevel);
- // XXX
+ int32_t newFrameRate = frameRate;
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_OMXCodec_hfr(meta, frameRate, bitRate, newFrameRate);
+#endif
+
+#ifndef QCOM_HARDWARE
#ifdef USE_TI_DUCATI_H264_PROFILE
if ((strncmp(mComponentName, "OMX.TI.DUCATI1", 14) != 0)
&& (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline)) {
@@ -1405,19 +1435,22 @@ status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
#endif
ALOGW("Use baseline profile instead of %d for AVC recording",
- h264type.eProfile);
+ h264type.eProfile);
h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
}
+#endif
if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
h264type.nSliceHeaderSpacing = 0;
h264type.bUseHadamard = OMX_TRUE;
h264type.nRefFrames = 1;
h264type.nBFrames = 0;
+#ifndef QCOM_HARDWARE
h264type.nPFrames = setPFramesSpacing(iFramesInterval, frameRate);
if (h264type.nPFrames == 0) {
h264type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
}
+#endif
h264type.nRefIdx10ActiveMinus1 = 0;
h264type.nRefIdx11ActiveMinus1 = 0;
h264type.bEntropyCodingCABAC = OMX_FALSE;
@@ -1428,6 +1461,12 @@ status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
h264type.nCabacInitIdc = 0;
}
+#ifdef QCOM_HARDWARE
+ QCUtilityClass::helper_OMXCodec_setBFrames(h264type,
+ mNumBFrames,
+ iFramesInterval,
+ newFrameRate);
+#endif
if (h264type.nBFrames != 0) {
h264type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB;
}
@@ -1509,6 +1548,7 @@ status_t OMXCodec::setVideoOutputFormat(
|| format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
|| format.eColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar
|| format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka
+ || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar32m
#ifdef USE_SAMSUNG_COLORFORMAT
|| format.eColorFormat == OMX_SEC_COLOR_FormatNV12TPhysicalAddress
|| format.eColorFormat == OMX_SEC_COLOR_FormatNV12Tiled
@@ -1653,7 +1693,11 @@ OMXCodec::OMXCodec(
mNativeWindow(
(!strncmp(componentName, "OMX.google.", 11)
|| !strcmp(componentName, "OMX.Nvidia.mpeg2v.decode"))
- ? NULL : nativeWindow) {
+ ? NULL : nativeWindow)
+#ifdef QCOM_HARDWARE
+ ,mNumBFrames(0)
+#endif
+{
mPortStatus[kPortIndexInput] = ENABLED;
mPortStatus[kPortIndexOutput] = ENABLED;
@@ -2152,6 +2196,15 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
return err;
}
+#ifdef QCOM_BSP
+ err = mNativeWindow.get()->perform(mNativeWindow.get(),
+ NATIVE_WINDOW_SET_BUFFERS_SIZE, def.nBufferSize);
+ if (err != 0) {
+ ALOGE("native_window_set_buffers_size failed: %s (%d)", strerror(-err),
+ -err);
+ return err;
+ }
+#endif
CODEC_LOGV("allocating %lu buffers from a native window of size %lu on "
"output port", def.nBufferCountActual, def.nBufferSize);
@@ -3028,13 +3081,30 @@ void OMXCodec::onStateChange(OMX_STATETYPE newState) {
mPortStatus[kPortIndexInput] = ENABLED;
mPortStatus[kPortIndexOutput] = ENABLED;
- if ((mFlags & kEnableGrallocUsageProtected) &&
- mNativeWindow != NULL) {
- // We push enough 1x1 blank buffers to ensure that one of
- // them has made it to the display. This allows the OMX
- // component teardown to zero out any protected buffers
- // without the risk of scanning out one of those buffers.
- pushBlankBuffersToNativeWindow();
+ if (mNativeWindow != NULL) {
+#ifdef QCOM_BSP
+ /*
+ * Reset the buffer size field on the SurfaceTexture
+ * back to 0. This ensures properly sized buffers
+ * are allocated if the same SurfaceTexture is
+ * re-used in a different decode session.
+ */
+ int err =
+ mNativeWindow.get()->perform(mNativeWindow.get(),
+ NATIVE_WINDOW_SET_BUFFERS_SIZE,
+ 0);
+ if (err != 0) {
+ ALOGE("set_buffers_size failed: %s (%d)", strerror(-err),
+ -err);
+ }
+#endif
+ if (mFlags & kEnableGrallocUsageProtected) {
+ // We push enough 1x1 blank buffers to ensure that one of
+ // them has made it to the display. This allows the OMX
+ // component teardown to zero out any protected buffers
+ // without the risk of scanning out one of those buffers.
+ pushBlankBuffersToNativeWindow();
+ }
}
setState(IDLE_TO_LOADED);
@@ -3305,7 +3375,10 @@ void OMXCodec::drainInputBuffers() {
}
if (mFlags & kOnlySubmitOneInputBufferAtOneTime) {
- break;
+#ifdef QCOM_HARDWARE
+ if (i == mNumBFrames)
+#endif
+ break;
}
}
}
@@ -3757,6 +3830,9 @@ void OMXCodec::setRawAudioFormat(
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
+#ifdef QCOM_ENHANCED_AUDIO
+ def.format.audio.cMIMEType = NULL;
+#endif
status_t err = mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
CHECK_EQ(err, (status_t)OK);
@@ -5235,6 +5311,10 @@ void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
if (mNativeWindow != NULL) {
initNativeWindowCrop();
}
+#ifdef QCOM_HARDWARE
+ } else {
+ QCUtilityClass::helper_OMXCodec_hfr(inputFormat, mOutputFormat);
+#endif
}
break;
}
diff --git a/media/libstagefright/QCOMXCodec.cpp b/media/libstagefright/QCOMXCodec.cpp
index fcc57f2..a2f3b35 100644
--- a/media/libstagefright/QCOMXCodec.cpp
+++ b/media/libstagefright/QCOMXCodec.cpp
@@ -30,6 +30,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "QCOMXCodec"
#include <utils/Log.h>
+#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
@@ -593,7 +594,7 @@ void QCOMXCodec::setQCSpecificVideoFormat(const sp<MetaData> &meta, sp<IOMX> OMX
void QCOMXCodec::checkIfInterlaced(const uint8_t *ptr, const sp<MetaData> &meta)
{
uint16_t spsSize = (((uint16_t)ptr[6]) << 8) + (uint16_t)(ptr[7]);
- int32_t width = 0, height = 0, isInterlaced = 0;
+ int32_t width = 0, height = 0, isInterlaced = 1;
const uint8_t *spsStart = &ptr[8];
sp<ABuffer> seqParamSet = new ABuffer(spsSize);
@@ -607,4 +608,14 @@ void QCOMXCodec::checkIfInterlaced(const uint8_t *ptr, const sp<MetaData> &meta)
return;
}
+bool QCOMXCodec::useHWAACDecoder(const char *mime) {
+ char value[PROPERTY_VALUE_MAX];
+ int aaccodectype = property_get("media.aaccodectype", value, NULL);
+ if (!strcmp("0", value) && aaccodectype && !strcmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+ ALOGI("Using Hardware AAC Decoder");
+ return true;
+ }
+ return false;
+}
+
}
diff --git a/media/libstagefright/QCUtilityClass.cpp b/media/libstagefright/QCUtilityClass.cpp
new file mode 100644
index 0000000..1efc040
--- /dev/null
+++ b/media/libstagefright/QCUtilityClass.cpp
@@ -0,0 +1,356 @@
+/*Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "QCUtilClass"
+#include <utils/Log.h>
+
+#include <include/QCUtilityClass.h>
+#include "include/ExtendedExtractor.h"
+#include <media/stagefright/MetaData.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/OMXCodec.h>
+
+namespace android {
+
+//-- START :: HFR Related Changes -----
+
+status_t QCUtilityClass::helper_StageFrightRecoder_hfr(sp<MetaData> &meta, sp<MetaData> &enc_meta,
+ int64_t &maxFileDurationUs, int32_t frameRate,
+ video_encoder videoEncoder) {
+ status_t retVal = OK;
+ int32_t hfr = 0;
+
+ if (!meta->findInt32(kKeyHFR, &hfr)) {
+ ALOGW("hfr not found, default to 0");
+ }
+
+ if (hfr && frameRate) {
+ maxFileDurationUs = maxFileDurationUs * (hfr/frameRate);
+ }
+
+ enc_meta->setInt32(kKeyHFR, hfr);
+ int32_t width = 0, height = 0;
+
+ CHECK(meta->findInt32(kKeyWidth, &width));
+ CHECK(meta->findInt32(kKeyHeight, &height));
+
+ char deviceName[PROPERTY_VALUE_MAX];
+ property_get("ro.board.platform", deviceName, "0");
+ if (!strncmp(deviceName, "msm7627a", 8)) {
+ if (hfr && (width * height > 432*240)) {
+ ALOGE("HFR mode is supported only upto WQVGA resolution");
+ return INVALID_OPERATION;
+ }
+ } else {
+ if(hfr && ((videoEncoder != VIDEO_ENCODER_H264) || (width * height > 800*480))) {
+ ALOGE("HFR mode is supported only upto WVGA and H264 codec.");
+ return INVALID_OPERATION;
+ }
+ }
+ return retVal;
+}
+
+void QCUtilityClass::helper_CameraSource_hfr(const CameraParameters& params,
+ sp<MetaData> &meta) {
+ const char *hfr_str = params.get("video-hfr");
+ int32_t hfr = -1;
+
+ if (hfr_str != NULL) {
+ hfr = atoi(hfr_str);
+ }
+ if (hfr < 0) {
+ ALOGW("Invalid hfr value(%d) set from app. Disabling HFR.", hfr);
+ hfr = 0;
+ }
+ meta->setInt32(kKeyHFR, hfr);
+}
+
+void QCUtilityClass::helper_MPEG4Writer_hfr(sp<MetaData> &meta,
+ int64_t &timestampUs) {
+ int32_t frameRate = 0, hfr = 0, multiple = 0;
+
+ if (!(meta->findInt32(kKeyFrameRate, &frameRate))) {
+ return;
+ }
+
+ if (!(meta->findInt32(kKeyHFR, &hfr))) {
+ return;
+ }
+
+ multiple = hfr ? (hfr/frameRate) : 1;
+ timestampUs = multiple * timestampUs;
+}
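Worked example of the scaling above: with kKeyHFR = 120 and kKeyFrameRate = 30, multiple is 4, so a sample captured at 33 ms is written with a 132 ms timestamp and the 120 fps capture plays back at 30 fps, i.e. 4x slow motion.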
+
+void QCUtilityClass::helper_OMXCodec_hfr(const sp<MetaData> &meta,
+ int32_t &frameRate,
+ int32_t &bitRate,
+ int32_t &newFrameRate) {
+ int32_t hfr = 0, hfrRatio = 0;
+ if (!(meta->findInt32(kKeyHFR, &hfr))) {
+ return;
+ }
+
+ hfrRatio = hfr ? hfr/frameRate : 1;
+ frameRate = hfr?hfr:frameRate;
+ bitRate = hfr ? (hfrRatio*bitRate) : bitRate;
+ newFrameRate = frameRate / hfrRatio;
+}
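Worked example: with kKeyHFR = 120, an original frameRate of 30 and a bitRate of 2 Mbps, hfrRatio is 4, so the encoder port is configured for 120 fps at 8 Mbps while newFrameRate comes back to 30, which is what the callers feed into setPFramesSpacing() so the I-frame interval stays tied to the original rate.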
+
+void QCUtilityClass::helper_OMXCodec_hfr(const sp<MetaData> &inputFormat,
+ sp<MetaData> &outputFormat) {
+ int32_t frameRate = 0, hfr = 0;
+ inputFormat->findInt32(kKeyHFR, &hfr);
+ inputFormat->findInt32(kKeyFrameRate, &frameRate);
+ outputFormat->setInt32(kKeyHFR, hfr);
+ outputFormat->setInt32(kKeyFrameRate, frameRate);
+}
+
+//-- END :: HFR related changes -----
+
+
+//-- START :: AUDIO disable and profile change based on property -----
+
+bool QCUtilityClass::helper_Awesomeplayer_checkIfAudioDisable() {
+ bool retVal = false;
+ char disableAudio[PROPERTY_VALUE_MAX];
+ property_get("persist.debug.sf.noaudio", disableAudio, "0");
+ if (atoi(disableAudio) == 1) {
+ retVal = true;
+ }
+ return retVal;
+}
+
+bool QCUtilityClass::helper_StagefrightRecoder_checkIfAudioDisable() {
+ bool retVal = false;
+ char disableAudio[PROPERTY_VALUE_MAX];
+ property_get("camcorder.debug.disableaudio", disableAudio, "0");
+ if (atoi(disableAudio) == 1) {
+ retVal = true;
+ }
+ return retVal;
+}
+
+void QCUtilityClass::helper_StagefrightRecoder_setUserprofile(video_encoder &videoEncoder,
+ int32_t &videoEncoderProfile) {
+ char value[PROPERTY_VALUE_MAX];
+ bool customProfile = false;
+ if (!property_get("encoder.video.profile", value, NULL) > 0) {
+ return;
+ }
+
+ switch (videoEncoder) {
+ case VIDEO_ENCODER_H264:
+ if (strncmp("base", value, 4) == 0) {
+ videoEncoderProfile = OMX_VIDEO_AVCProfileBaseline;
+ ALOGI("H264 Baseline Profile");
+ } else if (strncmp("main", value, 4) == 0) {
+ videoEncoderProfile = OMX_VIDEO_AVCProfileMain;
+ ALOGI("H264 Main Profile");
+ } else if (strncmp("high", value, 4) == 0) {
+ videoEncoderProfile = OMX_VIDEO_AVCProfileHigh;
+ ALOGI("H264 High Profile");
+ } else {
+ ALOGW("Unsupported H264 Profile");
+ }
+ break;
+ case VIDEO_ENCODER_MPEG_4_SP:
+ if (strncmp("simple", value, 5) == 0 ) {
+ videoEncoderProfile = OMX_VIDEO_MPEG4ProfileSimple;
+ ALOGI("MPEG4 Simple profile");
+ } else if (strncmp("asp", value, 3) == 0 ) {
+ videoEncoderProfile = OMX_VIDEO_MPEG4ProfileAdvancedSimple;
+ ALOGI("MPEG4 Advanced Simple Profile");
+ } else {
+ ALOGW("Unsupported MPEG4 Profile");
+ }
+ break;
+ default:
+ ALOGW("No custom profile support for other codecs");
+ break;
+ }
+}
+
+void QCUtilityClass::helper_OMXCodec_setBFrames(OMX_VIDEO_PARAM_MPEG4TYPE &mpeg4type,
+ bool &numBFrames) {
+ if (mpeg4type.eProfile > OMX_VIDEO_MPEG4ProfileSimple) {
+ mpeg4type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB;
+ mpeg4type.nBFrames = 1;
+ mpeg4type.nPFrames /= (mpeg4type.nBFrames + 1);
+ numBFrames = mpeg4type.nBFrames;
+ }
+ return;
+}
+
+void QCUtilityClass::helper_OMXCodec_setBFrames(OMX_VIDEO_PARAM_AVCTYPE &h264type,
+ bool &numBFrames,
+ int32_t iFramesInterval,
+ int32_t frameRate) {
+ OMX_U32 val = 0;
+ if (iFramesInterval < 0) {
+ val = 0xFFFFFFFF;
+ } else if (iFramesInterval == 0) {
+ val = 0;
+ } else {
+ val = frameRate * iFramesInterval - 1;
+ CHECK(val > 1);
+ }
+
+ h264type.nPFrames = val;
+
+ if (h264type.nPFrames == 0) {
+ h264type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
+ }
+
+ if (h264type.eProfile > OMX_VIDEO_AVCProfileBaseline) {
+ h264type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB;
+ h264type.nBFrames = 1;
+ h264type.nPFrames /= (h264type.nBFrames + 1);
+ numBFrames = h264type.nBFrames;
+ }
+ return;
+}
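Worked example for the AVC variant: with iFramesInterval = 1 s and frameRate = 30, val = 30*1 - 1 = 29 P-frames per GOP; for a profile above Baseline the helper then enables one B-frame and halves nPFrames to 14, and numBFrames is reported back so drainInputBuffers() can queue one extra input buffer before breaking out of its submit-one-buffer loop.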
+//-- END :: AUDIO disable and profile change based on property -----
+void QCUtilityClass::helper_addMediaCodec(Vector<MediaCodecList::CodecInfo> &mCodecInfos,
+ KeyedVector<AString, size_t> &mTypes,
+ bool encoder, const char *name,
+ const char *type, uint32_t quirks) {
+ mCodecInfos.push();
+ MediaCodecList::CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+ info->mName = name;
+ info->mIsEncoder = encoder;
+ ssize_t index = mTypes.indexOfKey(type);
+ uint32_t bit = mTypes.valueAt(index);
+ info->mTypes |= 1ul << bit;
+ info->mQuirks = quirks;
+}
+
+uint32_t QCUtilityClass::helper_getCodecSpecificQuirks(KeyedVector<AString, size_t> &mCodecQuirks,
+ Vector<AString> quirks) {
+ size_t i = 0, numQuirks = quirks.size();
+ uint32_t bit = 0, value = 0;
+ for (i = 0; i < numQuirks; i++)
+ {
+ ssize_t index = mCodecQuirks.indexOfKey(quirks.itemAt(i));
+ bit = mCodecQuirks.valueAt(index);
+ value |= 1ul << bit;
+ }
+ return value;
+}
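Sketch of the quirk mapping (the bit positions are whatever MediaCodecList assigned while parsing media_codecs.xml, so the numbers here are illustrative): if "requires-allocate-on-input-ports" maps to bit 2 and "requires-allocate-on-output-ports" to bit 3, the QcomAACQuirks vector built in MediaCodecList.cpp above yields (1<<2)|(1<<3) = 0x0C, which helper_addMediaCodec() then stores as the codec's mQuirks.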
+
+//- returns the extractor that should be used: the extended one when it is
+//  preferred, otherwise the existing default extractor (which may be NULL)
+//- caller needs to check for NULL
+//  defaultExt - the existing extractor
+//  source - file source
+//  mime - container mime
+// Note that defaultExt is released in this function if the extended parser is taken
+sp<MediaExtractor> QCUtilityClass::helper_MediaExtractor_CreateIfNeeded(sp<MediaExtractor> defaultExt,
+ const sp<DataSource> &source,
+ const char *mime) {
+ bool bCheckExtendedExtractor = false;
+ bool videoOnly = true;
+ bool amrwbAudio = false;
+ if (defaultExt != NULL) {
+ for (size_t i = 0; i < defaultExt->countTracks(); ++i) {
+ sp<MetaData> meta = defaultExt->getTrackMetaData(i);
+ const char *_mime;
+ CHECK(meta->findCString(kKeyMIMEType, &_mime));
+
+ String8 mime = String8(_mime);
+
+ if (!strncasecmp(mime.string(), "audio/", 6)) {
+ videoOnly = false;
+
+ amrwbAudio = !strncasecmp(mime.string(),
+ MEDIA_MIMETYPE_AUDIO_AMR_WB,
+ strlen(MEDIA_MIMETYPE_AUDIO_AMR_WB));
+ if (amrwbAudio) break;
+ }
+ }
+ bCheckExtendedExtractor = videoOnly || amrwbAudio;
+ } else {
+ bCheckExtendedExtractor = true;
+ }
+
+ if (!bCheckExtendedExtractor) {
+ ALOGD("extended extractor not needed, return default");
+ return defaultExt;
+ }
+
+ sp<MediaExtractor> retextParser;
+
+ // Create the extended extractor only if the default extractor is not selected
+ ALOGD("Try creating ExtendedExtractor");
+ retextParser = ExtendedExtractor::CreateExtractor(source, mime);
+
+ if (retextParser == NULL) {
+ ALOGD("Couldn't create the extended extractor, return default one");
+ return defaultExt;
+ }
+
+ if (defaultExt == NULL) {
+ ALOGD("default one is NULL, return extended extractor");
+ return retextParser;
+ }
+
+ //At this point both extractors are valid; give preference to the extended
+ //extractor only if it exposes a track type the default one cannot handle.
+
+ //needed to prevent a leak in case both extractors are valid
+ //but we still don't want to use the extended one; the new
+ //one must be released
+ bool bUseDefaultExtractor = true;
+
+ for (size_t i = 0; (i < retextParser->countTracks()); ++i) {
+ sp<MetaData> meta = retextParser->getTrackMetaData(i);
+ const char *mime;
+ bool success = meta->findCString(kKeyMIMEType, &mime);
+ if ((success == true) && !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)) {
+ ALOGD("Discarding default extractor and using the extended one");
+ bUseDefaultExtractor = false;
+ break;
+ }
+ }
+
+ if (bUseDefaultExtractor) {
+ ALOGD("using default extractor inspite of having a new extractor");
+ retextParser.clear();
+ return defaultExt;
+ } else {
+ defaultExt.clear();
+ return retextParser;
+ }
+}
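Net effect of the helper, compared with the inline logic it replaces in MediaExtractor.cpp: the default extractor is returned untouched whenever it already exposes a non-AMR-WB audio track; otherwise ExtendedExtractor is tried, wins outright when the default is NULL, and replaces the default only when it exposes an AMR-WB+ track. Whichever extractor loses is released through the strong pointer rather than with the raw delete the old code used.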
+
+}
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index d9858d7..4a42a70 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -360,8 +360,9 @@ status_t SampleTable::setCompositionTimeToSampleParams(
return ERROR_IO;
}
- if (U32_AT(header) != 0) {
- // Expected version = 0, flags = 0.
+ if (U32_AT(header) != 0 &&
+ U32_AT(header) != 0x01000000) {
+ // Expected version = 0/1, flags = 0.
return ERROR_MALFORMED;
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 74e9222..7bd2032 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -28,6 +28,17 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#ifdef QCOM_ENHANCED_AUDIO
+#include <QCMediaDefs.h>
+#include <QCMetaData.h>
+#include <QOMX_AudioExtensions.h>
+#include <OMX_QCOMExtns.h>
+#include "include/avc_utils.h"
+
+#include "include/QCUtilityClass.h"
+#endif
+
+
namespace android {
uint16_t U16_AT(const uint8_t *ptr) {
@@ -111,6 +122,46 @@ status_t convertMetaDataToMessage(
if (meta->findInt32(kKeyIsADTS, &isADTS)) {
msg->setInt32("is-adts", true);
}
+#ifdef QCOM_ENHANCED_AUDIO
+ int32_t keyWMAVersion;
+ if (meta->findInt32(kKeyWMAVersion, &keyWMAVersion)) {
+ msg->setInt32("WMA-Version", keyWMAVersion);
+ }
+ int32_t bitRate;
+ int32_t encodeOptions;
+ int32_t blockAlign;
+ int32_t bitspersample;
+ int32_t formattag;
+ int32_t advencopt1;
+ int32_t advencopt2;
+ int32_t VirtualPktSize;
+
+ if (meta->findInt32(kKeyWMABitspersample, &bitspersample)) {
+ msg->setInt32("bsps", bitspersample);
+ }
+ if (meta->findInt32(kKeyWMAFormatTag, &formattag)) {
+ msg->setInt32("fmtt", formattag);
+ }
+ if (meta->findInt32(kKeyWMAAdvEncOpt1, &advencopt1)) {
+ msg->setInt32("ade1", advencopt1);
+ }
+
+ if (meta->findInt32(kKeyWMAAdvEncOpt2, &advencopt2)) {
+ msg->setInt32("ade2", advencopt2);
+ }
+ if (meta->findInt32(kKeyWMAVirPktSize, &VirtualPktSize)) {
+ msg->setInt32("vpks", VirtualPktSize);
+ }
+ if (meta->findInt32(kKeyBitRate, &bitRate)) {
+ msg->setInt32("brte", bitRate);
+ }
+ if (meta->findInt32(kKeyWMAEncodeOpt, &encodeOptions)) {
+ msg->setInt32("eopt", encodeOptions);
+ }
+ if (meta->findInt32(kKeyWMABlockAlign, &blockAlign)) {
+ msg->setInt32("blka", blockAlign);
+ }
+#endif
}
int32_t maxInputSize;
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index a38400b..2640319 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -192,8 +192,8 @@ status_t WAVExtractor::init() {
mNumChannels = U16_LE_AT(&formatSpec[2]);
if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
- if (mNumChannels != 1 && mNumChannels != 2) {
- ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
+ if (mNumChannels != 1 && mNumChannels != 2 && mNumChannels != 4) {
+ ALOGW("More than 4 channels (%d) in non-WAVE_EXT, unknown channel mask",
mNumChannels);
}
} else {
@@ -271,6 +271,10 @@ status_t WAVExtractor::init() {
if (mValidFormat) {
mDataOffset = offset;
mDataSize = chunkSize;
+ off64_t dataSourceSize = 0;
+
+ if (OK == mDataSource->getSize(&dataSourceSize) && mDataSize > (dataSourceSize - offset))
+ mDataSize = dataSourceSize - offset;
mTrackMeta = new MetaData;
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index fbe98f1..7bfa375 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -60,7 +60,11 @@ void FindAVCDimensions(
parseUE(&br); // bit_depth_luma_minus8
parseUE(&br); // bit_depth_chroma_minus8
br.skipBits(1); // qpprime_y_zero_transform_bypass_flag
- CHECK_EQ(br.getBits(1), 0u); // seq_scaling_matrix_present_flag
+ bool seq_scaling_matrix_present = (br.getBits(1) != 0u);
+ if (isInterlaced != NULL && seq_scaling_matrix_present) {
+ return;
+ }
+ CHECK_EQ(seq_scaling_matrix_present, false); // seq_scaling_matrix_present_flag
}
parseUE(&br); // log2_max_frame_num_minus4
@@ -128,11 +132,11 @@ void FindAVCDimensions(
(frame_crop_left_offset + frame_crop_right_offset) * cropUnitX;
*height -=
(frame_crop_top_offset + frame_crop_bottom_offset) * cropUnitY;
-
- if (isInterlaced != NULL) {
- *isInterlaced = !frame_mbs_only_flag;
- }
}
+ if (isInterlaced != NULL) {
+ *isInterlaced = !frame_mbs_only_flag;
+ }
+
}
status_t getNextNALUnit(
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index d88813e..606a43d 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -358,9 +358,13 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) {
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
- if (!mIsFirst) {
+ if (!mIsFirst || mInputBufferCount) {
// flush out the decoder's delayed data by calling DecodeFrame
// one more time, with the AACDEC_FLUSH flag set
+
+ // For the case where the very first input buffer carries EOS, decode the
+ // header to update the sample rate and channel mode, then flush out the
+ // buffer.
INT_PCM *outBuffer =
reinterpret_cast<INT_PCM *>(
outHeader->pBuffer + outHeader->nOffset);
@@ -392,6 +396,9 @@ void SoftAAC2::onQueueFilled(OMX_U32 portIndex) {
}
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ outHeader->nTimeStamp =
+ mAnchorTimeUs
+ + (mNumSamplesOutput * 1000000ll) / mStreamInfo->sampleRate;
outQueue.erase(outQueue.begin());
outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
index 07f8b4f..27dea92 100644
--- a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
@@ -71,7 +71,7 @@ void SoftAMRNBEncoder::initPorts() {
def.eDir = OMX_DirInput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t);
+ def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t) * 4;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainAudio;
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
index 9ccb49c..afd2b32 100644
--- a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
@@ -78,7 +78,7 @@ void SoftAMRWBEncoder::initPorts() {
def.eDir = OMX_DirInput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t);
+ def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t) * 4;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainAudio;
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index fb1135c..aade29c 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -110,7 +110,7 @@ void SoftMP3::initPorts() {
void SoftMP3::initDecoder() {
mConfig->equalizerType = flat;
mConfig->crcEnabled = false;
-
+ mConfig->samplingRate = mSamplingRate;
uint32_t memRequirements = pvmp3_decoderMemRequirements();
mDecoderBuf = malloc(memRequirements);
@@ -237,10 +237,13 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) {
if (decoderErr != NO_ENOUGH_MAIN_DATA_ERROR
&& decoderErr != SIDE_INFO_ERROR) {
ALOGE("mp3 decoder returned error %d", decoderErr);
-
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
- mSignalledError = true;
- return;
+ if(decoderErr == SYNCH_LOST_ERROR) {
+ mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
+ } else {
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ mSignalledError = true;
+ return;
+ }
}
if (mConfig->outputFrameSize == 0) {
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 8bda7f8..b77cc86 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -308,6 +308,7 @@ private:
void logCatchUp(int64_t ts, int64_t clock, int64_t delta);
void logLate(int64_t ts, int64_t clock, int64_t delta);
void logOnTime(int64_t ts, int64_t clock, int64_t delta);
+ void printStats();
int64_t getTimeOfDayUs();
#ifdef QCOM_HARDWARE
void checkTunnelExceptions();
diff --git a/media/libstagefright/include/QCUtilityClass.h b/media/libstagefright/include/QCUtilityClass.h
new file mode 100644
index 0000000..91b184f
--- /dev/null
+++ b/media/libstagefright/include/QCUtilityClass.h
@@ -0,0 +1,111 @@
+/*Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef QC_UTIL_CLASS_H_
+#define QC_UTIL_CLASS_H_
+
+#include <QCMetaData.h>
+#include <cutils/properties.h>
+#include <QCMediaDefs.h>
+
+#include <media/Metadata.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/MediaCodecList.h>
+
+#include <utils/Errors.h>
+#include <sys/types.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <utils/StrongPointer.h>
+
+#include <media/MediaRecorderBase.h>
+#include <camera/CameraParameters.h>
+
+#include <OMX_Video.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+struct QCUtilityClass
+{
+ // helper function to enable StagefrightRecorder to recalculate the file duration
+ // when the hfr property is set
+ static status_t helper_StageFrightRecoder_hfr(sp<MetaData> &meta, sp<MetaData> &enc_meta,
+ int64_t &maxFileDurationUs, int32_t frameRate,
+ video_encoder videoEncoder);
+
+ // helper function to enable camera source to set kKeyHFR when video-hfr is enabled
+ static void helper_CameraSource_hfr(const CameraParameters& params, sp<MetaData> &meta);
+
+ // helper function to enable MPEG4Writer to recompute timestamps when hfr is enabled
+ static void helper_MPEG4Writer_hfr(sp<MetaData> &meta, int64_t &timestampUs);
+
+ // helper function to enable OMXCodec to recalculate frameRate and bitRate when hfr is enabled
+ static void helper_OMXCodec_hfr(const sp<MetaData> &meta, int32_t &frameRate,
+ int32_t &bitRate, int32_t &newFrameRate);
+
+ // helper function to enable OMXCodec to propagate HFR and FrameRate from the
+ // input format to the output format
+ static void helper_OMXCodec_hfr(const sp<MetaData> &inputFormat, sp<MetaData> &outputFormat);
+
+ // helper function to disable audio when decode audio disable prop is set
+ static bool helper_Awesomeplayer_checkIfAudioDisable();
+
+ // helper function to disable audio when encode audio disable prop is set
+ static bool helper_StagefrightRecoder_checkIfAudioDisable();
+
+ // helper function to override the encoder profile from the encoder.video.profile property
+ static void helper_StagefrightRecoder_setUserprofile(video_encoder &videoEncoder,
+ int32_t &videoEncoderProfile);
+ // helper function to set B-frame related info for the MPEG4 encoder params
+ static void helper_OMXCodec_setBFrames(OMX_VIDEO_PARAM_MPEG4TYPE &mpeg4type, bool &numBFrames);
+
+ // helper function to set B-frame related info for the H264 encoder params
+ static void helper_OMXCodec_setBFrames(OMX_VIDEO_PARAM_AVCTYPE &h264type, bool &numBFrames,
+ int32_t iFramesInterval, int32_t frameRate);
+
+ // helper function to add a media codec with specific quirks to the codec list
+ static void helper_addMediaCodec(Vector<MediaCodecList::CodecInfo> &mCodecInfos,
+ KeyedVector<AString, size_t> &mTypes,
+ bool encoder, const char *name,
+ const char *type, uint32_t quirks);
+
+ // helper function to compute the quirks bitmask from quirk name strings
+ static uint32_t helper_getCodecSpecificQuirks(KeyedVector<AString, size_t> &mCodecQuirks,
+ Vector<AString> quirks);
+ static sp<MediaExtractor> helper_MediaExtractor_CreateIfNeeded(sp<MediaExtractor> defaultExt,
+ const sp<DataSource> &source,
+ const char *mime);
+};
+
+}
+#endif // QC_UTIL_CLASS_H_
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index bff3def..e41b342 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -599,15 +599,20 @@ status_t OMXNodeInstance::freeBuffer(
OMX_U32 portIndex, OMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
- removeActiveBuffer(portIndex, buffer);
-
OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)buffer;
BufferMeta *buffer_meta = static_cast<BufferMeta *>(header->pAppPrivate);
OMX_ERRORTYPE err = OMX_FreeBuffer(mHandle, portIndex, header);
- delete buffer_meta;
- buffer_meta = NULL;
+ if (err != OMX_ErrorNone) {
+ ALOGW("OMX_FreeBuffer failed w/ err %x, do not remove from active buffer list", err);
+ } else {
+ ALOGI("OMX_FreeBuffer for buffer header %p successful", header);
+ removeActiveBuffer(portIndex, buffer);
+
+ delete buffer_meta;
+ buffer_meta = NULL;
+ }
return StatusFromOMXError(err);
}
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 1ff87c8..3dde663 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -16,6 +16,10 @@ ifeq ($(BOARD_USE_SECTVOUT),true)
LOCAL_SHARED_LIBRARIES += libTVOut
endif
+ifeq ($(TARGET_QCOM_AUDIO_VARIANT),caf)
+ LOCAL_CFLAGS += -DQCOM_ENHANCED_AUDIO
+endif
+
# FIXME The duplicate audioflinger is temporary
LOCAL_C_INCLUDES := \
frameworks/av/media/libmediaplayerservice \
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index a14c205..771b8c9 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -51,6 +51,14 @@ LOCAL_SHARED_LIBRARIES := \
libdl \
libpowermanager
+# SRS Processing
+ifeq ($(strip $(BOARD_USES_SRS_TRUEMEDIA)),true)
+LOCAL_SHARED_LIBRARIES += libsrsprocessing
+LOCAL_CFLAGS += -DSRS_PROCESSING
+LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio/audio-effects
+endif
+# SRS Processing
+
LOCAL_STATIC_LIBRARIES := \
libscheduling_policy \
libcpustats \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 3c04c1c..0c3cb14 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -88,6 +88,10 @@
#include <media/nbaio/SourceAudioBufferProvider.h>
#include "SchedulingPolicyService.h"
+#ifdef SRS_PROCESSING
+#include "srs_processing.h"
+#include "postpro_patch_ics.h"
+#endif
// ----------------------------------------------------------------------------
@@ -125,7 +129,7 @@ static const int8_t kMaxTrackStartupRetries = 50;
// allow less retry attempts on direct output thread.
// direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
-static const int8_t kMaxTrackRetriesDirect = 2;
+static const int8_t kMaxTrackRetriesDirect = 5;
static const int kDumpLockRetries = 50;
static const int kDumpLockSleepUs = 20000;
@@ -667,11 +671,11 @@ Exit:
void AudioFlinger::deleteEffectSession()
{
- Mutex::Autolock _l(mLock);
ALOGV("deleteSession");
// -2 is invalid session ID
mLPASessionId = -2;
if (mLPAEffectChain != NULL) {
+ mLPAEffectChain->lock();
mLPAEffectChain->setLPAFlag(false);
size_t i, numEffects = mLPAEffectChain->getNumEffects();
for(i = 0; i < numEffects; i++) {
@@ -684,6 +688,7 @@ void AudioFlinger::deleteEffectSession()
}
effect->configure();
}
+ mLPAEffectChain->unlock();
mLPAEffectChain.clear();
mLPAEffectChain = NULL;
}
@@ -691,7 +696,8 @@ void AudioFlinger::deleteEffectSession()
// ToDo: Should we go ahead with this frameCount?
#define DEAFULT_FRAME_COUNT 1200
-void AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer, int16_t *outBuffer, int size)
+bool AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer,
+ int16_t *outBuffer, int size, bool force)
{
ALOGV("applyEffectsOn: inBuf %p outBuf %p size %d token %p", inBuffer, outBuffer, size, token);
// This might be the first buffer to apply effects after effect config change
@@ -699,6 +705,12 @@ void AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer, int16_t *outBu
mIsEffectConfigChanged = false;
volatile size_t numEffects = 0;
+
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_ICS_OUTPROC_DIRECT_SAMPLES(token, AUDIO_FORMAT_PCM_16_BIT, outBuffer, size,
+ mLPASampleRate, mLPANumChannels);
+#endif
+
if(mLPAEffectChain != NULL) {
numEffects = mLPAEffectChain->getNumEffects();
}
@@ -729,16 +741,17 @@ void AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer, int16_t *outBu
bool isEffectEnabled = false;
for(i = 0; i < numEffects; i++) {
// If effect configuration is changed while applying effects do not process further
- if(mIsEffectConfigChanged) {
+
+ if(mIsEffectConfigChanged && !force) {
mLPAEffectChain->unlock();
- ALOGV("applyEffectsOn: mIsEffectConfigChanged is set - no further processing");
- return;
+ ALOGV("applyEffectsOn: mIsEffectConfigChanged is set - no further processing %d",frameCount);
+ return false;
}
sp<EffectModule> effect = mLPAEffectChain->getEffectFromIndex_l(i);
if(effect == NULL) {
ALOGE("getEffectFromIndex_l(%d) returned NULL ptr", i);
mLPAEffectChain->unlock();
- return;
+ return false;
}
if(i == 0) {
// For the first set input and output buffers different
@@ -773,13 +786,14 @@ void AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer, int16_t *outBu
}
}
- if (!numEffects) {
+ if (!numEffects && !force) {
ALOGV("applyEffectsOn: There are no effects to be applied");
if(inBuffer != outBuffer) {
// No effect applied so just copy input buffer to output buffer
memcpy(outBuffer, inBuffer, size);
}
}
+ return true;
}
#endif
@@ -1154,6 +1168,11 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
// ioHandle == 0 means the parameters are global to the audio hardware interface
if (ioHandle == 0) {
Mutex::Autolock _l(mLock);
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_ICS_PARAMS_SET(keyValuePairs);
+ if (!mDirectAudioTracks.isEmpty())
+ audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
+#endif
status_t final_result = NO_ERROR;
{
AutoMutex lock(mHardwareLock);
@@ -1232,6 +1251,12 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
String8 key = String8(AudioParameter::keyRouting);
int device;
if (param.getInt(key, device) == NO_ERROR) {
+
+#ifdef SRS_PROCESSING
+ ALOGV("setParameters:: routing change to device %d", device);
+ desc->device = (audio_devices_t)device;
+ POSTPRO_PATCH_ICS_OUTPROC_MIX_ROUTE(desc->trackRefPtr, param, device);
+#endif
if(mLPAEffectChain != NULL){
mLPAEffectChain->setDevice_l(device);
audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
@@ -1281,6 +1306,9 @@ String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& k
if (ioHandle == 0) {
String8 out_s8;
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_ICS_PARAMS_GET(keys, out_s8);
+#endif
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
char *s;
{
@@ -1596,10 +1624,8 @@ status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
#ifdef QCOM_HARDWARE
void AudioFlinger::ThreadBase::effectConfigChanged() {
- mAudioFlinger->mLock.lock();
ALOGV("New effect is being added to LPA chain, Notifying LPA Direct Track");
mAudioFlinger->audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
- mAudioFlinger->mLock.unlock();
}
#endif
@@ -2176,7 +2202,9 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
// This is probably too conservative, but legacy application code may depend on it.
// If you change this calculation, also review the start threshold which is related.
uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
- uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
+ uint32_t minBufCount = 0;
+ if(mSampleRate)
+ minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
if (minBufCount < 2) {
minBufCount = 2;
}
@@ -2188,7 +2216,17 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
}
if (mType == DIRECT) {
- if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
+#ifdef QCOM_ENHANCED_AUDIO
+ if (((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM)
+ ||((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_AMR_NB)
+ ||((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_AMR_WB)
+ ||((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_EVRC)
+ ||((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_EVRCB)
+ ||((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_EVRCWB))
+#else
+ if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM)
+#endif
+ {
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
"for output %p with format %d",
@@ -2274,7 +2312,10 @@ uint32_t AudioFlinger::MixerThread::correctLatency(uint32_t latency) const
{
if (mFastMixer != NULL) {
MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
- latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
+ if(mSampleRate)
+ latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
+ else
+ ALOGW("SampleRate is 0");
}
return latency;
}
@@ -2950,6 +2991,13 @@ bool AudioFlinger::PlaybackThread::threadLoop()
standbyTime = systemTime();
+#ifdef SRS_PROCESSING
+if (mType == MIXER) {
+ POSTPRO_PATCH_ICS_OUTPROC_MIX_INIT(this, gettid());
+ } else if (mType == DUPLICATING) {
+ POSTPRO_PATCH_ICS_OUTPROC_DUPE_INIT(this, gettid());
+ }
+#endif
// MIXER
nsecs_t lastWarning = 0;
@@ -3064,6 +3112,15 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// sleepTime == 0 means we must write to audio hardware
if (sleepTime == 0) {
+#ifdef SRS_PROCESSING
+ if (mType == MIXER) {
+ POSTPRO_PATCH_ICS_OUTPROC_MIX_SAMPLES(this, mFormat, mMixBuffer,
+ mixBufferSize, mSampleRate, mChannelCount);
+ } else if (mType == DUPLICATING) {
+ POSTPRO_PATCH_ICS_OUTPROC_DUPE_SAMPLES(this, mFormat, mMixBuffer,
+ mixBufferSize, mSampleRate, mChannelCount);
+ }
+#endif
threadLoop_write();
if (mType == MIXER) {
@@ -3115,6 +3172,13 @@ if (mType == MIXER) {
}
}
+#ifdef SRS_PROCESSING
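+ // Unregister this thread from SRS post-processing before the loop exits.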
+ if (mType == MIXER) {
+ POSTPRO_PATCH_ICS_OUTPROC_MIX_EXIT(this, gettid());
+ } else if (mType == DUPLICATING) {
+ POSTPRO_PATCH_ICS_OUTPROC_DUPE_EXIT(this, gettid());
+ }
+#endif
releaseWakeLock();
ALOGV("Thread %p type %d exiting", this, mType);
@@ -3516,7 +3580,12 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
minFrames = mNormalFrameCount;
} else {
// +1 for rounding and +1 for additional sample needed for interpolation
- minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
+ if(mSampleRate)
+ minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
+ else {
+ minFrames = 2;
+ ALOGW("SampleRate is 0");
+ }
// add frames already consumed but not yet released by the resampler
// because cblk->framesReady() will include these frames
minFrames += mAudioMixer->getUnreleasedFrames(track->name());
@@ -3873,6 +3942,9 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l()
String8 keyValuePair = mNewParameters[0];
AudioParameter param = AudioParameter(keyValuePair);
int value;
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_ICS_OUTPROC_MIX_ROUTE(this, param, value);
+#endif
if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
reconfig = true;
@@ -4462,7 +4534,10 @@ void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
{
Mutex::Autolock _l(mLock);
// FIXME explain this formula
- int frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
+ int sampleRate = thread->sampleRate();
+ int frameCount = 0;
+ if (sampleRate)
+ frameCount = (3 * mNormalFrameCount * mSampleRate) / sampleRate;
OutputTrack *outputTrack = new OutputTrack(thread,
this,
mSampleRate,
@@ -4548,6 +4623,9 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
audio_format_t format,
audio_channel_mask_t channelMask,
int frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ uint32_t flags,
+#endif
const sp<IMemory>& sharedBuffer,
int sessionId)
: RefBase(),
@@ -4561,6 +4639,9 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
mSampleRate(sampleRate),
mFormat(format),
mStepServerFailed(false),
+#ifdef QCOM_ENHANCED_AUDIO
+ mFlags(0),
+#endif
mSessionId(sessionId)
// mChannelCount
// mChannelMask
@@ -4570,7 +4651,40 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
// ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
size_t size = sizeof(audio_track_cblk_t);
uint8_t channelCount = popcount(channelMask);
+#ifdef QCOM_ENHANCED_AUDIO
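+ // Size the track buffer from the codec's full-rate frame size for compressed
+ // formats (AMR-NB 32, EVRC 23, QCELP 35, AMR-WB 61 bytes per frame, AAC 2048 bytes).
+ // The low bit of 'flags' forces 16-bit PCM sizing; it is set for voice-call
+ // playback tracks and voice-communication capture tracks.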
+ size_t bufferSize = 0;
+ if ((int16_t)flags == 0x1) {
+ bufferSize = frameCount*channelCount*sizeof(int16_t);
+ } else {
+ if ( (format == AUDIO_FORMAT_PCM_16_BIT) ||
+ (format == AUDIO_FORMAT_PCM_8_BIT))
+ {
+ bufferSize = frameCount*channelCount*sizeof(int16_t);
+ }
+ else if (format == AUDIO_FORMAT_AMR_NB)
+ {
+ bufferSize = frameCount*channelCount*32; // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_EVRC)
+ {
+ bufferSize = frameCount*channelCount*23; // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_QCELP)
+ {
+ bufferSize = frameCount*channelCount*35; // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_AAC)
+ {
+ bufferSize = frameCount*2048; // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_AMR_WB)
+ {
+ bufferSize = frameCount*channelCount*61; // full rate frame size
+ }
+ }
+#else
size_t bufferSize = frameCount*channelCount*sizeof(int16_t);
+#endif
if (sharedBuffer == 0) {
size += bufferSize;
}
@@ -4593,7 +4707,39 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
mChannelMask = channelMask;
if (sharedBuffer == 0) {
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
+#ifdef QCOM_ENHANCED_AUDIO
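+ // Zero the newly mapped buffer using the same per-format sizing as the allocation above.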
+ if ((int16_t)flags == 0x1) {
+ bufferSize = frameCount*channelCount*sizeof(int16_t);
+ } else {
+ if ((format == AUDIO_FORMAT_PCM_16_BIT) ||
+ (format == AUDIO_FORMAT_PCM_8_BIT))
+ {
+ memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
+ }
+ else if (format == AUDIO_FORMAT_AMR_NB)
+ {
+ memset(mBuffer, 0, frameCount*channelCount*32); // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_EVRC)
+ {
+ memset(mBuffer, 0, frameCount*channelCount*23); // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_QCELP)
+ {
+ memset(mBuffer, 0, frameCount*channelCount*35); // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_AAC)
+ {
+ memset(mBuffer, 0, frameCount*2048); // full rate frame size
+ }
+ else if (format == AUDIO_FORMAT_AMR_WB)
+ {
+ memset(mBuffer, 0, frameCount*channelCount*61); // full rate frame size
+ }
+ }
+#else
memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
+#endif
// Force underrun condition to avoid false underrun callback until first data is
// written to buffer (other flags are cleared)
mCblk->flags = CBLK_UNDERRUN_ON;
@@ -4696,13 +4842,21 @@ void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t f
int8_t *bufferEnd = bufferStart + frames * frameSize;
// Check validity of returned pointer in case the track control block would have been corrupted.
+#ifdef QCOM_ENHANCED_AUDIO
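+ // Log and return NULL instead of asserting when the control block looks
+ // corrupted, so a bad client cannot abort the server process.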
+ if (bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd){
+ ALOGE("TrackBase::getBuffer buffer out of range:\n start: %p, end %p , mBuffer %p mBufferEnd %p\n \
+ server %u, serverBase %u, user %u, userBase %u",
+ bufferStart, bufferEnd, mBuffer, mBufferEnd,
+ cblk->server, cblk->serverBase, cblk->user, cblk->userBase);
+ return 0;
+ }
+#else
ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
"TrackBase::getBuffer buffer out of range:\n"
" start: %p, end %p , mBuffer %p mBufferEnd %p\n"
" server %u, serverBase %u, user %u, userBase %u, frameSize %d",
bufferStart, bufferEnd, mBuffer, mBufferEnd,
cblk->server, cblk->serverBase, cblk->user, cblk->userBase, frameSize);
-
+#endif
return bufferStart;
}
@@ -4726,7 +4880,11 @@ AudioFlinger::PlaybackThread::Track::Track(
const sp<IMemory>& sharedBuffer,
int sessionId,
IAudioFlinger::track_flags_t flags)
- : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId),
+ : TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ ((audio_stream_type_t)streamType == AUDIO_STREAM_VOICE_CALL)?0x1:0x0,
+#endif
+ sharedBuffer, sessionId),
mMute(false),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
@@ -4745,7 +4903,12 @@ AudioFlinger::PlaybackThread::Track::Track(
if (mCblk != NULL) {
// NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
// 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
- mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * sizeof(int16_t) : sizeof(uint8_t);
+#ifdef QCOM_ENHANCED_AUDIO
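+ // Voice-call tracks always use the 16-bit PCM frame size, regardless of the nominal format.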
+ if ((audio_stream_type_t)streamType == AUDIO_STREAM_VOICE_CALL)
+ mCblk->frameSize = mChannelCount * sizeof(int16_t);
+ else
+#endif
+ mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * sizeof(int16_t) : sizeof(uint8_t);
// to avoid leaking a track name, do not allocate one unless there is an mCblk
mName = thread->getTrackName_l(channelMask, sessionId);
mCblk->mName = mName;
@@ -5776,12 +5939,44 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack(
audio_format_t format,
audio_channel_mask_t channelMask,
int frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ uint32_t flags,
+#endif
int sessionId)
- : TrackBase(thread, client, sampleRate, format,
- channelMask, frameCount, 0 /*sharedBuffer*/, sessionId),
- : TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ ((audio_source_t)((int16_t)flags) == AUDIO_SOURCE_VOICE_COMMUNICATION) ?
+ ((flags & 0xffff0000)| 0x1) : ((flags & 0xffff0000)),
+#endif
+ 0 /*sharedBuffer*/, sessionId),
mOverflow(false)
{
+ uint8_t channelCount = popcount(channelMask);
if (mCblk != NULL) {
+#ifdef QCOM_ENHANCED_AUDIO
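+ // Derive the control-block frame size from the capture format: PCM uses the
+ // sample size, the QCOM codecs use their full-rate frame sizes, and
+ // voice-communication capture is always treated as 16-bit PCM.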
+ ALOGV("RecordTrack constructor, size %d flags %d", (int)mBufferEnd - (int)mBuffer,flags);
+ if ((audio_source_t)((int16_t)flags) == AUDIO_SOURCE_VOICE_COMMUNICATION) {
+ mCblk->frameSize = mChannelCount * sizeof(int16_t);
+ } else {
+ if (format == AUDIO_FORMAT_AMR_NB) {
+ mCblk->frameSize = channelCount * 32;
+ } else if (format == AUDIO_FORMAT_EVRC) {
+ mCblk->frameSize = channelCount * 23;
+ } else if (format == AUDIO_FORMAT_QCELP) {
+ mCblk->frameSize = channelCount * 35;
+ } else if (format == AUDIO_FORMAT_AAC) {
+ mCblk->frameSize = 2048;
+ } else if (format == AUDIO_FORMAT_PCM_16_BIT) {
+ mCblk->frameSize = mChannelCount * sizeof(int16_t);
+ } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
+ mCblk->frameSize = mChannelCount * sizeof(int8_t);
+ } else if (format == AUDIO_FORMAT_AMR_WB) {
+ mCblk->frameSize = channelCount * 61;
+ } else {
+ mCblk->frameSize = sizeof(int8_t);
+ }
+ }
+#else
ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
if (format == AUDIO_FORMAT_PCM_16_BIT) {
mCblk->frameSize = mChannelCount * sizeof(int16_t);
@@ -5790,6 +5985,7 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack(
} else {
mCblk->frameSize = sizeof(int8_t);
}
+#endif
}
}
@@ -6200,8 +6396,14 @@ AudioFlinger::DirectAudioTrack::DirectAudioTrack(const sp<AudioFlinger>& audioFl
int output, AudioSessionDescriptor *outputDesc,
IDirectTrackClient* client, audio_output_flags_t outflag)
: BnDirectTrack(), mIsPaused(false), mAudioFlinger(audioFlinger), mOutput(output), mOutputDesc(outputDesc),
- mClient(client), mEffectConfigChanged(false), mKillEffectsThread(false), mFlag(outflag)
+ mClient(client), mEffectConfigChanged(false), mKillEffectsThread(false), mFlag(outflag),
+ mEffectsThreadScratchBuffer(NULL)
{
+#ifdef SRS_PROCESSING
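+ // Announce the new direct output to SRS post-processing and apply the current routing.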
+ ALOGD("SRS_Processing - DirectAudioTrack - OutNotify_Init: %p TID %d\n", this, gettid());
+ POSTPRO_PATCH_ICS_OUTPROC_DIRECT_INIT(this, gettid());
+ SRS_Processing::ProcessOutRoute(SRS_Processing::AUTO, this, outputDesc->device);
+#endif
if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
createEffectThread();
@@ -6209,6 +6411,13 @@ AudioFlinger::DirectAudioTrack::DirectAudioTrack(const sp<AudioFlinger>& audioFl
mAudioFlinger->registerClient(mAudioFlingerClient);
allocateBufPool();
+#ifdef SRS_PROCESSING
+ } else if (mFlag & AUDIO_OUTPUT_FLAG_TUNNEL) {
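+ // Tunnel outputs also get an effects thread and an AudioFlinger client so
+ // effect config changes are handled (see the TUNNEL branch in EffectsThreadEntry).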
+ ALOGV("create effects thread for TUNNEL");
+ createEffectThread();
+ mAudioFlingerClient = new AudioFlingerDirectTrackClient(this);
+ mAudioFlinger->registerClient(mAudioFlingerClient);
+#endif
}
outputDesc->mVolumeScale = 1.0;
mDeathRecipient = new PMDeathRecipient(this);
@@ -6216,11 +6425,21 @@ AudioFlinger::DirectAudioTrack::DirectAudioTrack(const sp<AudioFlinger>& audioFl
}
AudioFlinger::DirectAudioTrack::~DirectAudioTrack() {
+#ifdef SRS_PROCESSING
+ ALOGD("SRS_Processing - DirectAudioTrack - OutNotify_Exit: %p TID %d\n", this, gettid());
+ POSTPRO_PATCH_ICS_OUTPROC_DIRECT_EXIT(this, gettid());
+#endif
if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
requestAndWaitForEffectsThreadExit();
mAudioFlinger->deregisterClient(mAudioFlingerClient);
mAudioFlinger->deleteEffectSession();
deallocateBufPool();
+#ifdef SRS_PROCESSING
+ } else if (mFlag & AUDIO_OUTPUT_FLAG_TUNNEL) {
+ requestAndWaitForEffectsThreadExit();
+ mAudioFlinger->deregisterClient(mAudioFlingerClient);
+ mAudioFlinger->deleteEffectSession();
+#endif
}
AudioSystem::releaseOutput(mOutput);
releaseWakeLock();
@@ -6276,9 +6495,11 @@ ssize_t AudioFlinger::DirectAudioTrack::write(const void *buffer, size_t size) {
memcpy((char *) buf.localBuf, (char *)buffer, size);
buf.bytesToWrite = size;
mEffectsPool.push_back(buf);
- mAudioFlinger->applyEffectsOn(static_cast<void *>(this), (int16_t*)buf.localBuf,(int16_t*)buffer,(int)size);
+ mAudioFlinger->applyEffectsOn(static_cast<void *>(this),
+ (int16_t*)buf.localBuf, (int16_t*)buffer, (int)size, true);
mEffectLock.unlock();
}
+ ALOGV("out of Writing to AudioSessionOut");
return mOutputDesc->stream->write(mOutputDesc->stream, buffer, size);
}
@@ -6352,6 +6573,10 @@ void AudioFlinger::DirectAudioTrack::allocateBufPool() {
ALOGV("The MEM that is allocated buffer is %x, size %d",(unsigned int)dsp_buf,nSize);
}
+
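+ // Scratch buffer for the effects thread: effects are applied into this buffer
+ // first and copied to the DSP buffer only when they actually produced output.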
+ mEffectsThreadScratchBuffer = malloc(nSize);
+ ALOGV("effectsThreadScratchBuffer = %x",mEffectsThreadScratchBuffer);
+
free(buf);
}
@@ -6370,6 +6595,9 @@ void AudioFlinger::DirectAudioTrack::deallocateBufPool() {
ALOGV("Removing from bufpool");
mBufPool.erase(it);
}
+
+ free(mEffectsThreadScratchBuffer);
+ mEffectsThreadScratchBuffer = NULL;
}
status_t AudioFlinger::DirectAudioTrack::onTransact(
@@ -6397,18 +6625,40 @@ void AudioFlinger::DirectAudioTrack::EffectsThreadEntry() {
if (mEffectConfigChanged) {
mEffectConfigChanged = false;
- for ( List<BufferInfo>::iterator it = mEffectsPool.begin();
- it != mEffectsPool.end(); it++) {
- ALOGV("Apply effects on the buffer dspbuf %p, mEffectsPool.size() %d",it->dspBuf,mEffectsPool.size());
- mAudioFlinger->applyEffectsOn(static_cast<void *>(this),
- (int16_t *)it->localBuf,
- (int16_t *)it->dspBuf,
- it->bytesToWrite);
- if (mEffectConfigChanged) {
- break;
- }
- }
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
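+ // Re-apply the effect chain on every queued LPA buffer via the scratch buffer,
+ // and only copy into the DSP buffer when effects were actually applied.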
+ for ( List<BufferInfo>::iterator it = mEffectsPool.begin();
+ it != mEffectsPool.end(); it++) {
+ ALOGV("ete: calling applyEffectsOn buff %x",it->localBuf);
+ bool isEffectsApplied = mAudioFlinger->applyEffectsOn(
+ static_cast<void *>(this),
+ (int16_t *)it->localBuf,
+ (int16_t *)mEffectsThreadScratchBuffer,
+ it->bytesToWrite,
+ false);
+ if (isEffectsApplied == true){
+ ALOGV("ete:dsp updated for local buf %x",it->localBuf);
+ memcpy(it->dspBuf, mEffectsThreadScratchBuffer, it->bytesToWrite);
+ }
+ else
+ ALOGV("ete:dsp updated for local buf %x SKIPPED",it->localBuf);
+ if (mEffectConfigChanged) {
+ ALOGE("ete:effects changed, abort effects application");
+ break;
+ }
+ }
+#ifdef SRS_PROCESSING
+ } else if (mFlag & AUDIO_OUTPUT_FLAG_TUNNEL) {
+ ALOGV("applying effects for TUNNEL");
+ char buffer[2];
+ // Dummy buffer to ensure the SRS processing still takes place.
+ // The API mandates a sample rate and channel count, so they are
+ // defaulted to 48000 Hz and 2 channels respectively.
+ POSTPRO_PATCH_ICS_OUTPROC_DIRECT_SAMPLES(static_cast<void *>(this),
+ AUDIO_FORMAT_PCM_16_BIT,
+ (int16_t*)buffer, 2, 48000, 2);
+#endif
+ }
}
mEffectLock.unlock();
}
@@ -6614,6 +6864,10 @@ sp<IAudioRecord> AudioFlinger::openRecord(
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
+#ifdef QCOM_ENHANCED_AUDIO
+ size_t inputBufferSize = 0;
+ uint32_t channelCount = popcount(channelMask);
+#endif
status_t lStatus;
RecordThread *thread;
size_t inFrameCount;
@@ -6625,6 +6879,15 @@ sp<IAudioRecord> AudioFlinger::openRecord(
goto Exit;
}
+#ifdef QCOM_ENHANCED_AUDIO
+ // Check that audio input stream accepts requested audio parameters
+ inputBufferSize = getInputBufferSize(sampleRate, format, channelCount);
+ if (inputBufferSize == 0) {
+ lStatus = BAD_VALUE;
+ ALOGE("Bad audio input parameters: sampling rate %u, format %d, channels %d", sampleRate, format, channelCount);
+ goto Exit;
+ }
+#endif
// add client to list
{ // scope for mLock
Mutex::Autolock _l(mLock);
@@ -6645,6 +6908,41 @@ sp<IAudioRecord> AudioFlinger::openRecord(
*sessionId = lSessionId;
}
}
+#ifdef QCOM_ENHANCED_AUDIO
+ // frameCount must be a multiple of input buffer size
+ // Change for codec type; channelCount is already computed above for QCOM_ENHANCED_AUDIO
+ if ((audio_source_t)((int16_t)flags) == AUDIO_SOURCE_VOICE_COMMUNICATION) {
+ inFrameCount = inputBufferSize/channelCount/sizeof(short);
+ } else {
+ if ((format == AUDIO_FORMAT_PCM_16_BIT) ||
+ (format == AUDIO_FORMAT_PCM_8_BIT))
+ {
+ inFrameCount = inputBufferSize/channelCount/sizeof(short);
+ }
+ else if (format == AUDIO_FORMAT_AMR_NB)
+ {
+ inFrameCount = inputBufferSize/channelCount/32;
+ }
+ else if (format == AUDIO_FORMAT_EVRC)
+ {
+ inFrameCount = inputBufferSize/channelCount/23;
+ }
+ else if (format == AUDIO_FORMAT_QCELP)
+ {
+ inFrameCount = inputBufferSize/channelCount/35;
+ }
+ else if (format == AUDIO_FORMAT_AAC)
+ {
+ inFrameCount = inputBufferSize/2048;
+ }
+ else if (format == AUDIO_FORMAT_AMR_WB)
+ {
+ inFrameCount = inputBufferSize/channelCount/61;
+ }
+ else
+ {
+ // Unrecognized format: fall back to 16-bit PCM sizing so inFrameCount is never left uninitialized
+ inFrameCount = inputBufferSize/channelCount/sizeof(short);
+ }
+ }
+ frameCount = ((frameCount - 1)/inFrameCount + 1) * inFrameCount;
+#endif
// create new record track. The record track uses one track in mHardwareMixerThread by convention.
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
frameCount, lSessionId, flags, tid, &lStatus);
@@ -6849,9 +7147,30 @@ bool AudioFlinger::RecordThread::threadLoop()
}
}
if (framesOut && mFrameCount == mRsmpInIndex) {
+#ifdef QCOM_ENHANCED_AUDIO
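+ // For compressed (non-PCM) capture, read directly into the client buffer and
+ // derive the delivered frame count from the bytes the HAL actually returned.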
+ if (((int) framesOut != mFrameCount) &&
+ ((mFormat != AUDIO_FORMAT_PCM_16_BIT)&&
+ ((audio_source_t)mInputSource != AUDIO_SOURCE_VOICE_COMMUNICATION))) {
+ mBytesRead = mInput->stream->read(mInput->stream, buffer.raw,
+ buffer.frameCount * mFrameSize);
+ ALOGV("IR mBytesRead = %d",mBytesRead);
+ if(mBytesRead >= 0 ){
+ buffer.frameCount = mBytesRead/mFrameSize;
+ }
+ framesOut = 0;
+ } else
+#endif
if (framesOut == mFrameCount &&
+#ifdef QCOM_ENHANCED_AUDIO
+ ((audio_source_t)mInputSource != AUDIO_SOURCE_VOICE_COMMUNICATION) &&
+#endif
((int)mChannelCount == mReqChannelCount || mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
mBytesRead = mInput->stream->read(mInput->stream, buffer.raw, mInputBytes);
+#ifdef QCOM_ENHANCED_AUDIO
+ if( mBytesRead >= 0 ){
+ buffer.frameCount = mBytesRead/mFrameSize;
+ }
+#endif
framesOut = 0;
} else {
mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
@@ -6988,7 +7307,11 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR
Mutex::Autolock _l(mLock);
track = new RecordTrack(this, client, sampleRate,
- format, channelMask, frameCount, sessionId);
+ format, channelMask, frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ flags,
+#endif
+ sessionId);
if (track->getCblk() == 0) {
lStatus = NO_MEMORY;
@@ -7796,6 +8119,10 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
ALOGI("Using module %d has the primary audio interface", module);
mPrimaryHardwareDev = outHwDev;
+#ifdef SRS_PROCESSING
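+ // Hand SRS_Processing a pointer to the primary audio HAL device via its raw-data hook.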
+ SRS_Processing::RawDataSet(NULL, "qdsp hook", &mPrimaryHardwareDev,
+ sizeof(&mPrimaryHardwareDev));
+#endif
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_MODE;
hwDevHal->set_mode(hwDevHal, mMode);
@@ -9289,7 +9616,7 @@ status_t AudioFlinger::EffectModule::configure(bool isForLPA, int sampleRate, in
} else {
channels = AUDIO_CHANNEL_OUT_STEREO;
}
- ALOGV("%s: LPA ON - channels %d", __func__, channels);
+// ALOGV("%s: LPA ON - channels %d", __func__, channels);
} else {
if (thread->channelCount() == 1) {
channels = AUDIO_CHANNEL_OUT_MONO;
@@ -9347,8 +9674,8 @@ status_t AudioFlinger::EffectModule::configure(bool isForLPA, int sampleRate, in
mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
- ALOGV("configure() %p thread %p buffer %p framecount %d",
- this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
+// ALOGV("configure() %p thread %p buffer %p framecount %d",
+// this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
status_t cmdStatus;
uint32_t size = sizeof(int);
@@ -10037,7 +10364,7 @@ status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode,
(cmdCode == EFFECT_CMD_SET_VOLUME) || (cmdCode == EFFECT_CMD_SET_AUDIO_MODE)) ) {
// Notify Direct track for the change in Effect module
// TODO: check if it is required to send mLPAHandle
- ALOGV("Notifying Direct Track for the change in effect config");
+ ALOGV("Notifying Direct Track for the change in effect config %d", cmdCode);
mClient->audioFlinger()->audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
}
#endif
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index d6a3815..5b718d7 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -245,10 +245,11 @@ public:
uint32_t flags);
#ifdef QCOM_HARDWARE
- void applyEffectsOn(void *token,
+ bool applyEffectsOn(void *token,
int16_t *buffer1,
int16_t *buffer2,
- int size);
+ int size,
+ bool force);
#endif
// end of IAudioFlinger interface
@@ -427,6 +428,9 @@ private:
audio_format_t format,
audio_channel_mask_t channelMask,
int frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ uint32_t flags,
+#endif
const sp<IMemory>& sharedBuffer,
int sessionId);
virtual ~TrackBase();
@@ -506,6 +510,9 @@ private:
// support dynamic rates, the current value is in control block
const audio_format_t mFormat;
bool mStepServerFailed;
+#ifdef QCOM_ENHANCED_AUDIO
+ uint32_t mFlags;
+#endif
const int mSessionId;
uint8_t mChannelCount;
audio_channel_mask_t mChannelMask;
@@ -1483,6 +1490,7 @@ private:
};
List<BufferInfo> mBufPool;
List<BufferInfo> mEffectsPool;
+ void *mEffectsThreadScratchBuffer;
void allocateBufPool();
void deallocateBufPool();
@@ -1521,7 +1529,8 @@ private:
sp<AudioFlinger> mAudioFlinger;
sp<AudioFlingerDirectTrackClient> mAudioFlingerClient;
- void clearPowerManager();
+ void clearPowerManager();
+
class PMDeathRecipient : public IBinder::DeathRecipient {
public:
PMDeathRecipient(void *obj){parentClass = (DirectAudioTrack *)obj;}
@@ -1592,6 +1601,9 @@ private:
audio_format_t format,
audio_channel_mask_t channelMask,
int frameCount,
+#ifdef QCOM_ENHANCED_AUDIO
+ uint32_t flags,
+#endif
int sessionId);
virtual ~RecordTrack();
@@ -1722,6 +1734,7 @@ private:
// when < 0, maximum frames to drop before starting capture even if sync event is
// not received
ssize_t mFramestoDrop;
+ int16_t mInputSource;
};
// server side of the client's IAudioRecord
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/CameraClient.cpp
index 967562d..1254962 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/CameraClient.cpp
@@ -697,6 +697,12 @@ void CameraClient::disableMsgType(int32_t msgType) {
bool CameraClient::lockIfMessageWanted(int32_t msgType) {
int sleepCount = 0;
while (mMsgEnabled & msgType) {
+ if ((msgType == CAMERA_MSG_PREVIEW_FRAME) &&
+ (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)) {
+ LOG1("lockIfMessageWanted(%d): Don't try to acquire mlock if "
+ "both Preview and Compressed are enabled", msgType);
+ return false;
+ }
if (mLock.tryLock() == NO_ERROR) {
if (sleepCount > 0) {
LOG1("lockIfMessageWanted(%d): waited for %d ms",