-rw-r--r--  camera/Android.mk | 16
-rw-r--r--  camera/CameraParameters.cpp | 18
-rw-r--r--  camera/ICameraClient.cpp | 17
-rw-r--r--  drm/drmserver/DrmManagerService.cpp | 3
-rw-r--r--  drm/libdrmframework/Android.mk | 2
-rw-r--r--  include/camera/CameraParameters.h | 8
-rw-r--r--  include/camera/CameraParametersExtra.h | 35
-rw-r--r--  include/media/AudioParameter.h | 1
-rw-r--r--  include/media/AudioPolicy.h | 2
-rw-r--r--  include/media/AudioSession.h | 71
-rw-r--r--  include/media/AudioSystem.h | 7
-rw-r--r--  include/media/AudioTrack.h | 5
-rw-r--r--  include/media/IAudioPolicyService.h | 3
-rw-r--r--  include/media/IAudioPolicyServiceClient.h | 3
-rw-r--r--  include/media/IMediaPlayer.h | 10
-rw-r--r--  include/media/IMediaRecorder.h | 2
-rw-r--r--  include/media/MediaPlayerInterface.h | 10
-rwxr-xr-x[-rw-r--r--]  include/media/MediaProfiles.h | 31
-rw-r--r--  include/media/MediaRecorderBase.h | 6
-rw-r--r--  include/media/ToneGenerator.h | 2
-rw-r--r--  include/media/Visualizer.h | 1
-rw-r--r--  include/media/mediaplayer.h | 16
-rwxr-xr-x[-rw-r--r--]  include/media/mediarecorder.h | 30
-rw-r--r--  include/media/stagefright/ACodec.h | 68
-rw-r--r--  include/media/stagefright/AudioPlayer.h | 2
-rw-r--r--  include/media/stagefright/AudioSource.h | 18
-rw-r--r--  include/media/stagefright/CameraSource.h | 9
-rw-r--r--  include/media/stagefright/CameraSourceTimeLapse.h | 8
-rw-r--r--  include/media/stagefright/DataSource.h | 13
-rw-r--r--  include/media/stagefright/ExtendedMediaDefs.h | 69
-rw-r--r--  include/media/stagefright/FFMPEGSoftCodec.h | 141
-rw-r--r--  include/media/stagefright/FileSource.h | 8
-rw-r--r--  include/media/stagefright/MPEG4Writer.h | 17
-rw-r--r--  include/media/stagefright/MediaAdapter.h | 4
-rw-r--r--  include/media/stagefright/MediaBuffer.h | 1
-rw-r--r--  include/media/stagefright/MediaCodec.h | 2
-rw-r--r--  include/media/stagefright/MediaCodecSource.h | 2
-rw-r--r--  include/media/stagefright/MediaDefs.h | 37
-rw-r--r--  include/media/stagefright/MediaExtractor.h | 19
-rw-r--r--  include/media/stagefright/MediaHTTP.h | 2
-rw-r--r--  include/media/stagefright/MediaSource.h | 1
-rw-r--r--  include/media/stagefright/MetaData.h | 55
-rw-r--r--  include/media/stagefright/OMXCodec.h | 4
-rw-r--r--  include/media/stagefright/SkipCutBuffer.h | 7
-rw-r--r--  include/media/stagefright/Utils.h | 9
-rw-r--r--  include/media/stagefright/WAVEWriter.h | 108
-rw-r--r--  media/libavextensions/Android.mk | 101
-rw-r--r--  media/libavextensions/common/AVExtensionsCommon.h | 67
-rw-r--r--  media/libavextensions/common/ExtensionsLoader.hpp | 97
-rw-r--r--  media/libavextensions/media/AVMediaExtensions.h | 96
-rw-r--r--  media/libavextensions/media/AVMediaUtils.cpp | 66
-rw-r--r--  media/libavextensions/mediaplayerservice/AVMediaServiceExtensions.h | 92
-rw-r--r--  media/libavextensions/mediaplayerservice/AVMediaServiceFactory.cpp | 72
-rw-r--r--  media/libavextensions/mediaplayerservice/AVMediaServiceUtils.cpp | 104
-rw-r--r--  media/libavextensions/mediaplayerservice/AVNuExtensions.h | 102
-rw-r--r--  media/libavextensions/mediaplayerservice/AVNuFactory.cpp | 89
-rw-r--r--  media/libavextensions/mediaplayerservice/AVNuUtils.cpp | 363
-rw-r--r--  media/libavextensions/stagefright/AVExtensions.h | 300
-rw-r--r--  media/libavextensions/stagefright/AVFactory.cpp | 141
-rw-r--r--  media/libavextensions/stagefright/AVUtils.cpp | 1139
-rw-r--r--  media/libavextensions/stagefright/ExtendedMediaDefs.cpp | 68
-rw-r--r--  media/libeffects/downmix/EffectDownmix.c | 9
-rw-r--r--  media/libeffects/factory/EffectsFactory.c | 4
-rw-r--r--  media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp | 55
-rw-r--r--  media/libeffects/lvm/wrapper/Bundle/EffectBundle.h | 2
-rw-r--r--  media/libmedia/Android.mk | 12
-rw-r--r--  media/libmedia/AudioParameter.cpp | 1
-rw-r--r--  media/libmedia/AudioRecord.cpp | 15
-rw-r--r--  media/libmedia/AudioSystem.cpp | 39
-rw-r--r--  media/libmedia/AudioTrack.cpp | 171
-rw-r--r--  media/libmedia/AudioTrackShared.cpp | 9
-rw-r--r--  media/libmedia/IAudioPolicyService.cpp | 42
-rw-r--r--  media/libmedia/IAudioPolicyServiceClient.cpp | 30
-rw-r--r--  media/libmedia/ICrypto.cpp | 6
-rw-r--r--  media/libmedia/IEffectClient.cpp | 4
-rw-r--r--  media/libmedia/IMediaHTTPConnection.cpp | 8
-rw-r--r--  media/libmedia/IMediaMetadataRetriever.cpp | 2
-rw-r--r--  media/libmedia/IMediaPlayer.cpp | 30
-rw-r--r--  media/libmedia/IMediaRecorder.cpp | 16
-rw-r--r--  media/libmedia/IOMX.cpp | 3
-rwxr-xr-x[-rw-r--r--]  media/libmedia/MediaProfiles.cpp | 67
-rw-r--r--  media/libmedia/MediaScanner.cpp | 4
-rw-r--r--  media/libmedia/ToneGenerator.cpp | 20
-rw-r--r--  media/libmedia/Visualizer.cpp | 8
-rw-r--r--  media/libmedia/mediaplayer.cpp | 64
-rwxr-xr-x[-rw-r--r--]  media/libmedia/mediarecorder.cpp | 28
-rw-r--r--  media/libmediaplayerservice/Android.mk | 14
-rw-r--r--  media/libmediaplayerservice/MediaPlayerFactory.cpp | 75
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 72
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.h | 4
-rw-r--r--  media/libmediaplayerservice/MediaRecorderClient.cpp | 17
-rw-r--r--  media/libmediaplayerservice/MediaRecorderClient.h | 5
-rw-r--r--  media/libmediaplayerservice/MetadataRetrieverClient.cpp | 21
-rw-r--r--  media/libmediaplayerservice/StagefrightPlayer.cpp | 230
-rw-r--r--  media/libmediaplayerservice/StagefrightPlayer.h | 80
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 298
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.h | 30
-rw-r--r--  media/libmediaplayerservice/nuplayer/Android.mk | 14
-rw-r--r--  media/libmediaplayerservice/nuplayer/GenericSource.cpp | 62
-rw-r--r--  media/libmediaplayerservice/nuplayer/GenericSource.h | 8
-rw-r--r--  media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp | 21
-rw-r--r--  media/libmediaplayerservice/nuplayer/HTTPLiveSource.h | 4
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayer.cpp | 177
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayer.h | 24
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp | 6
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp | 72
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h | 13
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp | 19
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h | 5
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp | 28
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h | 15
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp | 28
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp | 202
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h | 17
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerSource.h | 6
-rw-r--r--  media/libmediaplayerservice/nuplayer/RTSPSource.cpp | 52
-rw-r--r--  media/libmediaplayerservice/nuplayer/RTSPSource.h | 4
-rw-r--r--  media/libmediaplayerservice/nuplayer/StreamingSource.cpp | 9
-rw-r--r--  media/libstagefright/AACExtractor.cpp | 28
-rw-r--r--  media/libstagefright/ACodec.cpp | 410
-rw-r--r--  media/libstagefright/APE.cpp | 125
-rw-r--r--  media/libstagefright/Android.mk | 82
-rw-r--r--  media/libstagefright/AudioPlayer.cpp | 52
-rw-r--r--  media/libstagefright/AudioSource.cpp | 30
-rw-r--r--  media/libstagefright/AwesomePlayer.cpp | 100
-rw-r--r--  media/libstagefright/CameraSource.cpp | 78
-rw-r--r--  media/libstagefright/CameraSourceTimeLapse.cpp | 18
-rw-r--r--  media/libstagefright/DataSource.cpp | 76
-rw-r--r--  media/libstagefright/DataURISource.cpp | 3
-rw-r--r--  media/libstagefright/FFMPEGSoftCodec.cpp | 1384
-rw-r--r--  media/libstagefright/FLACExtractor.cpp | 199
-rw-r--r--  media/libstagefright/FileSource.cpp | 20
-rwxr-xr-x  media/libstagefright/MPEG4Extractor.cpp | 76
-rw-r--r--  media/libstagefright/MPEG4Writer.cpp | 183
-rw-r--r--  media/libstagefright/MediaAdapter.cpp | 18
-rw-r--r--  media/libstagefright/MediaBuffer.cpp | 1
-rw-r--r--  media/libstagefright/MediaCodec.cpp | 28
-rw-r--r--  media/libstagefright/MediaCodecList.cpp | 11
-rw-r--r--  media/libstagefright/MediaCodecSource.cpp | 44
-rw-r--r--  media/libstagefright/MediaDefs.cpp | 28
-rw-r--r--  media/libstagefright/MediaExtractor.cpp | 30
-rw-r--r--  media/libstagefright/MediaMuxer.cpp | 3
-rw-r--r--  media/libstagefright/NuCachedSource2.cpp | 33
-rw-r--r--  media/libstagefright/OMXClient.cpp | 3
-rw-r--r--  media/libstagefright/OMXCodec.cpp | 165
-rw-r--r--  media/libstagefright/OggExtractor.cpp | 15
-rw-r--r--  media/libstagefright/SampleIterator.cpp | 103
-rw-r--r--  media/libstagefright/SampleTable.cpp | 15
-rw-r--r--  media/libstagefright/SkipCutBuffer.cpp | 39
-rw-r--r--  media/libstagefright/StagefrightMediaScanner.cpp | 12
-rw-r--r--  media/libstagefright/StagefrightMetadataRetriever.cpp | 55
-rw-r--r--  media/libstagefright/SurfaceMediaSource.cpp | 17
-rw-r--r--  media/libstagefright/Utils.cpp | 300
-rw-r--r--  media/libstagefright/VideoFrameScheduler.cpp | 9
-rw-r--r--  media/libstagefright/WAVEWriter.cpp | 324
-rw-r--r--  media/libstagefright/WAVExtractor.cpp | 78
-rw-r--r--  media/libstagefright/avc_utils.cpp | 20
-rw-r--r--  media/libstagefright/codecs/aacdec/SoftAAC2.cpp | 41
-rw-r--r--  media/libstagefright/codecs/aacdec/SoftAAC2.h | 3
-rw-r--r--  media/libstagefright/codecs/aacenc/Android.mk | 52
-rw-r--r--  media/libstagefright/codecs/amrnb/common/Android.mk | 2
-rw-r--r--  media/libstagefright/codecs/amrnb/common/include/bytesused.h | 109
-rw-r--r--  media/libstagefright/codecs/amrnb/common/src/bytesused.cpp | 208
-rw-r--r--  media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp | 174
-rw-r--r--  media/libstagefright/codecs/amrnb/dec/Android.mk | 1
-rw-r--r--  media/libstagefright/codecs/amrwbenc/Android.mk | 85
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/acelp.h | 406
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/basic_op.h | 656
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/bits.h | 34
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/cod_main.h | 28
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/dtx.h | 4
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/log2.h | 18
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/main.h | 4
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/math_op.h | 40
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/mem_align.h | 6
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/p_med_o.h | 40
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/q_pulse.h | 42
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/stream.h | 18
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/wb_vad.h | 38
-rw-r--r--  media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h | 4
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s | 5
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s | 2
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/autocorr.c | 182
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/az_isp.c | 334
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/bits.c | 322
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/c2t64fx.c | 439
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/c4t64fx.c | 1868
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/convolve.c | 146
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/cor_h_x.c | 172
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/decim54.c | 164
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/deemph.c | 145
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/dtx.c | 964
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/g_pitch.c | 72
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/gpclip.c | 94
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/homing.c | 20
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/hp400.c | 86
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/hp50.c | 86
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/hp6k.c | 88
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/hp_wsp.c | 170
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/int_lpc.c | 50
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/isp_az.c | 356
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/isp_isf.c | 94
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/lag_wind.c | 30
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/levinson.c | 286
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/log2.c | 64
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/lp_dec2.c | 60
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/math_op.c | 167
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/mem_align.c | 84
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/oper_32b.c | 66
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/p_med_ol.c | 384
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/pit_shrp.c | 34
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/pitch_f4.c | 448
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/pred_lt4.c | 144
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/preemph.c | 104
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/q_gain2.c | 544
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/q_pulse.c | 668
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/qisf_ns.c | 104
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c | 852
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/random.c | 6
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/residu.c | 64
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/scale.c | 50
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/stream.c | 42
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/syn_filt.c | 240
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/updt_tar.c | 35
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/util.c | 48
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c | 3354
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/voicefac.c | 88
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/wb_vad.c | 1156
-rw-r--r--  media/libstagefright/codecs/amrwbenc/src/weight_a.c | 30
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp | 13
-rw-r--r--  media/libstagefright/codecs/common/Config.mk | 24
-rw-r--r--  media/libstagefright/codecs/mp3dec/Android.mk | 13
-rw-r--r--  media/libstagefright/codecs/mp3dec/SoftMP3.cpp | 46
-rw-r--r--  media/libstagefright/codecs/mp3dec/SoftMP3.h | 3
-rw-r--r--  media/libstagefright/codecs/on2/dec/SoftVPX.cpp | 4
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/Android.mk | 16
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h | 6
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S | 6
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S | 4
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S | 4
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S | 4
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S | 4
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S | 11
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S | 11
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S | 4
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h | 6
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S | 14
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S | 40
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S | 2
-rw-r--r--  media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S | 161
-rw-r--r--  media/libstagefright/codecs/raw/SoftRaw.cpp | 10
-rw-r--r--  media/libstagefright/codecs/raw/SoftRaw.h | 3
-rw-r--r--  media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp | 16
-rw-r--r--  media/libstagefright/codecs/vorbis/dec/SoftVorbis.h | 2
-rw-r--r--  media/libstagefright/colorconversion/SoftwareRenderer.cpp | 29
-rw-r--r--  media/libstagefright/data/media_codecs_google_video.xml | 9
-rw-r--r--  media/libstagefright/foundation/ABuffer.cpp | 10
-rw-r--r--  media/libstagefright/foundation/ANetworkSession.cpp | 4
-rw-r--r--  media/libstagefright/httplive/LiveSession.cpp | 5
-rw-r--r--  media/libstagefright/httplive/LiveSession.h | 25
-rw-r--r--  media/libstagefright/httplive/PlaylistFetcher.cpp | 20
-rw-r--r--  media/libstagefright/httplive/PlaylistFetcher.h | 3
-rw-r--r--  media/libstagefright/id3/ID3.cpp | 13
-rw-r--r--  media/libstagefright/include/AACExtractor.h | 4
-rw-r--r--  media/libstagefright/include/APE.h | 43
-rw-r--r--  media/libstagefright/include/AwesomePlayer.h | 6
-rw-r--r--  media/libstagefright/include/NuCachedSource2.h | 7
-rw-r--r--  media/libstagefright/include/OMXNodeInstance.h | 2
-rw-r--r--  media/libstagefright/include/SampleIterator.h | 10
-rw-r--r--  media/libstagefright/include/SimpleSoftOMXComponent.h | 1
-rw-r--r--  media/libstagefright/matroska/MatroskaExtractor.cpp | 102
-rw-r--r--  media/libstagefright/matroska/MatroskaExtractor.h | 2
-rw-r--r--  media/libstagefright/mpeg2ts/ATSParser.cpp | 2
-rw-r--r--  media/libstagefright/omx/Android.mk | 6
-rw-r--r--  media/libstagefright/omx/OMX.cpp | 18
-rw-r--r--  media/libstagefright/omx/OMXMaster.cpp | 67
-rw-r--r--  media/libstagefright/omx/OMXMaster.h | 7
-rw-r--r--  media/libstagefright/omx/OMXNodeInstance.cpp | 24
-rw-r--r--  media/libstagefright/omx/SimpleSoftOMXComponent.cpp | 44
-rwxr-xr-x  media/libstagefright/omx/SoftOMXPlugin.cpp | 9
-rw-r--r--  media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp | 3
-rw-r--r--  media/libstagefright/rtsp/AH263Assembler.cpp | 5
-rw-r--r--  media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp | 10
-rw-r--r--  media/libstagefright/rtsp/APacketSource.cpp | 3
-rw-r--r--  media/libstagefright/rtsp/ARTPConnection.cpp | 10
-rw-r--r--  media/libstagefright/rtsp/ARTPConnection.h | 7
-rw-r--r--  media/libstagefright/rtsp/ARTSPConnection.cpp | 13
-rw-r--r--  media/libstagefright/rtsp/ARTSPConnection.h | 7
-rw-r--r--  media/libstagefright/rtsp/ASessionDescription.cpp | 5
-rw-r--r--  media/libstagefright/rtsp/Android.mk | 6
-rw-r--r--  media/libstagefright/rtsp/MyHandler.h | 71
-rw-r--r--  media/libstagefright/wifi-display/Android.mk | 4
-rw-r--r--  media/libstagefright/wifi-display/source/Converter.cpp | 2
-rw-r--r--  media/mediaserver/Android.mk | 10
-rw-r--r--  media/mediaserver/main_mediaserver.cpp | 8
-rw-r--r--  media/mtp/MtpServer.cpp | 85
-rw-r--r--  media/mtp/MtpServer.h | 7
-rw-r--r--  media/mtp/MtpUtils.cpp | 38
-rw-r--r--  media/utils/BatteryNotifier.cpp | 3
-rw-r--r--  services/audioflinger/Android.mk | 54
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 72
-rw-r--r--  services/audioflinger/AudioMixer.cpp | 28
-rw-r--r--  services/audioflinger/AudioMixer.h | 1
-rw-r--r--  services/audioflinger/AudioResampler.cpp | 25
-rw-r--r--  services/audioflinger/AudioResampler.h | 3
-rw-r--r--  services/audioflinger/AudioResamplerQTI.cpp | 173
-rw-r--r--  services/audioflinger/AudioResamplerQTI.h | 52
-rw-r--r--  services/audioflinger/BufferProviders.cpp | 2
-rw-r--r--  services/audioflinger/Effects.cpp | 25
-rw-r--r--  services/audioflinger/FastCapture.cpp | 3
-rw-r--r--  services/audioflinger/FastCaptureDumpState.cpp | 2
-rw-r--r--  services/audioflinger/FastMixer.cpp | 18
-rw-r--r--  services/audioflinger/ServiceUtilities.cpp | 8
-rw-r--r--  services/audioflinger/ServiceUtilities.h | 1
-rw-r--r--  services/audioflinger/Threads.cpp | 263
-rw-r--r--  services/audioflinger/Threads.h | 30
-rw-r--r--  services/audioflinger/Tracks.cpp | 14
-rw-r--r--  services/audiopolicy/Android.mk | 4
-rw-r--r--  services/audiopolicy/AudioPolicyInterface.h | 3
-rw-r--r--  services/audiopolicy/common/managerdefinitions/Android.mk | 19
-rw-r--r--  services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h | 2
-rw-r--r--  services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h | 45
-rw-r--r--  services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h | 3
-rw-r--r--  services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp | 8
-rw-r--r--  services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp | 5
-rw-r--r--  services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp | 5
-rw-r--r--  services/audiopolicy/engineconfigurable/parameter-framework/Android.mk | 2
-rwxr-xr-x  services/audiopolicy/engineconfigurable/src/Stream.cpp | 10
-rwxr-xr-x  services/audiopolicy/enginedefault/Android.mk | 5
-rwxr-xr-x  services/audiopolicy/enginedefault/src/Engine.cpp | 46
-rw-r--r--  services/audiopolicy/managerdefault/AudioPolicyManager.cpp | 40
-rw-r--r--  services/audiopolicy/managerdefault/AudioPolicyManager.h | 14
-rw-r--r--  services/audiopolicy/service/AudioPolicyClientImpl.cpp | 6
-rw-r--r--  services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp | 5
-rw-r--r--  services/audiopolicy/service/AudioPolicyEffects.cpp | 121
-rw-r--r--  services/audiopolicy/service/AudioPolicyEffects.h | 24
-rw-r--r--  services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp | 86
-rw-r--r--  services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp | 34
-rw-r--r--  services/audiopolicy/service/AudioPolicyService.cpp | 93
-rw-r--r--  services/audiopolicy/service/AudioPolicyService.h | 36
-rw-r--r--  services/camera/libcameraservice/Android.mk | 8
-rw-r--r--  services/camera/libcameraservice/CameraFlashlight.cpp | 14
-rw-r--r--  services/camera/libcameraservice/CameraService.cpp | 5
-rw-r--r--  services/camera/libcameraservice/CameraService.h | 6
-rw-r--r--  services/camera/libcameraservice/api1/Camera2Client.cpp | 2
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.cpp | 44
-rw-r--r--  services/camera/libcameraservice/api1/CameraClient.h | 3
-rw-r--r--  services/camera/libcameraservice/device1/CameraHardwareInterface.h | 27
-rw-r--r--  services/soundtrigger/SoundTriggerHwService.cpp | 31
389 files changed, 19585 insertions, 10541 deletions
diff --git a/camera/Android.mk b/camera/Android.mk
index 471cb0d..36f6da1 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -21,7 +21,6 @@ LOCAL_PATH := $(CAMERA_CLIENT_LOCAL_PATH)
LOCAL_SRC_FILES:= \
Camera.cpp \
CameraMetadata.cpp \
- CameraParameters.cpp \
CaptureResult.cpp \
CameraParameters2.cpp \
ICamera.cpp \
@@ -53,6 +52,21 @@ LOCAL_C_INCLUDES += \
system/media/camera/include \
system/media/private/camera/include \
+ifneq ($(TARGET_SPECIFIC_CAMERA_PARAMETER_LIBRARY),)
+LOCAL_WHOLE_STATIC_LIBRARIES += $(TARGET_SPECIFIC_CAMERA_PARAMETER_LIBRARY)
+else
+LOCAL_WHOLE_STATIC_LIBRARIES += libcamera_parameters
+endif
+
LOCAL_MODULE:= libcamera_client
include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ CameraParameters.cpp
+
+LOCAL_MODULE := libcamera_parameters
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index 68969cf..42b0884 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -21,6 +21,7 @@
#include <string.h>
#include <stdlib.h>
#include <camera/CameraParameters.h>
+#include <camera/CameraParametersExtra.h>
#include <system/graphics.h>
namespace android {
@@ -106,6 +107,7 @@ const char CameraParameters::WHITE_BALANCE_DAYLIGHT[] = "daylight";
const char CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT[] = "cloudy-daylight";
const char CameraParameters::WHITE_BALANCE_TWILIGHT[] = "twilight";
const char CameraParameters::WHITE_BALANCE_SHADE[] = "shade";
+const char CameraParameters::WHITE_BALANCE_MANUAL_CCT[] = "manual-cct";
// Values for effect settings.
const char CameraParameters::EFFECT_NONE[] = "none";
@@ -168,11 +170,16 @@ const char CameraParameters::FOCUS_MODE_FIXED[] = "fixed";
const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
+const char CameraParameters::FOCUS_MODE_MANUAL_POSITION[] = "manual";
// Values for light fx settings
const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
+#ifdef CAMERA_PARAMETERS_EXTRA_C
+CAMERA_PARAMETERS_EXTRA_C
+#endif
+
CameraParameters::CameraParameters()
: mMap()
{
@@ -237,6 +244,9 @@ void CameraParameters::unflatten(const String8 &params)
void CameraParameters::set(const char *key, const char *value)
{
+ if (key == NULL || value == NULL)
+ return;
+
// XXX i think i can do this with strspn()
if (strchr(key, '=') || strchr(key, ';')) {
//XXX ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
@@ -247,6 +257,14 @@ void CameraParameters::set(const char *key, const char *value)
//XXX ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
return;
}
+#ifdef QCOM_HARDWARE
+ // qcom cameras default to delivering an extra zero-exposure frame on HDR.
+ // The android SDK only wants one frame, so disable this unless the app
+ // explicitly asks for it
+ if (!get("hdr-need-1x")) {
+ mMap.replaceValueFor(String8("hdr-need-1x"), String8("false"));
+ }
+#endif
mMap.replaceValueFor(String8(key), String8(value));
}
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index 179a341..4f43796 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -46,7 +46,12 @@ public:
data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
data.writeInt32(msgType);
data.writeInt32(ext1);
- data.writeInt32(ext2);
+ if ((msgType == CAMERA_MSG_PREVIEW_FRAME) && (ext1 == CAMERA_FRAME_DATA_FD)) {
+ ALOGD("notifyCallback: CAMERA_MSG_PREVIEW_FRAME fd = %d", ext2);
+ data.writeFileDescriptor(ext2);
+ } else {
+ data.writeInt32(ext2);
+ }
remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}
@@ -91,8 +96,14 @@ status_t BnCameraClient::onTransact(
ALOGV("NOTIFY_CALLBACK");
CHECK_INTERFACE(ICameraClient, data, reply);
int32_t msgType = data.readInt32();
- int32_t ext1 = data.readInt32();
- int32_t ext2 = data.readInt32();
+ int32_t ext1 = data.readInt32();
+ int32_t ext2 = 0;
+ if ((msgType == CAMERA_MSG_PREVIEW_FRAME) && (ext1 == CAMERA_FRAME_DATA_FD)) {
+ ext2 = data.readFileDescriptor();
+ ALOGD("onTransact: CAMERA_MSG_PREVIEW_FRAME fd = %d", ext2);
+ } else {
+ ext2 = data.readInt32();
+ }
notifyCallback(msgType, ext1, ext2);
return NO_ERROR;
} break;
diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp
index 857d73e..df08f32 100644
--- a/drm/drmserver/DrmManagerService.cpp
+++ b/drm/drmserver/DrmManagerService.cpp
@@ -93,6 +93,9 @@ bool DrmManagerService::isProtectedCallAllowed(drm_perm_t perm) {
return selinuxIsProtectedCallAllowed(spid, perm);
}
}
+ if (checkCallingPermission(String16("com.oma.drm.permission.ACCESS_OMA_DRM")) == true) {
+ return true;
+ }
return false;
}
diff --git a/drm/libdrmframework/Android.mk b/drm/libdrmframework/Android.mk
index 33f9d3b..b7eb70b 100644
--- a/drm/libdrmframework/Android.mk
+++ b/drm/libdrmframework/Android.mk
@@ -31,7 +31,7 @@ LOCAL_SHARED_LIBRARIES := \
libbinder \
libdl
-LOCAL_STATIC_LIBRARIES := \
+LOCAL_WHOLE_STATIC_LIBRARIES := \
libdrmframeworkcommon
LOCAL_C_INCLUDES += \
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index ba33ffe..d85050d 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -19,6 +19,7 @@
#include <utils/KeyedVector.h>
#include <utils/String8.h>
+#include <camera/CameraParametersExtra.h>
namespace android {
@@ -554,6 +555,7 @@ public:
static const char WHITE_BALANCE_CLOUDY_DAYLIGHT[];
static const char WHITE_BALANCE_TWILIGHT[];
static const char WHITE_BALANCE_SHADE[];
+ static const char WHITE_BALANCE_MANUAL_CCT[];
// Values for effect settings.
static const char EFFECT_NONE[];
@@ -677,12 +679,18 @@ public:
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+ static const char FOCUS_MODE_MANUAL_POSITION[];
+
// Values for light special effects
// Low-light enhancement mode
static const char LIGHTFX_LOWLIGHT[];
// High-dynamic range mode
static const char LIGHTFX_HDR[];
+#ifdef CAMERA_PARAMETERS_EXTRA_H
+CAMERA_PARAMETERS_EXTRA_H
+#endif
+
/**
* Returns the the supported preview formats as an enum given in graphics.h
* corrsponding to the format given in the input string or -1 if no such
diff --git a/include/camera/CameraParametersExtra.h b/include/camera/CameraParametersExtra.h
new file mode 100644
index 0000000..80a67cc
--- /dev/null
+++ b/include/camera/CameraParametersExtra.h
@@ -0,0 +1,35 @@
+// Overload this file in your device specific config if you need
+// to add extra camera parameters.
+// A typical file would look like this:
+/*
+ * Copyright (C) 2014 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+#define CAMERA_PARAMETERS_EXTRA_C \
+const char CameraParameters::KEY_SUPPORTED_BURST_NUM[] = "supported-burst-num"; \
+const char CameraParameters::KEY_BURST_NUM[] = "burst-num"; \
+const char CameraParameters::KEY_SUPPORTED_HDR_MODES[] = "supported-hdr-modes"; \
+const char CameraParameters::KEY_HDR_MODE[] = "hdr-mode"; \
+const char CameraParameters::HDR_MODE_OFF[] = "hdr-mode-off"; \
+const char CameraParameters::HDR_MODE_HDR[] = "hdr-mode-hdr";
+
+#define CAMERA_PARAMETERS_EXTRA_H \
+ static const char KEY_SUPPORTED_BURST_NUM[]; \
+ static const char KEY_BURST_NUM[]; \
+ static const char KEY_SUPPORTED_HDR_MODES[]; \
+ static const char KEY_HDR_MODE[]; \
+ static const char HDR_MODE_OFF[]; \
+ static const char HDR_MODE_HDR[];
+*/
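The header above is only a hook: a device tree that needs vendor-specific keys overlays its own CameraParametersExtra.h (and, if it needs a full replacement implementation, points TARGET_SPECIFIC_CAMERA_PARAMETER_LIBRARY at its own static library, per the camera/Android.mk change earlier). Once a device defines the macros, the extra constants behave like any other CameraParameters member. A minimal sketch, assuming a device defines exactly the HDR keys shown in the commented example above (they do not exist in the stock header):

    #include <string.h>
    #include <camera/CameraParameters.h>

    using namespace android;

    // Sketch only: KEY_SUPPORTED_HDR_MODES, KEY_HDR_MODE and HDR_MODE_HDR are
    // provided by the device's CAMERA_PARAMETERS_EXTRA_H/_C macros, not by AOSP.
    static void enableVendorHdr(CameraParameters &params) {
        const char *modes = params.get(CameraParameters::KEY_SUPPORTED_HDR_MODES);
        if (modes != NULL && strstr(modes, CameraParameters::HDR_MODE_HDR) != NULL) {
            params.set(CameraParameters::KEY_HDR_MODE, CameraParameters::HDR_MODE_HDR);
        }
    }
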
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index 891bc4b..c769c4b 100644
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -48,6 +48,7 @@ public:
static const char * const keyFrameCount;
static const char * const keyInputSource;
static const char * const keyScreenState;
+ static const char * const keyDevShutdown;
String8 toString();
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index feed402..7e33df7 100644
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -23,6 +23,7 @@
#include <binder/Parcel.h>
#include <utils/String8.h>
#include <utils/Vector.h>
+#include <media/AudioSession.h>
namespace android {
@@ -42,6 +43,7 @@ namespace android {
// AudioSystem's implementation of the AudioPolicyClient interface
// keep in sync with AudioSystem.java
#define DYNAMIC_POLICY_EVENT_MIX_STATE_UPDATE 0
+#define AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE 10
#define MIX_STATE_DISABLED -1
#define MIX_STATE_IDLE 0
diff --git a/include/media/AudioSession.h b/include/media/AudioSession.h
new file mode 100644
index 0000000..d9658cc
--- /dev/null
+++ b/include/media/AudioSession.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOSESSION_H
+#define ANDROID_AUDIOSESSION_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+// class to store streaminfo
+class AudioSessionInfo : public RefBase {
+public:
+ AudioSessionInfo(int session, audio_stream_type_t stream, audio_output_flags_t flags,
+ audio_channel_mask_t channelMask, uid_t uid) :
+ mSessionId(session), mStream(stream), mFlags(flags), mChannelMask(channelMask),
+ mUid(uid), mRefCount(0) {}
+
+ AudioSessionInfo() : mSessionId(0), mStream(AUDIO_STREAM_DEFAULT), mFlags(AUDIO_OUTPUT_FLAG_NONE), mChannelMask(AUDIO_CHANNEL_NONE), mUid(0) {}
+
+ /*virtual*/ ~AudioSessionInfo() {}
+
+ int mSessionId;
+ audio_stream_type_t mStream;
+ audio_output_flags_t mFlags;
+ audio_channel_mask_t mChannelMask;
+ uid_t mUid;
+
+ // AudioPolicyManager keeps mLock, no need for lock on reference count here
+ int mRefCount;
+
+ void readFromParcel(const Parcel &parcel) {
+ mSessionId = parcel.readInt32();
+ mStream = static_cast<audio_stream_type_t>(parcel.readInt32());
+ mFlags = static_cast<audio_output_flags_t>(parcel.readInt32());
+ mChannelMask = static_cast<audio_channel_mask_t>(parcel.readInt32());
+ mUid = static_cast<uid_t>(parcel.readInt32());
+ }
+
+ void writeToParcel(Parcel *parcel) const {
+ parcel->writeInt32(mSessionId);
+ parcel->writeInt32(mStream);
+ parcel->writeInt32(mFlags);
+ parcel->writeInt32(mChannelMask);
+ parcel->writeInt32(mUid);
+ }
+};
+
+}; // namespace android
+
+#endif // ANDROID_AUDIOSESSION_H
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 26a0bb2..3f4a610 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -31,6 +31,8 @@ namespace android {
typedef void (*audio_error_callback)(status_t err);
typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
+typedef void (*audio_session_callback)(int event,
+ sp<AudioSessionInfo>& session, bool added);
class IAudioFlinger;
class IAudioPolicyService;
@@ -92,6 +94,7 @@ public:
static void setErrorCallback(audio_error_callback cb);
static void setDynPolicyCallback(dynamic_policy_callback cb);
+ static status_t setAudioSessionCallback(audio_session_callback cb);
// helper function to obtain AudioFlinger service handle
static const sp<IAudioFlinger> get_audio_flinger();
@@ -319,6 +322,8 @@ public:
audio_io_handle_t *handle);
static status_t stopAudioSource(audio_io_handle_t handle);
+ static status_t listAudioSessions(audio_stream_type_t streams,
+ Vector< sp<AudioSessionInfo>> &sessions);
// ----------------------------------------------------------------------------
@@ -419,6 +424,7 @@ private:
virtual void onAudioPortListUpdate();
virtual void onAudioPatchListUpdate();
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+ virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added);
private:
Mutex mLock;
@@ -438,6 +444,7 @@ private:
static sp<IAudioFlinger> gAudioFlinger;
static audio_error_callback gAudioErrorCallback;
static dynamic_policy_callback gDynPolicyCallback;
+ static audio_session_callback gAudioSessionCallback;
static size_t gInBuffSize;
// previous parameters for recording buffer size queries
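Taken together with the new AudioSession.h and the AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE event added to AudioPolicy.h, these AudioSystem additions give native clients a way to enumerate active output sessions and to be notified when session effects change. A rough client-side sketch using only the declarations added in this patch (it assumes the event value delivered to the callback is the AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE constant; error handling is trimmed):

    #define LOG_TAG "AudioSessionDemo"
    #include <utils/Log.h>
    #include <media/AudioSystem.h>
    #include <media/AudioSession.h>
    #include <media/AudioPolicy.h>

    using namespace android;

    // Signature matches the audio_session_callback typedef added above.
    static void onSessionUpdate(int event, sp<AudioSessionInfo>& info, bool added) {
        if (event == AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE) {
            ALOGD("session %d %s (stream %d, flags %#x)", info->mSessionId,
                  added ? "added" : "removed", (int)info->mStream, (unsigned)info->mFlags);
        }
    }

    static void dumpMusicSessions() {
        AudioSystem::setAudioSessionCallback(onSessionUpdate);

        Vector< sp<AudioSessionInfo> > sessions;
        if (AudioSystem::listAudioSessions(AUDIO_STREAM_MUSIC, sessions) == NO_ERROR) {
            for (size_t i = 0; i < sessions.size(); i++) {
                ALOGD("active session %d (uid %d)", sessions[i]->mSessionId,
                      (int)sessions[i]->mUid);
            }
        }
    }
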
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index e02f1b7..6f07f98 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -31,6 +31,7 @@ namespace android {
struct audio_track_cblk_t;
class AudioTrackClientProxy;
class StaticAudioTrackClientProxy;
+struct ExtendedMediaUtils;
// ----------------------------------------------------------------------------
@@ -623,6 +624,7 @@ private:
*/
status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+ friend struct AVMediaUtils;
public:
/* Public API for TRANSFER_OBTAIN mode.
@@ -940,6 +942,8 @@ protected:
// For Device Selection API
// a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
audio_port_handle_t mSelectedDeviceId;
+ bool mPlaybackRateSet;
+ bool mTrackOffloaded;
private:
class DeathNotifier : public IBinder::DeathRecipient {
@@ -957,6 +961,7 @@ private:
pid_t mClientPid;
sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ friend struct ExtendedMediaUtils;
};
class TimedAudioTrack : public AudioTrack
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 6b93f6f..1df91ee 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -165,6 +165,9 @@ public:
const audio_attributes_t *attributes,
audio_io_handle_t *handle) = 0;
virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
+
+ virtual status_t listAudioSessions(audio_stream_type_t streams,
+ Vector< sp<AudioSessionInfo>> &sessions) = 0;
};
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
index a7f2cc3..ec38157 100644
--- a/include/media/IAudioPolicyServiceClient.h
+++ b/include/media/IAudioPolicyServiceClient.h
@@ -21,6 +21,7 @@
#include <utils/RefBase.h>
#include <binder/IInterface.h>
#include <system/audio.h>
+#include <media/AudioSession.h>
namespace android {
@@ -37,6 +38,8 @@ public:
virtual void onAudioPatchListUpdate() = 0;
// Notifies a change in the mixing state of a specific mix in a dynamic audio policy
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
+ // Notifies when a default effect set is attached to a session/stream
+ virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added) = 0;
};
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 0fd8933..5957535 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -109,6 +109,16 @@ public:
virtual status_t getMetadata(bool update_only,
bool apply_filter,
Parcel *metadata) = 0;
+
+ // Suspend the video player
+ // In other words, just release the audio decoder and the video decoder
+ // @return OK if the video player was suspended successfully
+ virtual status_t suspend() = 0;
+
+ // Resume the video player
+ // Init the audio decoder and the video decoder
+ // @return OK if the video player was resumed successfully
+ virtual status_t resume() = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 77ed5d3..7e6befd 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -58,6 +58,8 @@ public:
virtual status_t release() = 0;
virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
+
+ virtual status_t pause() = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index de82554..4810b7e 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -46,12 +46,12 @@ class IGraphicBufferProducer;
template<typename T> class SortedVector;
enum player_type {
- STAGEFRIGHT_PLAYER = 3,
NU_PLAYER = 4,
// Test players are available only in the 'test' and 'eng' builds.
// The shared library with the test player is passed passed as an
// argument to the 'test:' url in the setDataSource call.
TEST_PLAYER = 5,
+ DASH_PLAYER = 6,
};
@@ -267,6 +267,14 @@ public:
return INVALID_OPERATION;
}
+ virtual status_t suspend() {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t resume() {
+ return INVALID_OPERATION;
+ }
+
private:
friend class MediaPlayerService;
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index e02918f..a45dc58 100644..100755
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -1,4 +1,6 @@
/*
+ ** Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ ** Not a Contribution.
**
** Copyright 2010, The Android Open Source Project.
**
@@ -56,6 +58,20 @@ enum camcorder_quality {
CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
+
+ CAMCORDER_QUALITY_VENDOR_START = 10000,
+ CAMCORDER_QUALITY_VGA = 10000,
+ CAMCORDER_QUALITY_4KDCI = 10001,
+ CAMCORDER_QUALITY_TIME_LAPSE_VGA = 10002,
+ CAMCORDER_QUALITY_TIME_LAPSE_4KDCI = 10003,
+ CAMCORDER_QUALITY_HIGH_SPEED_CIF = 10004,
+ CAMCORDER_QUALITY_HIGH_SPEED_VGA = 10005,
+ CAMCORDER_QUALITY_HIGH_SPEED_4KDCI = 10006,
+ CAMCORDER_QUALITY_QHD = 10007,
+ CAMCORDER_QUALITY_2k = 10008,
+ CAMCORDER_QUALITY_TIME_LAPSE_QHD = 10009,
+ CAMCORDER_QUALITY_TIME_LAPSE_2k = 10010,
+ CAMCORDER_QUALITY_VENDOR_END = 10010,
};
enum video_decoder {
@@ -126,6 +142,9 @@ public:
* enc.vid.bps.max - max bit rate in bits per second
* enc.vid.fps.min - min frame rate in frames per second
* enc.vid.fps.max - max frame rate in frames per second
+ * enc.vid.hfr.width.max - max hfr video frame width
+ * enc.vid.hfr.height.max - max hfr video frame height
+ * enc.vid.hfr.mode.max - max hfr mode
*/
int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
@@ -264,12 +283,16 @@ private:
int minBitRate, int maxBitRate,
int minFrameWidth, int maxFrameWidth,
int minFrameHeight, int maxFrameHeight,
- int minFrameRate, int maxFrameRate)
+ int minFrameRate, int maxFrameRate,
+ int maxHFRFrameWidth, int maxHFRFrameHeight,
+ int maxHFRMode)
: mCodec(codec),
mMinBitRate(minBitRate), mMaxBitRate(maxBitRate),
mMinFrameWidth(minFrameWidth), mMaxFrameWidth(maxFrameWidth),
mMinFrameHeight(minFrameHeight), mMaxFrameHeight(maxFrameHeight),
- mMinFrameRate(minFrameRate), mMaxFrameRate(maxFrameRate) {}
+ mMinFrameRate(minFrameRate), mMaxFrameRate(maxFrameRate),
+ mMaxHFRFrameWidth(maxHFRFrameWidth), mMaxHFRFrameHeight(maxHFRFrameHeight),
+ mMaxHFRMode(maxHFRMode) {}
~VideoEncoderCap() {}
@@ -278,6 +301,8 @@ private:
int mMinFrameWidth, mMaxFrameWidth;
int mMinFrameHeight, mMaxFrameHeight;
int mMinFrameRate, mMaxFrameRate;
+ int mMaxHFRFrameWidth, mMaxHFRFrameHeight;
+ int mMaxHFRMode;
};
struct AudioEncoderCap {
@@ -392,6 +417,8 @@ private:
static VideoEncoderCap* createDefaultH263VideoEncoderCap();
static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
static AudioEncoderCap* createDefaultAmrNBEncoderCap();
+ static AudioEncoderCap* createDefaultAacEncoderCap();
+ static AudioEncoderCap* createDefaultLpcmEncoderCap();
static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index d6cc4bb..4290563 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -60,13 +60,17 @@ struct MediaRecorderBase {
virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
-
protected:
String16 mOpPackageName;
private:
MediaRecorderBase(const MediaRecorderBase &);
MediaRecorderBase &operator=(const MediaRecorderBase &);
+
+public:
+ virtual status_t pause() = 0;
+
+
};
} // namespace android
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 8406ed6..0043bdc 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -148,6 +148,8 @@ public:
TONE_CDMA_ABBR_ALERT,
TONE_CDMA_SIGNAL_OFF,
//CDMA end
+ TONE_HOLD_RECALL,
+
NUM_TONES,
NUM_SUP_TONES = LAST_SUP_TONE-FIRST_SUP_TONE+1
};
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 186e018..f14977b 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -97,6 +97,7 @@ public:
// and the capture format is according to flags (see callback_flags).
status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate,
bool force = false);
+ void cancelCaptureCallBack();
// set the capture size capture size must be a power of two in the range
// [VISUALIZER_CAPTURE_SIZE_MAX. VISUALIZER_CAPTURE_SIZE_MIN]
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 3fe749c..3d4a6e2 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -53,7 +53,7 @@ enum media_event_type {
MEDIA_ERROR = 100,
MEDIA_INFO = 200,
MEDIA_SUBTITLE_DATA = 201,
- MEDIA_META_DATA = 202,
+ MEDIA_META_DATA = 202
};
// Generic error codes for the media player framework. Errors are fatal, the
@@ -147,7 +147,8 @@ enum media_player_states {
MEDIA_PLAYER_STARTED = 1 << 4,
MEDIA_PLAYER_PAUSED = 1 << 5,
MEDIA_PLAYER_STOPPED = 1 << 6,
- MEDIA_PLAYER_PLAYBACK_COMPLETE = 1 << 7
+ MEDIA_PLAYER_PLAYBACK_COMPLETE = 1 << 7,
+ MEDIA_PLAYER_SUSPENDED = 1 << 8
};
// Keep KEY_PARAMETER_* in sync with MediaPlayer.java.
@@ -209,7 +210,7 @@ public:
void died();
void disconnect();
- status_t setDataSource(
+ virtual status_t setDataSource(
const sp<IMediaHTTPService> &httpService,
const char *url,
const KeyedVector<String8, String8> *headers);
@@ -224,7 +225,7 @@ public:
status_t prepareAsync();
status_t start();
status_t stop();
- status_t pause();
+ virtual status_t pause();
bool isPlaying();
status_t setPlaybackSettings(const AudioPlaybackRate& rate);
status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
@@ -234,7 +235,7 @@ public:
float* videoFps /* nonnull */);
status_t getVideoWidth(int *w);
status_t getVideoHeight(int *h);
- status_t seekTo(int msec);
+ virtual status_t seekTo(int msec);
status_t getCurrentPosition(int *msec);
status_t getDuration(int *msec);
status_t reset();
@@ -243,7 +244,7 @@ public:
status_t setLooping(int loop);
bool isLooping();
status_t setVolume(float leftVolume, float rightVolume);
- void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
status_t invoke(const Parcel& request, Parcel *reply);
status_t setMetadataFilter(const Parcel& filter);
status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
@@ -255,6 +256,8 @@ public:
status_t getParameter(int key, Parcel* reply);
status_t setRetransmitEndpoint(const char* addrString, uint16_t port);
status_t setNextMediaPlayer(const sp<MediaPlayer>& player);
+ status_t suspend();
+ status_t resume();
private:
void clear_l();
@@ -289,6 +292,7 @@ private:
float mSendLevel;
struct sockaddr_in mRetransmitEndpoint;
bool mRetransmitEndpointValid;
+ friend class QCMediaPlayer;
};
}; // namespace android
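Together with the IMediaPlayer and MediaPlayerInterface additions earlier in the patch, the new suspend()/resume() entry points and the MEDIA_PLAYER_SUSPENDED state let a client park a player without tearing it down. A rough sketch of the intended call pattern, based only on the comments in IMediaPlayer.h (suspend releases the decoders, resume re-initializes them):

    #include <media/mediaplayer.h>

    using namespace android;

    static status_t parkAndRestore(const sp<MediaPlayer>& player) {
        // suspend() releases the audio and video decoders; the player is expected
        // to sit in the new MEDIA_PLAYER_SUSPENDED state while parked.
        status_t err = player->suspend();
        if (err != OK) return err;
        // ... later, when playback should continue ...
        // resume() re-initializes the decoders; whether an explicit start() is
        // still required afterwards is left to the concrete player implementation.
        return player->resume();
    }
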
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 15ff82d..0cf1146 100644..100755
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -1,4 +1,7 @@
/*
+ ** Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ ** Not a Contribution.
+ **
** Copyright (C) 2008 The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,6 +77,9 @@ enum output_format {
/* VP8/VORBIS data in a WEBM container */
OUTPUT_FORMAT_WEBM = 9,
+ OUTPUT_FORMAT_QCP = 20,
+ OUTPUT_FORMAT_WAVE = 21,
+
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
@@ -86,6 +92,10 @@ enum audio_encoder {
AUDIO_ENCODER_AAC_ELD = 5,
AUDIO_ENCODER_VORBIS = 6,
+ AUDIO_ENCODER_EVRC = 10,
+ AUDIO_ENCODER_QCELP = 11,
+ AUDIO_ENCODER_LPCM = 12,
+
AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
};
@@ -96,7 +106,11 @@ enum video_encoder {
VIDEO_ENCODER_MPEG_4_SP = 3,
VIDEO_ENCODER_VP8 = 4,
- VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
+ VIDEO_ENCODER_LIST_END, // must be the last - used to validate the video encoder type
+
+ VIDEO_ENCODER_LIST_VENDOR_START = 1000,
+ VIDEO_ENCODER_H265 = 1001,
+ VIDEO_ENCODER_LIST_VENDOR_END,
};
/*
@@ -120,6 +134,9 @@ enum media_recorder_states {
// Recording is in progress.
MEDIA_RECORDER_RECORDING = 1 << 4,
+
+ // Recording is paused.
+ MEDIA_RECORDER_PAUSED = 1 << 5,
};
// The "msg" code passed to the listener in notify.
@@ -225,13 +242,13 @@ public:
status_t setOutputFile(int fd, int64_t offset, int64_t length);
status_t setVideoSize(int width, int height);
status_t setVideoFrameRate(int frames_per_second);
- status_t setParameters(const String8& params);
+ virtual status_t setParameters(const String8& params);
status_t setListener(const sp<MediaRecorderListener>& listener);
status_t setClientName(const String16& clientName);
status_t prepare();
status_t getMaxAmplitude(int* max);
- status_t start();
- status_t stop();
+ virtual status_t start();
+ virtual status_t stop();
status_t reset();
status_t init();
status_t close();
@@ -240,7 +257,7 @@ public:
status_t setInputSurface(const sp<PersistentSurface>& surface);
sp<IGraphicBufferProducer> querySurfaceMediaSourceFromMediaServer();
-private:
+protected:
void doCleanUp();
status_t doReset();
@@ -260,6 +277,9 @@ private:
bool mIsOutputFileSet;
Mutex mLock;
Mutex mNotifyLock;
+
+public:
+ virtual status_t pause();
};
}; // namespace android
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 8b5b862..0ac13b9 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -28,6 +28,8 @@
#include <media/stagefright/SkipCutBuffer.h>
#include <OMX_Audio.h>
+#include <system/audio.h>
+
#define TRACK_BUFFER_TIMING 0
namespace android {
@@ -93,8 +95,11 @@ struct ACodec : public AHierarchicalStateMachine, public CodecBase {
protected:
virtual ~ACodec();
+ virtual status_t setupCustomCodec(
+ status_t err, const char *mime, const sp<AMessage> &msg);
+ virtual status_t GetVideoCodingTypeFromMime(
+ const char *mime, OMX_VIDEO_CODINGTYPE *codingType);
-private:
struct BaseState;
struct UninitializedState;
struct LoadedState;
@@ -143,6 +148,7 @@ private:
kFlagIsSecure = 1,
kFlagPushBlankBuffersToNativeWindowOnShutdown = 2,
kFlagIsGrallocUsageProtected = 4,
+ kFlagPushBlankBuffersToNativeWindowOnSwitch = 1 << 7,
};
enum {
@@ -152,6 +158,7 @@ private:
};
struct BufferInfo {
+ BufferInfo() : mCustomData(-1) {}
enum Status {
OWNED_BY_US,
OWNED_BY_COMPONENT,
@@ -173,6 +180,7 @@ private:
sp<GraphicBuffer> mGraphicBuffer;
int mFenceFd;
FrameRenderTracker::Info *mRenderInfo;
+ int mCustomData;
// The following field and 4 methods are used for debugging only
bool mIsReadFence;
@@ -236,6 +244,8 @@ private:
bool mSentFormat;
bool mIsVideo;
bool mIsEncoder;
+ bool mEncoderComponent;
+ bool mComponentAllocByName;
bool mFatalError;
bool mShutdownInProgress;
bool mExplicitShutdown;
@@ -271,7 +281,7 @@ private:
status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
status_t allocateBuffersOnPort(OMX_U32 portIndex);
status_t freeBuffersOnPort(OMX_U32 portIndex);
- status_t freeBuffer(OMX_U32 portIndex, size_t i);
+ virtual status_t freeBuffer(OMX_U32 portIndex, size_t i);
status_t handleSetSurface(const sp<Surface> &surface);
status_t setupNativeWindowSizeFormatAndUsage(
@@ -284,6 +294,9 @@ private:
status_t submitOutputMetadataBuffer();
void signalSubmitOutputMetadataBufferIfEOS_workaround();
status_t allocateOutputBuffersFromNativeWindow();
+#ifdef USE_SAMSUNG_COLORFORMAT
+ void setNativeWindowColorFormat(OMX_COLOR_FORMATTYPE &eNativeColorFormat);
+#endif
status_t cancelBufferToNativeWindow(BufferInfo *info);
status_t freeOutputBuffersNotOwnedByComponent();
BufferInfo *dequeueBufferFromNativeWindow();
@@ -300,8 +313,8 @@ private:
uint32_t portIndex, IOMX::buffer_id bufferID,
ssize_t *index = NULL);
- status_t setComponentRole(bool isEncoder, const char *mime);
- status_t configureCodec(const char *mime, const sp<AMessage> &msg);
+ virtual status_t setComponentRole(bool isEncoder, const char *mime);
+ virtual status_t configureCodec(const char *mime, const sp<AMessage> &msg);
status_t configureTunneledVideoPlayback(int32_t audioHwSync,
const sp<ANativeWindow> &nativeWindow);
@@ -314,10 +327,10 @@ private:
status_t setSupportedOutputFormat(bool getLegacyFlexibleFormat);
- status_t setupVideoDecoder(
+ virtual status_t setupVideoDecoder(
const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers);
- status_t setupVideoEncoder(
+ virtual status_t setupVideoEncoder(
const char *mime, const sp<AMessage> &msg);
status_t setVideoFormatOnPort(
@@ -338,11 +351,13 @@ private:
int32_t numChannels, int32_t sampleRate, int32_t bitRate,
int32_t aacProfile, bool isADTS, int32_t sbrMode,
int32_t maxOutputChannelCount, const drcParams_t& drc,
- int32_t pcmLimiterEnable);
+ int32_t pcmLimiterEnable, int32_t bitsPerSample = 16);
- status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+ status_t setupAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate,
+ int32_t bitsPerSample = 16);
- status_t setupEAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate);
+ status_t setupEAC3Codec(bool encoder, int32_t numChannels, int32_t sampleRate,
+ int32_t bitsPerSample = 16);
status_t selectAudioPortFormat(
OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
@@ -372,7 +387,7 @@ private:
status_t configureBitrate(
int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode);
- status_t setupErrorCorrectionParameters();
+ virtual status_t setupErrorCorrectionParameters();
status_t initNativeWindow();
@@ -408,7 +423,7 @@ private:
bool dropIncomplete = false, FrameRenderTracker::Info *until = NULL);
void sendFormatChange(const sp<AMessage> &reply);
- status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify);
+ virtual status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify);
void signalError(
OMX_ERRORTYPE error = OMX_ErrorUndefined,
@@ -420,11 +435,40 @@ private:
DescribeColorFormatParams &describeParams);
status_t requestIDRFrame();
- status_t setParameters(const sp<AMessage> &params);
+ virtual status_t setParameters(const sp<AMessage> &params);
// Send EOS on input stream.
void onSignalEndOfInputStream();
+ virtual void setBFrames(OMX_VIDEO_PARAM_MPEG4TYPE *mpeg4type) {}
+ virtual void setBFrames(OMX_VIDEO_PARAM_AVCTYPE *h264type,
+ const int32_t iFramesInterval, const int32_t frameRate) {}
+
+ virtual status_t getVQZIPInfo(const sp<AMessage> &msg) {
+ return OK;
+ }
+ virtual bool canAllocateBuffer(OMX_U32 /* portIndex */) {
+ return false;
+ }
+ virtual void enableCustomAllocationMode(const sp<AMessage> &/* msg */) {}
+ virtual status_t allocateBuffer(
+ OMX_U32 portIndex, size_t bufSize, BufferInfo &info);
+
+ virtual status_t setDSModeHint(sp<AMessage>& msg,
+ OMX_U32 flags, int64_t timeUs) {
+ return UNKNOWN_ERROR;
+ }
+
+ virtual bool getDSModeHint(const sp<AMessage>& msg) {
+ return false;
+ }
+
+ sp<IOMXObserver> createObserver();
+
+ status_t setupRawAudioFormatInternal(
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels,
+ int32_t bitsPerSample);
+
DISALLOW_EVIL_CONSTRUCTORS(ACodec);
};
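
The ACodec hunks above turn a number of codec-setup helpers into virtual hooks and add no-op defaults (setBFrames, getVQZIPInfo, getDSModeHint, ...), so a platform build can specialize them from a subclass. A minimal sketch of such an override, assuming a hypothetical ExtendedACodec in a vendor library; the B-frame values are illustrative and not taken from this patch:

    #include <media/stagefright/ACodec.h>
    #include <OMX_Video.h>

    namespace android {

    // Hypothetical vendor subclass; only overrides the new AVC B-frame hook.
    struct ExtendedACodec : public ACodec {
        virtual void setBFrames(OMX_VIDEO_PARAM_AVCTYPE *h264type,
                const int32_t iFramesInterval, const int32_t frameRate) {
            if (h264type == NULL || iFramesInterval <= 0 || frameRate <= 0) {
                return;
            }
            uint32_t gopSize = iFramesInterval * frameRate;
            if (gopSize > 2) {
                h264type->nBFrames = 1;                              // illustrative values only
                h264type->nPFrames = gopSize - h264type->nBFrames - 1;
            }
        }
    };

    }  // namespace android
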
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index e0cd965..edc9f25 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -103,6 +103,7 @@ private:
int64_t mSeekTimeUs;
bool mStarted;
+ bool mSourcePaused;
bool mIsFirstBuffer;
status_t mFirstBufferResult;
@@ -115,6 +116,7 @@ private:
bool mPlaying;
int64_t mStartPosUs;
const uint32_t mCreateFlags;
+ bool mPauseRequired;
static void AudioCallback(int event, void *user, void *info);
void AudioCallback(int event, void *info);
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 3074910..699b1b4 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -58,9 +58,11 @@ struct AudioSource : public MediaSource, public MediaBufferObserver {
protected:
virtual ~AudioSource();
-private:
+protected:
enum {
- kMaxBufferSize = 2048,
+ // Calculated for a max duration of 80 ms at a 48 kHz sampling rate.
+ kMaxBufferSize = 30720,
+
// After the initial mute, we raise the volume linearly
// over kAutoRampDurationUs.
@@ -68,7 +70,7 @@ private:
// This is the initial mute duration to suppress
// the video recording signal tone
- kAutoRampStartUs = 0,
+ kAutoRampStartUs = 500000,
};
Mutex mLock;
@@ -90,6 +92,8 @@ private:
int64_t mNumFramesReceived;
int64_t mNumClientOwnedBuffers;
+ bool mRecPaused;
+
List<MediaBuffer * > mBuffersReceived;
void trackMaxAmplitude(int16_t *data, int nSamples);
@@ -100,13 +104,17 @@ private:
int32_t startFrame, int32_t rampDurationFrames,
uint8_t *data, size_t bytes);
- void queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs);
+ virtual void queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs);
void releaseQueuedFrames_l();
void waitOutstandingEncodingFrames_l();
- status_t reset();
+ virtual status_t reset();
AudioSource(const AudioSource &);
AudioSource &operator=(const AudioSource &);
+
+public:
+ virtual status_t pause();
+
};
} // namespace android
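
The enlarged kMaxBufferSize (30720) is documented only as "80 ms at 48 kHz". One way those numbers add up is 80 ms of stereo audio with 4-byte samples; that channel count and sample width are my assumptions, not stated by the header, so here is only a compile-time sanity check of the arithmetic:

    #include <cstddef>

    // Hypothetical derivation: 80 ms of 48 kHz stereo audio, 4 bytes per sample.
    constexpr size_t kSampleRate     = 48000;
    constexpr size_t kMaxDurationMs  = 80;
    constexpr size_t kChannels       = 2;
    constexpr size_t kBytesPerSample = 4;

    static_assert(kSampleRate * kMaxDurationMs / 1000 * kChannels * kBytesPerSample == 30720,
                  "matches the new AudioSource::kMaxBufferSize");
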
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 6c938a5..3dcfe4e 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -92,6 +92,8 @@ public:
virtual status_t read(
MediaBuffer **buffer, const ReadOptions *options = NULL);
+ virtual status_t pause();
+
/**
* Check whether a CameraSource object is properly initialized.
* Must call this method before stop().
@@ -189,7 +191,7 @@ protected:
void releaseCamera();
-private:
+protected:
friend struct CameraSourceListener;
Mutex mLock;
@@ -206,6 +208,11 @@ private:
bool mCollectStats;
bool mIsMetaDataStoredInVideoBuffers;
+ int64_t mPauseAdjTimeUs;
+ int64_t mPauseStartTimeUs;
+ int64_t mPauseEndTimeUs;
+ bool mRecPause;
+
void releaseQueuedFrames();
void releaseOneRecordingFrame(const sp<IMemory>& frame);
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 34213be..eeb453f 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -20,6 +20,7 @@
#include <pthread.h>
+#include <media/stagefright/CameraSource.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
#include <utils/String16.h>
@@ -56,7 +57,7 @@ public:
// returning quickly.
void startQuickReadReturns();
-private:
+protected:
// size of the encoded video.
int32_t mVideoWidth;
int32_t mVideoHeight;
@@ -66,6 +67,9 @@ private:
// Real timestamp of the last encoded time lapse frame
int64_t mLastTimeLapseFrameRealTimestampUs;
+ // Adjusted continuous timestamp based on recording fps
+ // of the last encoded time lapse frame
+ int64_t mLastTimeLapseFrameTimeStampUs;
// Variable set in dataCallbackTimestamp() to help skipCurrentFrame()
// to know if current frame needs to be skipped.
@@ -152,7 +156,7 @@ private:
// the frame needs to be encoded, it returns false and also modifies
// the time stamp to be one frame time ahead of the last encoded
// frame's time stamp.
- bool skipFrameAndModifyTimeStamp(int64_t *timestampUs);
+ virtual bool skipFrameAndModifyTimeStamp(int64_t *timestampUs);
// Wrapper to enter threadTimeLapseEntry()
static void *ThreadTimeLapseWrapper(void *me);
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 47c5c34..c8ad05e 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -27,10 +27,10 @@
#include <utils/RefBase.h>
#include <utils/threads.h>
#include <drm/DrmManagerClient.h>
+#include <media/stagefright/foundation/AMessage.h>
namespace android {
-struct AMessage;
struct AString;
class IDataSource;
struct IMediaHTTPService;
@@ -51,12 +51,13 @@ public:
const char *uri,
const KeyedVector<String8, String8> *headers = NULL,
String8 *contentType = NULL,
- HTTPBase *httpSource = NULL);
+ HTTPBase *httpSource = NULL,
+ bool useExtendedCache = false);
static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
static sp<DataSource> CreateFromIDataSource(const sp<IDataSource> &source);
- DataSource() {}
+ DataSource() : mMeta(new AMessage) {}
virtual status_t initCheck() const = 0;
@@ -121,15 +122,21 @@ public:
virtual String8 getMIMEType() const;
+ virtual sp<AMessage> meta() { return mMeta; }
+
protected:
virtual ~DataSource() {}
private:
+ sp<AMessage> mMeta;
+
static Mutex gSnifferMutex;
static List<SnifferFunc> gSniffers;
+ static List<SnifferFunc> gExtraSniffers;
static bool gSniffersRegistered;
static void RegisterSniffer_l(SnifferFunc func);
+ static void RegisterSnifferPlugin();
DataSource(const DataSource &);
DataSource &operator=(const DataSource &);
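
DataSource now carries a general-purpose AMessage bag exposed through meta(), which extended sniffers and extractors can use to pass side information along with the source. A hedged sketch of how a caller might use it; the key name "extended-extractor" is illustrative, not defined by the patch:

    #include <media/stagefright/DataSource.h>
    #include <media/stagefright/foundation/AMessage.h>

    namespace android {

    // Tag a source so a downstream consumer can detect it later.
    static void tagSource(const sp<DataSource> &source) {
        source->meta()->setInt32("extended-extractor", 1);
    }

    static bool isTaggedSource(const sp<DataSource> &source) {
        int32_t tagged = 0;
        return source->meta()->findInt32("extended-extractor", &tagged) && tagged;
    }

    }  // namespace android
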
diff --git a/include/media/stagefright/ExtendedMediaDefs.h b/include/media/stagefright/ExtendedMediaDefs.h
new file mode 100644
index 0000000..18b5e9a
--- /dev/null
+++ b/include/media/stagefright/ExtendedMediaDefs.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EXTENDED_MEDIA_DEFS_H_
+#define _EXTENDED_MEDIA_DEFS_H_
+
+namespace android {
+
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
+extern const char *MEDIA_MIMETYPE_VIDEO_WMV;
+extern const char *MEDIA_MIMETYPE_VIDEO_WMV_VC1;
+extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
+extern const char *MEDIA_MIMETYPE_AUDIO_WMA_PRO;
+extern const char *MEDIA_MIMETYPE_AUDIO_WMA_LOSSLESS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_ASF;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AAC;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCP;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX311;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2;
+extern const char *MEDIA_MIMETYPE_CONTAINER_3G2;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_LBR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_AIFF;
+extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_APE;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCAMR_NB;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCAMR_WB;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCWAV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG2PS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCMATROSKA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCOGG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QCFLV;
+extern const char *MEDIA_MIMETYPE_VIDEO_VPX;
+extern const char *MEDIA_MIMETYPE_CONTAINER_QTIFLAC;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4_DP;
+
+} // namespace android
+
+#endif // _EXTENDED_MEDIA_DEFS_H_
diff --git a/include/media/stagefright/FFMPEGSoftCodec.h b/include/media/stagefright/FFMPEGSoftCodec.h
new file mode 100644
index 0000000..c6b6482
--- /dev/null
+++ b/include/media/stagefright/FFMPEGSoftCodec.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2014 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FFMPEG_SOFT_CODEC_H_
+#define FFMPEG_SOFT_CODEC_H_
+
+#include <media/IOMX.h>
+#include <media/MediaCodecInfo.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <media/stagefright/MetaData.h>
+
+#include <OMX_Audio.h>
+#include <OMX_Video.h>
+
+namespace android {
+
+struct FFMPEGSoftCodec {
+
+ enum {
+ kPortIndexInput = 0,
+ kPortIndexOutput = 1
+ };
+
+ static void convertMessageToMetaDataFF(
+ const sp<AMessage> &msg, sp<MetaData> &meta);
+
+ static void convertMetaDataToMessageFF(
+ const sp<MetaData> &meta, sp<AMessage> *format);
+
+ static const char* overrideComponentName(
+ uint32_t quirks, const sp<MetaData> &meta,
+ const char *mime, bool isEncoder);
+
+ static void overrideComponentName(
+ uint32_t quirks, const sp<AMessage> &msg,
+ AString* componentName, AString* mime,
+ int32_t isEncoder);
+
+ static status_t setSupportedRole(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder, const char *mime);
+
+ static status_t setAudioFormat(
+ const sp<AMessage> &msg, const char* mime,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+ static status_t setVideoFormat(
+ status_t status,
+ const sp<AMessage> &msg, const char* mime,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID,
+ bool isEncoder, OMX_VIDEO_CODINGTYPE *compressionFormat,
+ const char* componentName);
+
+ static status_t getAudioPortFormat(
+ OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+ static status_t getVideoPortFormat(
+ OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+private:
+ static const char* getMsgKey(int key);
+
+ static status_t setWMVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setRVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setFFmpegVideoFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setRawAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setWMAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setVORBISFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setRAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setFLACFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setMP2Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setAC3Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setAPEFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setDTSFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ static status_t setFFmpegAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+#ifdef QCOM_HARDWARE
+ static status_t setQCDIVXFormat(
+ const sp<AMessage> &msg, const char* mime,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID, int port_index);
+#endif
+
+};
+
+} // namespace android
+#endif // FFMPEG_SOFT_CODEC_H_
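
FFMPEGSoftCodec centralizes the FFmpeg-backed format plumbing, and the overrideComponentName() helpers let the codec-selection path substitute a software component for formats the stock list does not handle. A hedged sketch of a call site; the selection context, function name, and variables are mine, not from the patch:

    #include <media/stagefright/FFMPEGSoftCodec.h>
    #include <media/stagefright/MediaDefs.h>
    #include <media/stagefright/MetaData.h>
    #include <media/stagefright/foundation/AString.h>

    namespace android {

    // Prefer an FFmpeg-backed decoder for WMV content when one is advertised.
    static AString pickVideoDecoder(const sp<MetaData> &meta, const AString &defaultName) {
        const char *overridden = FFMPEGSoftCodec::overrideComponentName(
                0 /* quirks */, meta, MEDIA_MIMETYPE_VIDEO_WMV, false /* isEncoder */);
        return overridden != NULL ? AString(overridden) : defaultName;
    }

    }  // namespace android
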
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index a981d1c..f4f874f 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -43,11 +43,16 @@ public:
virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
+ virtual String8 getUri() {
+ return mUri;
+ }
+
protected:
virtual ~FileSource();
private:
int mFd;
+ String8 mUri;
int64_t mOffset;
int64_t mLength;
Mutex mLock;
@@ -56,10 +61,11 @@ private:
sp<DecryptHandle> mDecryptHandle;
DrmManagerClient *mDrmManagerClient;
int64_t mDrmBufOffset;
- size_t mDrmBufSize;
+ ssize_t mDrmBufSize;
unsigned char *mDrmBuf;
ssize_t readAtDRM(off64_t offset, void *data, size_t size);
+ void fetchUriFromFd(int fd);
FileSource(const FileSource &);
FileSource &operator=(const FileSource &);
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index a195fe8..09a48f9 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -77,13 +77,17 @@ private:
int mFd;
status_t mInitCheck;
bool mIsRealTimeRecording;
+protected:
bool mUse4ByteNalLength;
+private:
bool mUse32BitOffset;
bool mIsFileSizeLimitExplicitlyRequested;
bool mPaused;
bool mStarted; // Writer thread + track threads started successfully
bool mWriterThreadStarted; // Only writer thread started successfully
+protected:
off64_t mOffset;
+private:
off_t mMdatOffset;
uint8_t *mMoovBoxBuffer;
off64_t mMoovBoxBufferOffset;
@@ -108,6 +112,8 @@ private:
sp<AMessage> mMetaKeys;
+ bool mIsVideoHEVC;
+
void setStartTimestampUs(int64_t timeUs);
int64_t getStartTimestampUs(); // Not const
status_t startTracks(MetaData *params);
@@ -181,13 +187,22 @@ private:
// By default, real time recording is on.
bool isRealTimeRecording() const;
+ // To use 3gp4 box for clips with AMR audio
+ bool mIsAudioAMR;
+
+ // HFR scale
+ uint32_t mHFRRatio;
+
void lock();
void unlock();
// Acquire lock before calling these methods
off64_t addSample_l(MediaBuffer *buffer);
- off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
+protected:
+ static void StripStartcode(MediaBuffer *buffer);
+ virtual off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
+private:
bool exceedsFileSizeLimit();
bool use32BitFileOffset() const;
bool exceedsFileDurationLimit();
diff --git a/include/media/stagefright/MediaAdapter.h b/include/media/stagefright/MediaAdapter.h
index 369fce6..8622546 100644
--- a/include/media/stagefright/MediaAdapter.h
+++ b/include/media/stagefright/MediaAdapter.h
@@ -56,6 +56,8 @@ public:
// deep copy, such that after pushBuffer return, the buffer can be re-used.
status_t pushBuffer(MediaBuffer *buffer);
+ virtual void notifyError(status_t err);
+
private:
Mutex mAdapterLock;
// Make sure the read() wait for the incoming buffer.
@@ -68,6 +70,8 @@ private:
bool mStarted;
sp<MetaData> mOutputFormat;
+ status_t mStatus;
+
DISALLOW_EVIL_CONSTRUCTORS(MediaAdapter);
};
diff --git a/include/media/stagefright/MediaBuffer.h b/include/media/stagefright/MediaBuffer.h
index c8a50e8..5ab266f 100644
--- a/include/media/stagefright/MediaBuffer.h
+++ b/include/media/stagefright/MediaBuffer.h
@@ -93,6 +93,7 @@ protected:
private:
friend class MediaBufferGroup;
friend class OMXDecoder;
+ friend class MediaAdapter;
// For use by OMXDecoder, reference count must be 1, drop reference
// count to 0 without signalling the observer.
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index cdfa159..e2588a4 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -51,6 +51,8 @@ struct MediaCodec : public AHandler {
BUFFER_FLAG_SYNCFRAME = 1,
BUFFER_FLAG_CODECCONFIG = 2,
BUFFER_FLAG_EOS = 4,
+ BUFFER_FLAG_EXTRADATA = 0x1000,
+ BUFFER_FLAG_DATACORRUPT = 0x2000,
};
enum {
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 71f58a9..d41d87b 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -28,7 +28,7 @@ class AMessage;
struct AReplyToken;
class IGraphicBufferProducer;
class IGraphicBufferConsumer;
-class MediaCodec;
+struct MediaCodec;
class MetaData;
struct MediaCodecSource : public MediaSource,
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 21eb04a..ffbd20c 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -66,6 +66,43 @@ extern const char *MEDIA_MIMETYPE_TEXT_VTT;
extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_FLV1;
+extern const char *MEDIA_MIMETYPE_VIDEO_MJPEG;
+extern const char *MEDIA_MIMETYPE_VIDEO_RV;
+extern const char *MEDIA_MIMETYPE_VIDEO_VC1;
+extern const char *MEDIA_MIMETYPE_VIDEO_WMV;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_FFMPEG;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_PCM;
+extern const char *MEDIA_MIMETYPE_AUDIO_RA;
+extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
+extern const char *MEDIA_MIMETYPE_AUDIO_FFMPEG;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_APE;
+extern const char *MEDIA_MIMETYPE_CONTAINER_DIVX;
+extern const char *MEDIA_MIMETYPE_CONTAINER_DTS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_FLAC;
+extern const char *MEDIA_MIMETYPE_CONTAINER_FLV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MOV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MP2;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_RA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_RM;
+extern const char *MEDIA_MIMETYPE_CONTAINER_TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WEBM;
+extern const char *MEDIA_MIMETYPE_CONTAINER_VC1;
+extern const char *MEDIA_MIMETYPE_CONTAINER_HEVC;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WMA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WMV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_FFMPEG;
+
} // namespace android
+#include <media/stagefright/ExtendedMediaDefs.h>
+
#endif // MEDIA_DEFS_H_
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 183933a..e83defa 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -19,17 +19,30 @@
#define MEDIA_EXTRACTOR_H_
#include <utils/RefBase.h>
+#include <media/stagefright/DataSource.h>
namespace android {
-class DataSource;
class MediaSource;
class MetaData;
class MediaExtractor : public RefBase {
public:
+ typedef MediaExtractor *(*CreateFunc)(const sp<DataSource> &source,
+ const char *mime, const sp<AMessage> &meta);
+
+ struct Plugin {
+ DataSource::SnifferFunc sniff;
+ CreateFunc create;
+ };
+
+ static Plugin *getPlugin() {
+ return &sPlugin;
+ }
+
static sp<MediaExtractor> Create(
- const sp<DataSource> &source, const char *mime = NULL);
+ const sp<DataSource> &source, const char *mime = NULL,
+ const uint32_t flags = 0, const sp<AMessage> *meta = NULL);
virtual size_t countTracks() = 0;
virtual sp<MediaSource> getTrack(size_t index) = 0;
@@ -67,6 +80,7 @@ public:
}
virtual void setUID(uid_t uid) {
}
+ virtual void setExtraFlags(uint32_t flag) {}
protected:
MediaExtractor() : mIsDrm(false) {}
@@ -74,6 +88,7 @@ protected:
private:
bool mIsDrm;
+ static Plugin sPlugin;
MediaExtractor(const MediaExtractor &);
MediaExtractor &operator=(const MediaExtractor &);
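
MediaExtractor::getPlugin() exposes a single static Plugin slot whose sniff/create pair an extensions library fills in (paired with RegisterSnifferPlugin() in DataSource above). A hedged sketch of what a vendor registration could look like; MySniff, MyCreate, and the registration function are illustrative names:

    #include <media/stagefright/DataSource.h>
    #include <media/stagefright/MediaExtractor.h>
    #include <utils/String8.h>

    namespace android {

    static bool MySniff(const sp<DataSource> & /*source*/, String8 * /*mimeType*/,
            float *confidence, sp<AMessage> * /*meta*/) {
        *confidence = 0.0f;   // a real sniffer inspects the stream here
        return false;
    }

    static MediaExtractor *MyCreate(const sp<DataSource> & /*source*/,
            const char * /*mime*/, const sp<AMessage> & /*meta*/) {
        return NULL;          // a real plugin returns its extractor here
    }

    void registerVendorExtractorPlugin() {
        MediaExtractor::Plugin *plugin = MediaExtractor::getPlugin();
        plugin->sniff = MySniff;
        plugin->create = MyCreate;
    }

    }  // namespace android
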
diff --git a/include/media/stagefright/MediaHTTP.h b/include/media/stagefright/MediaHTTP.h
index 006d8d8..88683bd 100644
--- a/include/media/stagefright/MediaHTTP.h
+++ b/include/media/stagefright/MediaHTTP.h
@@ -54,7 +54,7 @@ protected:
virtual String8 getUri();
virtual String8 getMIMEType() const;
-private:
+protected:
status_t mInitCheck;
sp<IMediaHTTPConnection> mHTTPConnection;
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index a653db9..7ab5f62 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -59,6 +59,7 @@ struct MediaSource : public virtual RefBase {
virtual status_t read(
MediaBuffer **buffer, const ReadOptions *options = NULL) = 0;
+ virtual void notifyError(status_t) {}
// Options that modify read() behaviour. The default is to
// a) not request a seek
// b) not be late, i.e. lateness_us = 0
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 8d4e15a..66e7d63 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -50,6 +50,10 @@ enum {
kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
kKeyBitRate = 'brte', // int32_t (bps)
+ kKeyCodecId = 'cdid', // int32_t
+ kKeyBitsPerSample = 'sbit', // int32_t (DUPE of kKeySampleBits)
+ kKeyCodedSampleBits = 'cosb', // int32_t
+ kKeySampleFormat = 'sfmt', // int32_t
kKeyESDS = 'esds', // raw data
kKeyAACProfile = 'aacp', // int32_t
kKeyAVCC = 'avcc', // raw data
@@ -64,6 +68,7 @@ enum {
kKeyIsSyncFrame = 'sync', // int32_t (bool)
kKeyIsCodecConfig = 'conf', // int32_t (bool)
kKeyTime = 'time', // int64_t (usecs)
+ kKeyTimeBoot = 'timb', // int64_t (usecs)
kKeyDecodingTime = 'decT', // int64_t (decoding timestamp in usecs)
kKeyNTPTime = 'ntpT', // uint64_t (ntp-timestamp)
kKeyTargetTime = 'tarT', // int64_t (usecs)
@@ -131,6 +136,23 @@ enum {
kKeyIsUnreadable = 'unre', // bool (int32_t)
+ kKeyRawCodecSpecificData = 'rcsd', // raw data - added to support mmParser
+ kKeyDivXVersion = 'DivX', // int32_t
+ kKeyDivXDrm = 'QDrm', // void *
+ kKeyWMAEncodeOpt = 'eopt', // int32_t
+ kKeyWMABlockAlign = 'blka', // int32_t
+ kKeyWMAVersion = 'wmav', // int32_t
+ kKeyWMAAdvEncOpt1 = 'ade1', // int16_t
+ kKeyWMAAdvEncOpt2 = 'ade2', // int32_t
+ kKeyWMAFormatTag = 'fmtt', // int64_t
+ kKeyWMABitspersample = 'bsps', // int64_t
+ kKeyWMAVirPktSize = 'vpks', // int64_t
+ kKeyWMVProfile = 'wmvp', // int32_t
+
+ kKeyWMVVersion = 'wmvv', // int32_t
+ kKeyRVVersion = '#rvv', // int32_t
+ kKeyBlockAlign = 'ablk', // int32_t , should be different from kKeyWMABlockAlign
+
// An indication that a video buffer has been rendered.
kKeyRendered = 'rend', // bool (int32_t)
@@ -181,6 +203,13 @@ enum {
// H264 supplemental enhancement information offsets/sizes
kKeySEI = 'sei ', // raw data
+
+ kKeyPCMFormat = 'pfmt',
+ kKeyArbitraryMode = 'ArbM',
+
+ // Indicate if it is OK to hold on to the MediaBuffer and not
+ // release it immediately
+ kKeyCanDeferRelease = 'drel', // bool (int32_t)
};
enum {
@@ -190,6 +219,32 @@ enum {
kTypeD263 = 'd263',
};
+enum {
+ kTypeDivXVer_3_11,
+ kTypeDivXVer_4,
+ kTypeDivXVer_5,
+ kTypeDivXVer_6,
+};
+
+enum {
+ kTypeWMA,
+ kTypeWMAPro,
+ kTypeWMALossLess,
+};
+
+enum {
+ kTypeWMVVer_7, // WMV1
+ kTypeWMVVer_8, // WMV2
+ kTypeWMVVer_9, // WMV3
+};
+
+// http://en.wikipedia.org/wiki/RealVideo
+enum {
+ kTypeRVVer_G2, // rv20: RealVideo G2
+ kTypeRVVer_8, // rv30: RealVideo 8
+ kTypeRVVer_9, // rv40: RealVideo 9
+};
+
class MetaData : public RefBase {
public:
MetaData();
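
The new keys follow the existing FourCC convention and are read and written through MetaData's ordinary typed accessors. A small hedged example of tagging a WMA track with the keys added above; the values are illustrative:

    #include <media/stagefright/MetaData.h>

    namespace android {

    static void tagWmaTrack(const sp<MetaData> &meta) {
        meta->setInt32(kKeyWMAVersion, kTypeWMAPro);   // one of the new WMA type enums
        meta->setInt32(kKeyBitsPerSample, 16);
        meta->setInt32(kKeyBlockAlign, 8192);          // illustrative block size

        int32_t bps = 0;
        if (meta->findInt32(kKeyBitsPerSample, &bps)) {
            // bps == 16 at this point
        }
    }

    }  // namespace android
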
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index b0404aa..2f73de8 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -135,6 +135,9 @@ private:
EXECUTING_TO_IDLE,
IDLE_TO_LOADED,
RECONFIGURING,
+ PAUSING,
+ FLUSHING,
+ PAUSED,
ERROR
};
@@ -344,6 +347,7 @@ private:
status_t waitForBufferFilled_l();
+ status_t resumeLocked(bool drainInputBuf);
int64_t getDecodingTimeUs();
status_t parseHEVCCodecSpecificData(
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 098aa69..61f9949 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -29,9 +29,10 @@ namespace android {
*/
class SkipCutBuffer: public RefBase {
public:
- // 'skip' is the number of bytes to skip from the beginning
- // 'cut' is the number of bytes to cut from the end
- SkipCutBuffer(int32_t skip, int32_t cut);
+ // 'skip' is the number of frames to skip from the beginning
+ // 'cut' is the number of frames to cut from the end
+ // 'num16Channels' is the number of channels, whose samples are assumed to be 16 bits wide each
+ SkipCutBuffer(size_t skip, size_t cut, size_t num16Channels);
// Submit one MediaBuffer for skipping and cutting. This may consume all or
// some of the data in the buffer, or it may add data to it.
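
With the new signature, SkipCutBuffer takes frame counts plus the channel count instead of raw byte counts, so callers no longer do the bytes-per-frame conversion themselves. A hedged sketch of constructing one under the new signature; the delay/padding variable names are illustrative:

    #include <media/stagefright/SkipCutBuffer.h>

    namespace android {

    static sp<SkipCutBuffer> makeSkipCutBuffer(
            int32_t encoderDelayFrames, int32_t encoderPaddingFrames, int32_t numChannels) {
        // skip/cut are in frames; the buffer scales them by the 16-bit frame size internally.
        return new SkipCutBuffer(encoderDelayFrames, encoderPaddingFrames, numChannels);
    }

    }  // namespace android
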
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 5e9d7d4..e91c03a 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -18,6 +18,7 @@
#define UTILS_H_
+#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AString.h>
#include <stdint.h>
#include <utils/Errors.h>
@@ -85,6 +86,14 @@ void writeToAMessage(sp<AMessage> msg, const AVSyncSettings &sync, float videoFp
void readFromAMessage(
const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+audio_format_t getPCMFormat(const sp<AMessage> &format);
+
+void updateVideoTrackInfoFromESDS_MPEG4Video(sp<MetaData> meta);
+bool checkDPFromVOLHeader(const uint8_t *ptr, size_t size);
+bool checkDPFromCodecSpecificData(const uint8_t *ptr, size_t size);
+
+status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length);
+
} // namespace android
#endif // UTILS_H_
diff --git a/include/media/stagefright/WAVEWriter.h b/include/media/stagefright/WAVEWriter.h
new file mode 100644
index 0000000..766d8f4
--- /dev/null
+++ b/include/media/stagefright/WAVEWriter.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WAVE_WRITER_H_
+
+#define WAVE_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+
+namespace android {
+
+
+#define ID_RIFF 0x46464952
+#define ID_WAVE 0x45564157
+#define ID_FMT 0x20746d66
+#define ID_DATA 0x61746164
+#define FORMAT_PCM 1
+
+
+struct MediaSource;
+struct MetaData;
+
+struct wav_header {
+ uint32_t riff_id;
+ uint32_t riff_sz;
+ uint32_t riff_fmt;
+ uint32_t fmt_id;
+ uint32_t fmt_sz;
+ uint16_t audio_format;
+ uint16_t num_channels;
+ uint32_t sample_rate;
+ uint32_t byte_rate; /* sample_rate * num_channels * bps / 8 */
+ uint16_t block_align; /* num_channels * bps / 8 */
+ uint16_t bits_per_sample;
+ uint32_t data_id;
+ uint32_t data_sz;
+};
+
+
+struct WAVEWriter : public MediaWriter {
+ WAVEWriter(const char *filename);
+ WAVEWriter(int fd);
+
+ status_t initCheck() const;
+
+ virtual status_t addSource(const sp<MediaSource> &source);
+ virtual bool reachedEOS();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual status_t pause();
+
+protected:
+ virtual ~WAVEWriter();
+
+private:
+ int mFd;
+ status_t mInitCheck;
+ sp<MediaSource> mSource;
+ bool mStarted;
+ volatile bool mPaused;
+ volatile bool mResumed;
+ volatile bool mDone;
+ volatile bool mReachedEOS;
+ pthread_t mThread;
+ int64_t mEstimatedSizeBytes;
+ int64_t mEstimatedDurationUs;
+
+ static void *ThreadWrapper(void *);
+ status_t threadFunc();
+ bool exceedsFileSizeLimit();
+ bool exceedsFileDurationLimit();
+
+ WAVEWriter(const WAVEWriter &);
+ WAVEWriter &operator=(const WAVEWriter &);
+};
+
+} // namespace android
+
+#endif // WAVE_WRITER_H_
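
The wav_header comments spell out the derived-field formulas. As a worked example, here is how the header would be filled in for 48 kHz stereo 16-bit PCM; this is a hypothetical fill-in for illustration, not code from the patch (WAVEWriter computes these internally):

    #include <media/stagefright/WAVEWriter.h>

    namespace android {

    static wav_header makePcmHeader() {
        wav_header hdr = {};
        hdr.riff_id         = ID_RIFF;
        hdr.riff_fmt        = ID_WAVE;
        hdr.fmt_id          = ID_FMT;
        hdr.fmt_sz          = 16;          // PCM fmt chunk payload size
        hdr.audio_format    = FORMAT_PCM;
        hdr.num_channels    = 2;
        hdr.sample_rate     = 48000;
        hdr.bits_per_sample = 16;
        hdr.byte_rate   = hdr.sample_rate * hdr.num_channels * hdr.bits_per_sample / 8;  // 192000
        hdr.block_align = hdr.num_channels * hdr.bits_per_sample / 8;                    // 4
        hdr.data_id     = ID_DATA;
        // riff_sz and data_sz are patched in once the total payload size is known.
        return hdr;
    }

    }  // namespace android
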
diff --git a/media/libavextensions/Android.mk b/media/libavextensions/Android.mk
new file mode 100644
index 0000000..3370116
--- /dev/null
+++ b/media/libavextensions/Android.mk
@@ -0,0 +1,101 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ stagefright/ExtendedMediaDefs.cpp \
+ stagefright/AVUtils.cpp \
+ stagefright/AVFactory.cpp \
+
+LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/include/media/ \
+ $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/native/include/media/hardware \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/external/flac/include \
+ $(TOP)/$(call project-path-for,qcom-media)/mm-core/inc \
+ $(TOP)/frameworks/av/media/libstagefright \
+
+LOCAL_CFLAGS += -Wno-multichar -Werror
+
+LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio
+
+ifeq ($(TARGET_ENABLE_QC_AV_ENHANCEMENTS),true)
+ LOCAL_CFLAGS += -DENABLE_AV_ENHANCEMENTS
+endif
+
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_FLAC_OFFLOAD)),true)
+ LOCAL_CFLAGS += -DFLAC_OFFLOAD_ENABLED
+endif
+
+LOCAL_MODULE:= libavextensions
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
+
+########################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ media/AVMediaUtils.cpp \
+
+LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/include/media/ \
+ $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/native/include/media/hardware \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/external/flac/include \
+ $(TOP)/system/media/audio_utils/include \
+ $(TOP)/$(call project-path-for,qcom-media)/mm-core/inc
+
+LOCAL_CFLAGS += -Wno-multichar -Werror
+
+ifeq ($(TARGET_ENABLE_QC_AV_ENHANCEMENTS),true)
+ LOCAL_CFLAGS += -DENABLE_AV_ENHANCEMENTS
+endif
+
+LOCAL_MODULE:= libavmediaextentions
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
+
+########################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ mediaplayerservice/AVMediaServiceFactory.cpp \
+ mediaplayerservice/AVMediaServiceUtils.cpp \
+ mediaplayerservice/AVNuFactory.cpp \
+ mediaplayerservice/AVNuUtils.cpp \
+
+LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/include/media/ \
+ $(TOP)/frameworks/av/media/libmediaplayerservice \
+ $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/av/media/libstagefright/include \
+ $(TOP)/frameworks/av/media/libstagefright/rtsp \
+ $(TOP)/frameworks/native/include/media/hardware \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/external/flac/include \
+ $(TOP)/system/media/audio_utils/include \
+ $(TOP)/$(call project-path-for,qcom-media)/mm-core/inc
+
+LOCAL_CFLAGS += -Wno-multichar -Werror
+
+ifeq ($(TARGET_ENABLE_QC_AV_ENHANCEMENTS),true)
+ LOCAL_CFLAGS += -DENABLE_AV_ENHANCEMENTS
+endif
+
+ifeq ($(TARGET_BOARD_PLATFORM),msm8974)
+ LOCAL_CFLAGS += -DTARGET_8974
+endif
+
+LOCAL_MODULE:= libavmediaserviceextensions
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/media/libavextensions/common/AVExtensionsCommon.h b/media/libavextensions/common/AVExtensionsCommon.h
new file mode 100644
index 0000000..c39872e
--- /dev/null
+++ b/media/libavextensions/common/AVExtensionsCommon.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _AV_EXTENSIONS_COMMON_H_
+#define _AV_EXTENSIONS_COMMON_H_
+
+namespace android {
+
+typedef void *(*createFunction_t)(void);
+
+template <typename T>
+struct ExtensionsLoader {
+
+ static T *createInstance(const char *createFunctionName);
+
+private:
+ static void loadLib();
+ static createFunction_t loadCreateFunction(const char *createFunctionName);
+ static void *mLibHandle;
+};
+
+/*
+ * Boiler-plate to declare the class as a singleton (with a static getter)
+ * which can be loaded (dlopen'd) via ExtensionsLoader
+ */
+#define DECLARE_LOADABLE_SINGLETON(className) \
+protected: \
+ className(); \
+ virtual ~className(); \
+ static className *sInst; \
+private: \
+ className(const className&); \
+ className &operator=(className &); \
+public: \
+ static className *get() { \
+ return sInst; \
+ } \
+ friend struct ExtensionsLoader<className>;
+
+} //namespace android
+
+#endif // _AV_EXTENSIONS_COMMON_H_
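
DECLARE_LOADABLE_SINGLETON is the one piece of boilerplate every extension point repeats: it hides copy construction, declares the static instance, and exposes the get() accessor the framework calls through. A hedged sketch of a class using it; MyAVUtils is a made-up name, while real users such as AVMediaUtils appear later in this patch:

    #include <common/AVExtensionsCommon.h>

    namespace android {

    struct MyAVUtils {
        // Hooks the framework would call through MyAVUtils::get()->...
        virtual bool someFeatureEnabled() { return false; }

        // ----- NO TRESSPASSING BEYOND THIS LINE ------
        DECLARE_LOADABLE_SINGLETON(MyAVUtils);
    };

    }  // namespace android
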
diff --git a/media/libavextensions/common/ExtensionsLoader.hpp b/media/libavextensions/common/ExtensionsLoader.hpp
new file mode 100644
index 0000000..15728ed
--- /dev/null
+++ b/media/libavextensions/common/ExtensionsLoader.hpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <dlfcn.h>
+#include <common/AVExtensionsCommon.h>
+#include <cutils/properties.h>
+
+namespace android {
+
+static const char * CUSTOMIZATION_LIB_NAME = "libavenhancements.so";
+
+/*
+ * Create strongly-typed objects of type T
+ * If the customization library exists and does contain a "named" constructor,
+ * invoke and create an instance
+ * Else create the object of type T itself
+ *
+ * Holds a static handle to the dlopen'd library, but may end up calling
+ * dlopen() more than once. The following snippet from the dlopen man page
+ * is reassuring: "...Only a single copy of an object file is brought into
+ * the address space, even if dlopen() is invoked multiple times in
+ * reference to the file, and even if different pathnames are used to
+ * reference the file..."
+ */
+
+template <typename T>
+T *ExtensionsLoader<T>::createInstance(const char *createFunctionName) {
+ (void)createFunctionName;
+ // create extended object if extensions-lib is available and
+ // AV_ENHANCEMENTS is enabled
+#if ENABLE_AV_ENHANCEMENTS
+ bool enabled = property_get_bool("media.avenhancements.enabled", false);
+ if (enabled) {
+ createFunction_t createFunc = loadCreateFunction(createFunctionName);
+ if (createFunc) {
+ return reinterpret_cast<T *>((*createFunc)());
+ }
+ }
+#endif
+ // Else, create the default object
+ return new T;
+}
+
+template <typename T>
+void ExtensionsLoader<T>::loadLib() {
+ if (!mLibHandle) {
+ mLibHandle = ::dlopen(CUSTOMIZATION_LIB_NAME, RTLD_LAZY);
+ if (!mLibHandle) {
+ ALOGV("%s", dlerror());
+ return;
+ }
+ ALOGV("Opened %s", CUSTOMIZATION_LIB_NAME);
+ }
+}
+
+template <typename T>
+createFunction_t ExtensionsLoader<T>::loadCreateFunction(const char *createFunctionName) {
+ loadLib();
+ if (!mLibHandle) {
+ return NULL;
+ }
+ createFunction_t func = (createFunction_t)dlsym(mLibHandle, createFunctionName);
+ if (!func) {
+ ALOGW("symbol %s not found: %s", createFunctionName, dlerror());
+ }
+ return func;
+}
+
+//static
+template <typename T>
+void *ExtensionsLoader<T>::mLibHandle = NULL;
+
+} //namespace android
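
The .cpp half of the pattern pairs the class with ExtensionsLoader: the static instance is created through a named factory symbol that libavenhancements.so may export, falling back to the base class otherwise. A hedged sketch for the hypothetical MyAVUtils from the earlier note; the symbol name is illustrative and must match whatever the vendor library exports:

    #include "common/ExtensionsLoader.hpp"

    namespace android {

    MyAVUtils::MyAVUtils() {}
    MyAVUtils::~MyAVUtils() {}

    //static
    MyAVUtils *MyAVUtils::sInst =
            ExtensionsLoader<MyAVUtils>::createInstance("createExtendedMyAVUtils");

    }  // namespace android
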
diff --git a/media/libavextensions/media/AVMediaExtensions.h b/media/libavextensions/media/AVMediaExtensions.h
new file mode 100644
index 0000000..9622253
--- /dev/null
+++ b/media/libavextensions/media/AVMediaExtensions.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AV_MEDIA_EXTENSIONS_H_
+#define _AV_MEDIA_EXTENSIONS_H_
+
+#include <common/AVExtensionsCommon.h>
+#include <hardware/audio.h>
+#include <media/AudioTrack.h>
+#include <audio_utils/format.h>
+
+namespace android {
+
+class MediaRecorder;
+class Parcel;
+/*
+ * Common delegate to the classes in libstagefright
+ */
+struct AVMediaUtils {
+
+ virtual bool AudioTrackIsPcmOffloaded(const audio_format_t format) {
+ return audio_is_offload_pcm(format);
+ }
+
+ virtual status_t AudioTrackGetPosition(AudioTrack* track,
+ uint32_t* position) {
+ uint32_t tempPos = (track->mState == AudioTrack::STATE_STOPPED ||
+ track->mState == AudioTrack::STATE_FLUSHED) ? 0 :
+ track->updateAndGetPosition_l();
+ *position = (tempPos /
+ (track->channelCount() * audio_bytes_per_sample(track->format())));
+ return NO_ERROR;
+ }
+
+ virtual status_t AudioTrackGetTimestamp(AudioTrack* track,
+ AudioTimestamp* timestamp) {
+ if (!AudioTrackIsPcmOffloaded(track->format())) {
+ return NO_INIT;
+ }
+ uint32_t tempPos = (track->mState == AudioTrack::STATE_STOPPED ||
+ track->mState == AudioTrack::STATE_FLUSHED) ? 0 :
+ track->updateAndGetPosition_l();
+ timestamp->mPosition = (tempPos / (track->channelCount() *
+ audio_bytes_per_sample(track->format())));
+ clock_gettime(CLOCK_MONOTONIC, &timestamp->mTime);
+ return NO_ERROR;
+ }
+
+ virtual size_t AudioTrackGetOffloadFrameCount(size_t frameCount) {
+ return frameCount * 2;
+ }
+
+ virtual bool AudioTrackIsTrackOffloaded(audio_io_handle_t /*output*/) {
+ return false;
+ }
+
+ virtual sp<MediaRecorder> createMediaRecorder(const String16& opPackageName);
+ virtual void writeCustomData(
+ Parcel * /* reply */, void * /* buffer_data */) {}
+ virtual void readCustomData(
+ const Parcel * /* reply */, void ** /*buffer_data */ ) {}
+ virtual void closeFileDescriptor(void * /* buffer_ptr */) {}
+ // ----- NO TRESSPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVMediaUtils);
+};
+
+} //namespace android
+
+#endif //_AV_MEDIA_EXTENSIONS_H_
+
diff --git a/media/libavextensions/media/AVMediaUtils.cpp b/media/libavextensions/media/AVMediaUtils.cpp
new file mode 100644
index 0000000..0f9e9eb
--- /dev/null
+++ b/media/libavextensions/media/AVMediaUtils.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AVMediaUtils"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/ACodec.h>
+
+#include <media/AudioTrack.h>
+#include <media/mediarecorder.h>
+
+#include "common/ExtensionsLoader.hpp"
+#include "media/AVMediaExtensions.h"
+
+namespace android {
+
+// ----- NO TRESSPASSING BEYOND THIS LINE ------
+AVMediaUtils::AVMediaUtils() {
+}
+
+AVMediaUtils::~AVMediaUtils() {
+}
+
+sp<MediaRecorder> AVMediaUtils::createMediaRecorder(const String16& opPackageName) {
+ return new MediaRecorder(opPackageName);
+}
+
+//static
+AVMediaUtils *AVMediaUtils::sInst =
+ ExtensionsLoader<AVMediaUtils>::createInstance("createExtendedMediaUtils");
+
+} //namespace android
+
diff --git a/media/libavextensions/mediaplayerservice/AVMediaServiceExtensions.h b/media/libavextensions/mediaplayerservice/AVMediaServiceExtensions.h
new file mode 100644
index 0000000..f2c789e
--- /dev/null
+++ b/media/libavextensions/mediaplayerservice/AVMediaServiceExtensions.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _AV_MEDIA_SERVICE_EXTENSIONS_H_
+#define _AV_MEDIA_SERVICE_EXTENSIONS_H_
+
+#include <common/AVExtensionsCommon.h>
+#include <MediaPlayerFactory.h>
+
+#include <utils/List.h>
+#include <utils/RefBase.h>
+#include <utils/String16.h>
+
+#include <media/Metadata.h>
+
+namespace android {
+
+struct StagefrightRecorder;
+struct ARTSPConnection;
+struct ARTPConnection;
+struct AString;
+struct MyHandler;
+struct ABuffer;
+
+/*
+ * Factory to create objects of base-classes in libmediaplayerservice
+ */
+struct AVMediaServiceFactory {
+ virtual StagefrightRecorder *createStagefrightRecorder(const String16 &);
+
+ // RTSP extensions
+ virtual sp<ARTSPConnection> createARTSPConnection(bool uidValid, uid_t uid);
+ virtual sp<ARTPConnection> createARTPConnection();
+
+ // ----- NO TRESSPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVMediaServiceFactory);
+};
+
+/*
+ * Common delegate to the classes in libmediaplayerservice
+ */
+struct AVMediaServiceUtils {
+ virtual void getDashPlayerFactory(MediaPlayerFactory::IFactory *&, player_type ) {}
+ // RTSP IPV6 utils
+ virtual bool pokeAHole(sp<MyHandler> handler, int rtpSocket, int rtcpSocket,
+ const AString &transport, const AString &sessionHost);
+ virtual void makePortPair(int *rtpSocket, int *rtcpSocket, unsigned *rtpPort,
+ bool isIPV6);
+ virtual const char* parseURL(AString *host);
+ // RTSP customization utils
+ virtual bool parseTrackURL(AString url, AString val);
+ virtual void appendRange(AString *request);
+ virtual void setServerTimeoutUs(int64_t timeout);
+ virtual void appendMeta(media::Metadata *meta);
+ virtual bool checkNPTMapping(uint32_t *rtpInfoTime, int64_t *playTimeUs,
+ bool *nptValid, uint32_t rtpTime);
+ virtual void addH263AdvancedPacket(const sp<ABuffer> &buffer,
+ List<sp<ABuffer>> *packets, uint32_t rtpTime);
+ virtual bool parseNTPRange(const char *s, float *npt1, float *npt2);
+
+ // ----- NO TRESSPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVMediaServiceUtils);
+};
+
+} // namespace android
+
+#endif // _AV_MEDIA_SERVICE_EXTENSIONS_H_
diff --git a/media/libavextensions/mediaplayerservice/AVMediaServiceFactory.cpp b/media/libavextensions/mediaplayerservice/AVMediaServiceFactory.cpp
new file mode 100644
index 0000000..10b66f5
--- /dev/null
+++ b/media/libavextensions/mediaplayerservice/AVMediaServiceFactory.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define LOG_TAG "AVMediaServiceFactory"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include "ARTPConnection.h"
+#include "ARTSPConnection.h"
+
+#include "MediaRecorderClient.h"
+#include "MediaPlayerService.h"
+#include "StagefrightRecorder.h"
+
+#include "common/ExtensionsLoader.hpp"
+#include "mediaplayerservice/AVMediaServiceExtensions.h"
+
+namespace android {
+StagefrightRecorder *AVMediaServiceFactory::createStagefrightRecorder(
+ const String16 &opPackageName) {
+ return new StagefrightRecorder(opPackageName);
+}
+
+sp<ARTSPConnection> AVMediaServiceFactory::createARTSPConnection(
+ bool uidValid, uid_t uid) {
+ return new ARTSPConnection(uidValid, uid);
+}
+
+sp<ARTPConnection> AVMediaServiceFactory::createARTPConnection() {
+ return new ARTPConnection();
+}
+
+// ----- NO TRESSPASSING BEYOND THIS LINE ------
+AVMediaServiceFactory::AVMediaServiceFactory() {
+}
+
+AVMediaServiceFactory::~AVMediaServiceFactory() {
+}
+
+//static
+AVMediaServiceFactory *AVMediaServiceFactory::sInst =
+ ExtensionsLoader<AVMediaServiceFactory>::createInstance("createExtendedMediaServiceFactory");
+
+} //namespace android
+
diff --git a/media/libavextensions/mediaplayerservice/AVMediaServiceUtils.cpp b/media/libavextensions/mediaplayerservice/AVMediaServiceUtils.cpp
new file mode 100644
index 0000000..5306a39
--- /dev/null
+++ b/media/libavextensions/mediaplayerservice/AVMediaServiceUtils.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2013 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define LOG_TAG "AVMediaServiceUtils"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include "ARTPConnection.h"
+#include "ASessionDescription.h"
+#include "MyHandler.h"
+
+#include "common/ExtensionsLoader.hpp"
+#include "mediaplayerservice/AVMediaServiceExtensions.h"
+
+namespace android {
+
+bool AVMediaServiceUtils::pokeAHole(sp<MyHandler> handler, int rtpSocket, int rtcpSocket,
+ const AString &transport, const AString &/*sessionHost*/) {
+ if (handler == NULL) {
+ ALOGW("MyHandler is NULL");
+ return false;
+ }
+ return handler->pokeAHole(rtpSocket, rtcpSocket, transport);
+}
+
+void AVMediaServiceUtils::makePortPair(int *rtpSocket, int *rtcpSocket, unsigned *rtpPort,
+ bool /*isIPV6*/) {
+ return ARTPConnection::MakePortPair(rtpSocket, rtcpSocket, rtpPort);
+}
+
+const char* AVMediaServiceUtils::parseURL(AString *host) {
+ return strchr(host->c_str(), ':');
+}
+
+bool AVMediaServiceUtils::parseTrackURL(AString /*url*/, AString /*val*/) {
+ return false;
+}
+
+void AVMediaServiceUtils::appendRange(AString * /*request*/) {
+ return;
+}
+
+void AVMediaServiceUtils::setServerTimeoutUs(int64_t /*timeout*/) {
+ return;
+}
+
+void AVMediaServiceUtils::addH263AdvancedPacket(const sp<ABuffer> &/*buffer*/,
+ List<sp<ABuffer>> * /*packets*/, uint32_t /*rtpTime*/) {
+ return;
+}
+
+void AVMediaServiceUtils::appendMeta(media::Metadata * /*meta*/) {
+ return;
+}
+
+bool AVMediaServiceUtils::checkNPTMapping(uint32_t * /*rtpInfoTime*/, int64_t * /*playTimeUs*/,
+ bool * /*nptValid*/, uint32_t /*rtpTime*/) {
+ return false;
+}
+
+bool AVMediaServiceUtils::parseNTPRange(const char *s, float *npt1, float *npt2) {
+ return ASessionDescription::parseNTPRange(s, npt1, npt2);
+}
+
+// ----- NO TRESPASSING BEYOND THIS LINE ------
+AVMediaServiceUtils::AVMediaServiceUtils() {
+}
+
+AVMediaServiceUtils::~AVMediaServiceUtils() {
+}
+
+//static
+AVMediaServiceUtils *AVMediaServiceUtils::sInst =
+ ExtensionsLoader<AVMediaServiceUtils>::createInstance("createExtendedMediaServiceUtils");
+
+} //namespace android
+
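
These thin delegates let the RTSP stack call through one replaceable object instead of reaching into ARTPConnection or MyHandler directly. A usage sketch, assuming DECLARE_LOADABLE_SINGLETON (defined in common/AVExtensionsCommon.h, not shown in this excerpt) generates a static get() accessor returning sInst:

    // Illustrative call site only; handler, the sockets and transport are
    // placeholders for the real MyHandler state.
    #include "mediaplayerservice/AVMediaServiceExtensions.h"

    static bool punchHoleForSession(const sp<MyHandler> &handler,
            int rtpSocket, int rtcpSocket, const AString &transport,
            const AString &sessionHost) {
        return AVMediaServiceUtils::get()->pokeAHole(
                handler, rtpSocket, rtcpSocket, transport, sessionHost);
    }

A vendor build that needs IPv6-aware port pairing or custom NPT handling overrides the corresponding virtuals instead of patching the RTSP code itself.
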
diff --git a/media/libavextensions/mediaplayerservice/AVNuExtensions.h b/media/libavextensions/mediaplayerservice/AVNuExtensions.h
new file mode 100644
index 0000000..2fe56b8
--- /dev/null
+++ b/media/libavextensions/mediaplayerservice/AVNuExtensions.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _AV_NU_EXTENSIONS_H_
+#define _AV_NU_EXTENSIONS_H_
+
+#include <common/AVExtensionsCommon.h>
+
+namespace android {
+
+struct NuPlayer;
+/*
+ * Factory to create extended NuPlayer objects
+ */
+struct AVNuFactory {
+ virtual sp<NuPlayer> createNuPlayer(pid_t pid);
+
+ virtual sp<NuPlayer::DecoderBase> createPassThruDecoder(
+ const sp<AMessage> &notify,
+ const sp<NuPlayer::Source> &source,
+ const sp<NuPlayer::Renderer> &renderer);
+
+ virtual sp<NuPlayer::DecoderBase> createDecoder(
+ const sp<AMessage> &notify,
+ const sp<NuPlayer::Source> &source,
+ pid_t pid,
+ const sp<NuPlayer::Renderer> &renderer);
+
+ virtual sp<NuPlayer::Renderer> createRenderer(
+ const sp<MediaPlayerBase::AudioSink> &sink,
+ const sp<AMessage> &notify,
+ uint32_t flags);
+
+ // ----- NO TRESPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVNuFactory);
+};
+
+/*
+ * Common delegate to the classes in NuPlayer
+ */
+struct AVNuUtils {
+
+ virtual sp<MetaData> createPCMMetaFromSource(const sp<MetaData> &);
+ virtual bool pcmOffloadException(const sp<MetaData> &);
+ virtual bool isRAWFormat(const sp<MetaData> &);
+ virtual bool isRAWFormat(const sp<AMessage> &);
+ virtual bool isVorbisFormat(const sp<MetaData> &);
+ virtual int updateAudioBitWidth(audio_format_t audioFormat,
+ const sp<AMessage> &);
+ virtual audio_format_t getKeyPCMFormat(const sp<MetaData> &);
+ virtual void setKeyPCMFormat(const sp<MetaData> &, audio_format_t audioFormat);
+ virtual audio_format_t getPCMFormat(const sp<AMessage> &);
+ virtual void setPCMFormat(const sp<AMessage> &, audio_format_t audioFormat);
+ virtual void setSourcePCMFormat(const sp<MetaData> &);
+ virtual void setDecodedPCMFormat(const sp<AMessage> &);
+ virtual status_t convertToSinkFormatIfNeeded(const sp<ABuffer> &, sp<ABuffer> &,
+ audio_format_t sinkFormat, bool isOffload);
+#ifndef TARGET_8974
+ virtual uint32_t getFlags();
+ virtual bool canUseSetBuffers(const sp<MetaData> &Meta);
+#endif
+ virtual void printFileName(int fd);
+ virtual void checkFormatChange(bool *formatChange, const sp<ABuffer> &accessUnit);
+#ifdef TARGET_8974
+ virtual void addFlagsInMeta(const sp<ABuffer> &buffer, int32_t flags, bool isAudio);
+ virtual uint32_t getFlags();
+ virtual bool canUseSetBuffers(const sp<MetaData> &Meta);
+#endif
+ virtual bool dropCorruptFrame();
+
+ // ----- NO TRESPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVNuUtils);
+};
+
+} //namespace android
+
+#endif // _AV_NU_EXTENSIONS_H_
diff --git a/media/libavextensions/mediaplayerservice/AVNuFactory.cpp b/media/libavextensions/mediaplayerservice/AVNuFactory.cpp
new file mode 100644
index 0000000..ff7c074
--- /dev/null
+++ b/media/libavextensions/mediaplayerservice/AVNuFactory.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AVNuFactory"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <nuplayer/NuPlayer.h>
+#include <nuplayer/NuPlayerDecoderBase.h>
+#include <nuplayer/NuPlayerDecoderPassThrough.h>
+#include <nuplayer/NuPlayerDecoder.h>
+#include <nuplayer/NuPlayerCCDecoder.h>
+#include <gui/Surface.h>
+#include <nuplayer/NuPlayerSource.h>
+#include <nuplayer/NuPlayerRenderer.h>
+
+#include "common/ExtensionsLoader.hpp"
+#include "mediaplayerservice/AVNuExtensions.h"
+
+namespace android {
+
+sp<NuPlayer> AVNuFactory::createNuPlayer(pid_t pid) {
+ return new NuPlayer(pid);
+}
+
+sp<NuPlayer::DecoderBase> AVNuFactory::createPassThruDecoder(
+ const sp<AMessage> &notify,
+ const sp<NuPlayer::Source> &source,
+ const sp<NuPlayer::Renderer> &renderer) {
+ return new NuPlayer::DecoderPassThrough(notify, source, renderer);
+}
+
+sp<NuPlayer::DecoderBase> AVNuFactory::createDecoder(
+ const sp<AMessage> &notify,
+ const sp<NuPlayer::Source> &source,
+ pid_t pid,
+ const sp<NuPlayer::Renderer> &renderer) {
+ return new NuPlayer::Decoder(notify, source, pid, renderer);
+}
+
+sp<NuPlayer::Renderer> AVNuFactory::createRenderer(
+ const sp<MediaPlayerBase::AudioSink> &sink,
+ const sp<AMessage> &notify,
+ uint32_t flags) {
+ return new NuPlayer::Renderer(sink, notify, flags);
+}
+
+// ----- NO TRESPASSING BEYOND THIS LINE ------
+AVNuFactory::AVNuFactory() {
+}
+
+AVNuFactory::~AVNuFactory() {
+}
+
+//static
+AVNuFactory *AVNuFactory::sInst =
+ ExtensionsLoader<AVNuFactory>::createInstance("createExtendedNuFactory");
+
+} //namespace android
+
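
Keeping decoder and renderer construction behind these virtuals lets the player request either the pass-through decoder (for compressed/offload playback) or the full decoder without knowing which concrete class it receives. A rough sketch of the choice a caller might make; the flag and surrounding variables are placeholders rather than code lifted from NuPlayer, and get() is again assumed to come from DECLARE_LOADABLE_SINGLETON:

    // Hypothetical selection logic; notify, source, renderer, pid and
    // useOffloadPassThrough stand in for the caller's real state.
    sp<NuPlayer::DecoderBase> decoder;
    if (useOffloadPassThrough) {
        decoder = AVNuFactory::get()->createPassThruDecoder(notify, source, renderer);
    } else {
        decoder = AVNuFactory::get()->createDecoder(notify, source, pid, renderer);
    }
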
diff --git a/media/libavextensions/mediaplayerservice/AVNuUtils.cpp b/media/libavextensions/mediaplayerservice/AVNuUtils.cpp
new file mode 100644
index 0000000..773a098
--- /dev/null
+++ b/media/libavextensions/mediaplayerservice/AVNuUtils.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define LOG_TAG "AVNuUtils"
+#include <utils/Log.h>
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/OMXCodec.h>
+#include <cutils/properties.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/MediaProfiles.h>
+#include <media/stagefright/Utils.h>
+
+#include <audio_utils/format.h>
+
+#include <nuplayer/NuPlayer.h>
+#include <nuplayer/NuPlayerDecoderBase.h>
+#include <nuplayer/NuPlayerDecoderPassThrough.h>
+#include <nuplayer/NuPlayerSource.h>
+#include <nuplayer/NuPlayerRenderer.h>
+
+#include "common/ExtensionsLoader.hpp"
+#include "mediaplayerservice/AVNuExtensions.h"
+
+namespace android {
+
+static bool is24bitPCMOffloadEnabled() {
+ return property_get_bool("audio.offload.pcm.24bit.enable", false);
+}
+
+static bool is16bitPCMOffloadEnabled() {
+ return property_get_bool("audio.offload.pcm.16bit.enable", false);
+}
+
+sp<MetaData> AVNuUtils::createPCMMetaFromSource(const sp<MetaData> &sMeta) {
+ sp<MetaData> tPCMMeta = new MetaData;
+ //hard code as RAW
+ tPCMMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+
+ if (sMeta == NULL) {
+ ALOGW("no meta, returning dummy meta");
+ return tPCMMeta;
+ }
+
+ int32_t bits = 16;
+ sMeta->findInt32(kKeyBitsPerSample, &bits);
+ tPCMMeta->setInt32(kKeyBitsPerSample, bits > 24 ? 24 : bits);
+
+ int32_t srate = -1;
+ if (!sMeta->findInt32(kKeySampleRate, &srate)) {
+ ALOGV("No sample rate");
+ }
+ tPCMMeta->setInt32(kKeySampleRate, srate);
+
+ int32_t cmask = 0;
+ if (!sMeta->findInt32(kKeyChannelMask, &cmask) || (cmask == 0)) {
+ ALOGI("No channel mask, try channel count");
+ }
+ int32_t channelCount = 0;
+ if (!sMeta->findInt32(kKeyChannelCount, &channelCount)) {
+ ALOGI("No channel count either");
+ } else {
+ //if channel mask is not set till now, use channel count
+ //to retrieve channel count
+ if (!cmask) {
+ cmask = audio_channel_out_mask_from_count(channelCount);
+ }
+ }
+ tPCMMeta->setInt32(kKeyChannelCount, channelCount);
+ tPCMMeta->setInt32(kKeyChannelMask, cmask);
+
+ int64_t duration = INT_MAX;
+ if (!sMeta->findInt64(kKeyDuration, &duration)) {
+ ALOGW("No duration in meta setting max duration");
+ }
+ tPCMMeta->setInt64(kKeyDuration, duration);
+
+ int32_t bitRate = -1;
+ if (!sMeta->findInt32(kKeyBitRate, &bitRate)) {
+ ALOGW("No bitrate info");
+ } else {
+ tPCMMeta->setInt32(kKeyBitRate, bitRate);
+ }
+
+ return tPCMMeta;
+}
+
+bool AVNuUtils::pcmOffloadException(const sp<MetaData> &meta) {
+ bool decision = false;
+ const char *mime = {0};
+
+ if (meta == NULL) {
+ return true;
+ }
+ meta->findCString(kKeyMIMEType, &mime);
+
+ if (!mime) {
+ ALOGV("%s: no audio mime present, ignoring pcm offload", __func__);
+ return true;
+ }
+
+ if (!is24bitPCMOffloadEnabled() && !is16bitPCMOffloadEnabled()) {
+ return true;
+ }
+
+ const char * const ExceptionTable[] = {
+ MEDIA_MIMETYPE_AUDIO_AMR_NB,
+ MEDIA_MIMETYPE_AUDIO_AMR_WB,
+ MEDIA_MIMETYPE_AUDIO_QCELP,
+ MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+ MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+ MEDIA_MIMETYPE_AUDIO_EVRC
+ };
+ int countException = (sizeof(ExceptionTable) / sizeof(ExceptionTable[0]));
+
+ for(int i = 0; i < countException; i++) {
+ if (!strcasecmp(mime, ExceptionTable[i])) {
+ decision = true;
+ break;
+ }
+ }
+ ALOGI("decision %d mime %s", decision, mime);
+ return decision;
+#if 0
+ //if PCM offload flag is disabled, do not offload any sessions
+ //using pcm offload
+ decision = true;
+ ALOGI("decision %d mime %s", decision, mime);
+ return decision;
+#endif
+}
+
+bool AVNuUtils::isRAWFormat(const sp<MetaData> &meta) {
+ const char *mime = {0};
+ if (meta == NULL) {
+ return false;
+ }
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ if (!strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW, 9))
+ return true;
+ else
+ return false;
+}
+
+bool AVNuUtils::isRAWFormat(const sp<AMessage> &format) {
+ AString mime;
+ if (format == NULL) {
+ return false;
+ }
+ CHECK(format->findString("mime", &mime));
+ if (!strncasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_RAW, 9))
+ return true;
+ else
+ return false;
+
+}
+
+bool AVNuUtils::isVorbisFormat(const sp<MetaData> &) {
+ return false;
+}
+
+int AVNuUtils::updateAudioBitWidth(audio_format_t audioFormat,
+ const sp<AMessage> &format){
+ int bits = 16;
+ if (format.get() &&
+ (audio_is_linear_pcm(audioFormat) || audio_is_offload_pcm(audioFormat))) {
+ bits = audio_bytes_per_sample(audioFormat) * 8;
+ format->setInt32("bits-per-sample", bits);
+ }
+ return bits;
+}
+
+audio_format_t AVNuUtils::getKeyPCMFormat(const sp<MetaData> &meta) {
+ audio_format_t pcmFormat = AUDIO_FORMAT_INVALID;
+ if (meta.get())
+ meta->findInt32('pfmt', (int32_t *)&pcmFormat);
+ return pcmFormat;
+}
+
+void AVNuUtils::setKeyPCMFormat(const sp<MetaData> &meta, audio_format_t audioFormat) {
+ if (meta.get() && audio_is_linear_pcm(audioFormat))
+ meta->setInt32('pfmt', audioFormat);
+}
+
+audio_format_t AVNuUtils::getPCMFormat(const sp<AMessage> &format) {
+ audio_format_t pcmFormat = AUDIO_FORMAT_INVALID;
+ if (format.get())
+ format->findInt32("pcm-format", (int32_t *)&pcmFormat);
+ return pcmFormat;
+}
+
+void AVNuUtils::setPCMFormat(const sp<AMessage> &format, audio_format_t audioFormat) {
+ if (format.get() &&
+ (audio_is_linear_pcm(audioFormat) || audio_is_offload_pcm(audioFormat)))
+ format->setInt32("pcm-format", audioFormat);
+}
+
+void AVNuUtils::setSourcePCMFormat(const sp<MetaData> &audioMeta) {
+ if (!audioMeta.get() || !isRAWFormat(audioMeta))
+ return;
+
+ audio_format_t pcmFormat = getKeyPCMFormat(audioMeta);
+ if (pcmFormat == AUDIO_FORMAT_INVALID) {
+ int32_t bits = 16;
+ if (audioMeta->findInt32(kKeyBitsPerSample, &bits)) {
+ if (bits == 8)
+ pcmFormat = AUDIO_FORMAT_PCM_8_BIT;
+ else if (bits == 24)
+ pcmFormat = AUDIO_FORMAT_PCM_32_BIT;
+ else if (bits == 32)
+ pcmFormat = AUDIO_FORMAT_PCM_FLOAT;
+ else
+ pcmFormat = AUDIO_FORMAT_PCM_16_BIT;
+ setKeyPCMFormat(audioMeta, pcmFormat);
+ }
+ }
+}
+
+void AVNuUtils::setDecodedPCMFormat(const sp<AMessage> &) {
+
+}
+
+status_t AVNuUtils::convertToSinkFormatIfNeeded(
+ const sp<ABuffer> &buffer, sp<ABuffer> &newBuffer,
+ audio_format_t sinkFormat, bool isOffload) {
+
+ audio_format_t srcFormat = AUDIO_FORMAT_INVALID;
+ if (!isOffload
+ || !audio_is_offload_pcm(sinkFormat)
+ || !buffer->meta()->findInt32("pcm-format", (int32_t *)&srcFormat)
+ || ((int32_t)srcFormat < 0)) {
+ newBuffer = buffer;
+ return OK;
+ }
+
+ size_t bps = audio_bytes_per_sample(srcFormat);
+
+ if (bps <= 0) {
+ ALOGE("Invalid pcmformat %x given for conversion", srcFormat);
+ return INVALID_OPERATION;
+ }
+
+ size_t frames = buffer->size() / bps;
+
+ if (frames == 0) {
+ ALOGE("zero sized buffer, nothing to convert");
+ return BAD_VALUE;
+ }
+
+ ALOGV("convert %zu bytes (frames %zu) of format %x",
+ buffer->size(), frames, srcFormat);
+
+ audio_format_t dstFormat;
+ switch (sinkFormat) {
+ case AUDIO_FORMAT_PCM_16_BIT_OFFLOAD:
+ dstFormat = AUDIO_FORMAT_PCM_16_BIT;
+ break;
+ case AUDIO_FORMAT_PCM_24_BIT_OFFLOAD:
+ if (srcFormat == AUDIO_FORMAT_PCM_32_BIT)
+ dstFormat = AUDIO_FORMAT_PCM_32_BIT;
+ else
+ dstFormat = AUDIO_FORMAT_PCM_24_BIT_OFFLOAD;
+ break;
+ case AUDIO_FORMAT_DEFAULT:
+ ALOGI("OffloadInfo not yet initialized, retry");
+ return NO_INIT;
+ default:
+ ALOGE("Invalid offload format %x given for conversion",
+ sinkFormat);
+ return INVALID_OPERATION;
+ }
+ if (srcFormat == dstFormat) {
+ newBuffer = buffer;
+ return OK;
+ }
+
+ size_t dstFrameSize = audio_bytes_per_sample(dstFormat);
+ size_t dstBytes = frames * dstFrameSize;
+
+ newBuffer = new ABuffer(dstBytes);
+
+ memcpy_by_audio_format(newBuffer->data(), dstFormat,
+ buffer->data(), srcFormat, frames);
+
+ ALOGV("convert to format %x newBuffer->size() %zu",
+ dstFormat, newBuffer->size());
+
+ // copy over some meta info
+ int64_t timeUs = 0;
+ buffer->meta()->findInt64("timeUs", &timeUs);
+ newBuffer->meta()->setInt64("timeUs", timeUs);
+
+ int32_t eos = false;
+ buffer->meta()->findInt32("eos", &eos);
+ newBuffer->meta()->setInt32("eos", eos);
+
+ newBuffer->meta()->setInt32("pcm-format", (int32_t)dstFormat);
+ return OK;
+}
+
+void AVNuUtils::printFileName(int) {}
+
+void AVNuUtils::checkFormatChange(bool * /*formatChange*/,
+ const sp<ABuffer> & /*accessUnit*/) {
+}
+
+#ifdef TARGET_8974
+void AVNuUtils::addFlagsInMeta(const sp<ABuffer> & /*buffer*/,
+ int32_t /*flags*/, bool /*isAudio*/) {
+}
+#endif
+
+uint32_t AVNuUtils::getFlags() {
+ return 0;
+}
+
+bool AVNuUtils::canUseSetBuffers(const sp<MetaData> &/*Meta*/) {
+ return false;
+}
+
+bool AVNuUtils::dropCorruptFrame() { return false; }
+
+// ----- NO TRESPASSING BEYOND THIS LINE ------
+AVNuUtils::AVNuUtils() {}
+
+AVNuUtils::~AVNuUtils() {}
+
+//static
+AVNuUtils *AVNuUtils::sInst =
+ ExtensionsLoader<AVNuUtils>::createInstance("createExtendedNuUtils");
+
+} //namespace android
+
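
convertToSinkFormatIfNeeded() above sizes its output from the sample widths of the two formats: it treats buffer->size() / audio_bytes_per_sample(srcFormat) as the sample count, allocates that many destination samples, converts with memcpy_by_audio_format(), and carries the "timeUs", "eos" and "pcm-format" entries across. A small worked example under assumed inputs (the buffer, its size and the sink format are illustrative only, and get() is assumed from DECLARE_LOADABLE_SINGLETON):

    // 4096 bytes of 32-bit PCM = 1024 samples; converting for a 16-bit
    // offload sink should therefore produce a 2048-byte buffer.
    sp<ABuffer> decoded = new ABuffer(4096);
    decoded->meta()->setInt32("pcm-format", AUDIO_FORMAT_PCM_32_BIT);
    decoded->meta()->setInt64("timeUs", 0);

    sp<ABuffer> forSink;
    status_t err = AVNuUtils::get()->convertToSinkFormatIfNeeded(
            decoded, forSink, AUDIO_FORMAT_PCM_16_BIT_OFFLOAD, true /* isOffload */);
    // On OK, forSink->size() == 2048 and its meta carries timeUs, eos and
    // the destination "pcm-format" (AUDIO_FORMAT_PCM_16_BIT).
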
diff --git a/media/libavextensions/stagefright/AVExtensions.h b/media/libavextensions/stagefright/AVExtensions.h
new file mode 100644
index 0000000..c4c9aae
--- /dev/null
+++ b/media/libavextensions/stagefright/AVExtensions.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _AV_EXTENSIONS_H_
+#define _AV_EXTENSIONS_H_
+
+#include <media/stagefright/DataSource.h>
+#include <common/AVExtensionsCommon.h>
+#include <system/audio.h>
+#include <camera/ICamera.h>
+#include <media/mediarecorder.h>
+#include <media/IOMX.h>
+#include <media/AudioParameter.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+class MediaExtractor;
+class MPEG4Writer;
+struct ABuffer;
+struct ACodec;
+struct ALooper;
+struct IMediaHTTPConnection;
+struct MediaCodec;
+struct MediaHTTP;
+struct NuCachedSource2;
+class CameraParameters;
+class MediaBuffer;
+struct AudioSource;
+class CameraSource;
+class CameraSourceTimeLapse;
+class ICamera;
+class ICameraRecordingProxy;
+class String16;
+class IGraphicBufferProducer;
+struct Size;
+class MPEG4Writer;
+
+/*
+ * Factory to create objects of base-classes in libstagefright
+ */
+struct AVFactory {
+ virtual sp<ACodec> createACodec();
+ virtual MediaExtractor* createExtendedExtractor(
+ const sp<DataSource> &source, const char *mime,
+ const sp<AMessage> &meta, const uint32_t flags);
+ virtual sp<MediaExtractor> updateExtractor(
+ sp<MediaExtractor> ext, const sp<DataSource> &source,
+ const char *mime, const sp<AMessage> &meta, const uint32_t flags);
+ virtual sp<NuCachedSource2> createCachedSource(
+ const sp<DataSource> &source,
+ const char *cacheConfig = NULL,
+ bool disconnectAtHighwatermark = false);
+ virtual MediaHTTP* createMediaHTTP(
+ const sp<IMediaHTTPConnection> &conn);
+
+ virtual AudioSource* createAudioSource(
+ audio_source_t inputSource,
+ const String16 &opPackageName,
+ uint32_t sampleRate,
+ uint32_t channels,
+ uint32_t outSampleRate = 0);
+
+ virtual CameraSource *CreateCameraSourceFromCamera(
+ const sp<ICamera> &camera,
+ const sp<ICameraRecordingProxy> &proxy,
+ int32_t cameraId,
+ const String16& clientName,
+ uid_t clientUid,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<IGraphicBufferProducer>& surface,
+ bool storeMetaDataInVideoBuffers = true);
+
+ virtual CameraSourceTimeLapse *CreateCameraSourceTimeLapseFromCamera(
+ const sp<ICamera> &camera,
+ const sp<ICameraRecordingProxy> &proxy,
+ int32_t cameraId,
+ const String16& clientName,
+ uid_t clientUid,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<IGraphicBufferProducer>& surface,
+ int64_t timeBetweenFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers = true);
+
+ virtual MPEG4Writer *CreateMPEG4Writer(int fd);
+ // ----- NO TRESPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVFactory);
+};
+
+/*
+ * Common delegate to the classes in libstagefright
+ */
+struct AVUtils {
+
+ virtual status_t convertMetaDataToMessage(
+ const sp<MetaData> &meta, sp<AMessage> *format);
+ virtual DataSource::SnifferFunc getExtendedSniffer();
+ virtual status_t mapMimeToAudioFormat( audio_format_t& format, const char* mime);
+ virtual status_t sendMetaDataToHal(const sp<MetaData>& meta, AudioParameter *param);
+
+ virtual sp<MediaCodec> createCustomComponentByName(const sp<ALooper> &looper,
+ const char* mime, bool encoder, const sp<AMessage> &format);
+ virtual bool isEnhancedExtension(const char *extension);
+
+ virtual bool is24bitPCMOffloadEnabled();
+ virtual bool is16bitPCMOffloadEnabled();
+ virtual int getAudioSampleBits(const sp<MetaData> &);
+ virtual int getAudioSampleBits(const sp<AMessage> &);
+ virtual void setPcmSampleBits(const sp<MetaData> &, int32_t /*bitWidth*/);
+ virtual void setPcmSampleBits(const sp<AMessage> &, int32_t /*bitWidth*/);
+
+ virtual audio_format_t updateAudioFormat(audio_format_t audioFormat,
+ const sp<MetaData> &);
+
+ virtual audio_format_t updateAudioFormat(audio_format_t audioFormat,
+ const sp<AMessage> &);
+
+ virtual bool canOffloadAPE(const sp<MetaData> &meta);
+
+ virtual int32_t getAudioMaxInputBufferSize(audio_format_t audioFormat,
+ const sp<AMessage> &);
+
+ virtual bool mapAACProfileToAudioFormat(const sp<MetaData> &,
+ audio_format_t &,
+ uint64_t /*eAacProfile*/);
+
+ virtual bool mapAACProfileToAudioFormat(const sp<AMessage> &,
+ audio_format_t &,
+ uint64_t /*eAacProfile*/);
+
+ virtual void extractCustomCameraKeys(
+ const CameraParameters& /*params*/, sp<MetaData> &/*meta*/);
+ virtual void printFileName(int /*fd*/) {}
+ virtual void addDecodingTimesFromBatch(MediaBuffer * /*buf*/,
+ List<int64_t> &/*decodeTimeQueue*/) {}
+
+ virtual bool useQCHWEncoder(const sp<AMessage> &, AString &) { return false; }
+
+ virtual bool canDeferRelease(const sp<MetaData> &meta) {
+ int32_t deferRelease = false;
+ return meta->findInt32(kKeyCanDeferRelease, &deferRelease) && deferRelease;
+ }
+
+ virtual void setDeferRelease(sp<MetaData> &meta) {
+ meta->setInt32(kKeyCanDeferRelease, true);
+ }
+
+ struct HEVCMuxer {
+
+ virtual bool reassembleHEVCCSD(const AString &mime, sp<ABuffer> csd0, sp<MetaData> &meta);
+
+ virtual void writeHEVCFtypBox(MPEG4Writer *writer);
+
+ virtual status_t makeHEVCCodecSpecificData(const uint8_t *data,
+ size_t size, void** codecSpecificData,
+ size_t *codecSpecificDataSize);
+
+ virtual const char *getFourCCForMime(const char *mime);
+
+ virtual void writeHvccBox(MPEG4Writer *writer,
+ void* codecSpecificData, size_t codecSpecificDataSize,
+ bool useNalLengthFour);
+
+ virtual bool isVideoHEVC(const char* mime);
+
+ virtual void getHEVCCodecSpecificDataFromInputFormatIfPossible(
+ sp<MetaData> meta, void **codecSpecificData,
+ size_t *codecSpecificDataSize, bool *gotAllCodecSpecificData);
+
+ protected:
+ HEVCMuxer() {};
+ virtual ~HEVCMuxer() {};
+ friend struct AVUtils;
+
+ private:
+ struct HEVCParamSet {
+ HEVCParamSet(uint16_t length, const uint8_t *data)
+ : mLength(length), mData(data) {}
+
+ uint16_t mLength;
+ const uint8_t *mData;
+ };
+
+ status_t extractNALRBSPData(const uint8_t *data, size_t size,
+ uint8_t **header, bool *alreadyFilled);
+
+ status_t parserProfileTierLevel(const uint8_t *data, size_t size,
+ uint8_t **header, bool *alreadyFilled);
+
+ const uint8_t *parseHEVCParamSet(const uint8_t *data, size_t length,
+ List<HEVCParamSet> &paramSetList, size_t *paramSetLen);
+
+ size_t parseHEVCCodecSpecificData(const uint8_t *data, size_t size,
+ List<HEVCParamSet> &vidParamSet, List<HEVCParamSet> &seqParamSet,
+ List<HEVCParamSet> &picParamSet );
+ };
+
+
+ virtual inline HEVCMuxer& HEVCMuxerUtils() {
+ return mHEVCMuxer;
+ }
+
+ virtual bool isAudioMuxFormatSupported(const char *mime);
+ virtual void cacheCaptureBuffers(sp<ICamera> camera, video_encoder encoder);
+ virtual const char *getCustomCodecsLocation();
+
+ virtual void setIntraPeriod(
+ int nPFrames, int nBFrames, const sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID);
+
+ /*
+ * This class is a placeholder for the set of methods used
+ * to enable HFR (High Frame Rate) Recording
+ *
+ * HFR is a slow-motion recording feature where framerate
+ * is increased at capture, but file is composed to play
+ * back at normal rate, giving a net result of slow-motion.
+ * If HFR factor = N
+ * framerate (at capture and encoder) = N * actual value
+ * bitrate = N * actual value
+ * (as the encoder still gets actual timestamps)
+ * timeStamps (at composition) = actual value
+ * timeScale (at composition) = actual value / N
+ * (when parser re-generates timestamps, they will be
+ * up-scaled by factor N, which results in slow-motion)
+ *
+ * HSR is a high-framerate recording variant where timestamps
+ * are not meddled with, yielding a video mux'ed at captured
+ * fps
+ */
+ struct HFR {
+ // set kKeyHFR when 'video-hfr' parameter is enabled
+ // or set kKeyHSR when 'video-hsr' parameter is enabled
+ virtual void setHFRIfEnabled(
+ const CameraParameters& params, sp<MetaData> &meta);
+
+ // recalculate file-duration when HFR is enabled
+ virtual status_t initializeHFR(
+ const sp<MetaData> &meta, sp<AMessage> &format,
+ int64_t &maxFileDurationUs, video_encoder videoEncoder);
+
+ virtual void setHFRRatio(
+ sp<MetaData> &meta, const int32_t hfrRatio);
+
+ virtual int32_t getHFRRatio(
+ const sp<MetaData> &meta);
+
+ protected:
+ HFR() {};
+ virtual ~HFR() {};
+ friend struct AVUtils;
+
+ private:
+ // Query supported capabilities from target-specific profiles
+ virtual int32_t getHFRCapabilities(
+ video_encoder codec,
+ int& maxHFRWidth, int& maxHFRHeight, int& maxHFRFps,
+ int& maxBitrate);
+ };
+ virtual inline HFR& HFRUtils() {
+ return mHFR;
+ }
+
+private:
+ HEVCMuxer mHEVCMuxer;
+ HFR mHFR;
+ // ----- NO TRESPASSING BEYOND THIS LINE ------
+ DECLARE_LOADABLE_SINGLETON(AVUtils);
+
+};
+} //namespace android
+
+#endif // _AV_EXTENSIONS_H_
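
To make the scaling described in the HFR comment above concrete, here is the arithmetic for an assumed HFR factor N = 4 applied to a nominal 30 fps, 20 Mbps profile (the numbers are illustrative, not values read from any real encoder profile):

    const int32_t N = 4;                          // assumed HFR factor
    const int32_t nominalFps = 30;
    const int32_t nominalBitrate = 20000000;      // 20 Mbps
    const int32_t nominalTimeScale = 30000;

    int32_t captureFps = N * nominalFps;          // 120 fps at capture and encoder
    int32_t encodeBitrate = N * nominalBitrate;   // 80 Mbps, timestamps stay real-time
    int32_t muxTimeScale = nominalTimeScale / N;  // 7500, so playback runs 4x slower

HSR, by contrast, leaves the timestamps untouched, so the same capture is simply muxed as a genuine high-framerate clip.
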
diff --git a/media/libavextensions/stagefright/AVFactory.cpp b/media/libavextensions/stagefright/AVFactory.cpp
new file mode 100644
index 0000000..7420d12
--- /dev/null
+++ b/media/libavextensions/stagefright/AVFactory.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define LOG_TAG "AVFactory"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/ACodec.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/AudioSource.h>
+#include <media/stagefright/CameraSource.h>
+#include <media/stagefright/CameraSourceTimeLapse.h>
+#include <camera/CameraParameters.h>
+#include <media/stagefright/MPEG4Writer.h>
+
+#include "common/ExtensionsLoader.hpp"
+#include "stagefright/AVExtensions.h"
+#include "include/NuCachedSource2.h"
+
+namespace android {
+
+sp<ACodec> AVFactory::createACodec() {
+ return new ACodec;
+}
+
+MediaExtractor* AVFactory::createExtendedExtractor(
+ const sp<DataSource> &, const char *, const sp<AMessage> &,
+ const uint32_t) {
+ return NULL;
+}
+
+sp<MediaExtractor> AVFactory::updateExtractor(
+ sp<MediaExtractor> ext, const sp<DataSource> &,
+ const char *, const sp<AMessage> &, const uint32_t) {
+ return ext;
+}
+
+sp<NuCachedSource2> AVFactory::createCachedSource(
+ const sp<DataSource> &source,
+ const char *cacheConfig,
+ bool disconnectAtHighwatermark) {
+ return NuCachedSource2::Create(source, cacheConfig, disconnectAtHighwatermark);
+}
+
+MediaHTTP* AVFactory::createMediaHTTP(
+ const sp<IMediaHTTPConnection> &conn) {
+ return new MediaHTTP(conn);
+}
+
+AudioSource* AVFactory::createAudioSource(
+ audio_source_t inputSource,
+ const String16 &opPackageName,
+ uint32_t sampleRate,
+ uint32_t channels,
+ uint32_t outSampleRate) {
+ return new AudioSource(inputSource, opPackageName, sampleRate,
+ channels, outSampleRate);
+}
+
+CameraSource* AVFactory::CreateCameraSourceFromCamera(
+ const sp<ICamera> &camera,
+ const sp<ICameraRecordingProxy> &proxy,
+ int32_t cameraId,
+ const String16& clientName,
+ uid_t clientUid,
+ Size videoSize,
+ int32_t frameRate,
+ const sp<IGraphicBufferProducer>& surface,
+ bool storeMetaDataInVideoBuffers) {
+ return CameraSource::CreateFromCamera(camera, proxy, cameraId,
+ clientName, clientUid, videoSize, frameRate, surface,
+ storeMetaDataInVideoBuffers);
+}
+
+CameraSourceTimeLapse* AVFactory::CreateCameraSourceTimeLapseFromCamera(
+ const sp<ICamera> &camera,
+ const sp<ICameraRecordingProxy> &proxy,
+ int32_t cameraId,
+ const String16& clientName,
+ uid_t clientUid,
+ Size videoSize,
+ int32_t videoFrameRate,
+ const sp<IGraphicBufferProducer>& surface,
+ int64_t timeBetweenFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers) {
+ return CameraSourceTimeLapse::CreateFromCamera(camera, proxy, cameraId,
+ clientName, clientUid, videoSize, videoFrameRate, surface,
+ timeBetweenFrameCaptureUs, storeMetaDataInVideoBuffers);
+}
+
+MPEG4Writer* AVFactory::CreateMPEG4Writer(int fd) {
+ return new MPEG4Writer(fd);
+}
+
+// ----- NO TRESPASSING BEYOND THIS LINE ------
+AVFactory::AVFactory() {
+}
+
+AVFactory::~AVFactory() {
+}
+
+//static
+AVFactory *AVFactory::sInst =
+ ExtensionsLoader<AVFactory>::createInstance("createExtendedFactory");
+
+} //namespace android
+
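
Callers are expected to obtain sources and writers through this factory rather than with direct constructors, so a loaded extension can substitute its own subclasses. A sketch of recorder-style usage; the package name, rates and output fd are placeholders, and get() is assumed from DECLARE_LOADABLE_SINGLETON:

    // Illustrative only: build the capture source and muxer via the factory.
    sp<AudioSource> audioSource = AVFactory::get()->createAudioSource(
            AUDIO_SOURCE_MIC, String16("com.example.recorder"),
            48000 /* sampleRate */, 2 /* channels */);

    int fd = -1;  // an already-opened output file descriptor in real code
    MPEG4Writer *writer = AVFactory::get()->CreateMPEG4Writer(fd);
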
diff --git a/media/libavextensions/stagefright/AVUtils.cpp b/media/libavextensions/stagefright/AVUtils.cpp
new file mode 100644
index 0000000..a9cd4b2
--- /dev/null
+++ b/media/libavextensions/stagefright/AVUtils.cpp
@@ -0,0 +1,1139 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define LOG_TAG "AVUtils"
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/ACodec.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MPEG4Writer.h>
+#include <media/stagefright/Utils.h>
+#include <media/MediaProfiles.h>
+
+#if defined(QCOM_HARDWARE) || defined(FLAC_OFFLOAD_ENABLED)
+#include "QCMediaDefs.h"
+#include "QCMetaData.h"
+#ifdef FLAC_OFFLOAD_ENABLED
+#include "audio_defs.h"
+#endif
+#endif
+
+#include <binder/IPCThreadState.h>
+#include <camera/CameraParameters.h>
+
+#include "common/ExtensionsLoader.hpp"
+#include "stagefright/AVExtensions.h"
+
+namespace android {
+
+static const uint8_t kHEVCNalUnitTypeVidParamSet = 0x20;
+static const uint8_t kHEVCNalUnitTypeSeqParamSet = 0x21;
+static const uint8_t kHEVCNalUnitTypePicParamSet = 0x22;
+
+enum MetaKeyType{
+ INT32, INT64, STRING, DATA, CSD
+};
+
+struct MetaKeyEntry{
+ int MetaKey;
+ const char* MsgKey;
+ MetaKeyType KeyType;
+};
+
+static const MetaKeyEntry MetaKeyTable[] {
+#ifdef QCOM_HARDWARE
+ {kKeyAacCodecSpecificData , "aac-codec-specific-data", CSD},
+ {kKeyDivXVersion , "divx-version" , INT32}, // int32_t
+ {kKeyDivXDrm , "divx-drm" , DATA}, // void *
+ {kKeyWMAEncodeOpt , "wma-encode-opt" , INT32}, // int32_t
+ {kKeyWMABlockAlign , "wma-block-align" , INT32}, // int32_t
+ {kKeyWMAAdvEncOpt1 , "wma-adv-enc-opt1" , INT32}, // int16_t
+ {kKeyWMAAdvEncOpt2 , "wma-adv-enc-opt2" , INT32}, // int32_t
+ {kKeyWMAFormatTag , "wma-format-tag" , INT32}, // int32_t
+ {kKeyWMABitspersample , "wma-bits-per-sample" , INT32}, // int32_t
+ {kKeyWMAVirPktSize , "wma-vir-pkt-size" , INT32}, // int32_t
+ {kKeyWMAChannelMask , "wma-channel-mask" , INT32}, // int32_t
+ {kKeyFileFormat , "file-format" , STRING}, // cstring
+
+ {kkeyAacFormatAdif , "aac-format-adif" , INT32}, // bool (int32_t)
+ {kkeyAacFormatLtp , "aac-format-ltp" , INT32},
+
+ //DTS subtype
+ {kKeyDTSSubtype , "dts-subtype" , INT32}, //int32_t
+
+ //Extractor sets this
+ {kKeyUseArbitraryMode , "use-arbitrary-mode" , INT32}, //bool (int32_t)
+ {kKeySmoothStreaming , "smooth-streaming" , INT32}, //bool (int32_t)
+ {kKeyHFR , "hfr" , INT32}, // int32_t
+#endif
+#ifdef FLAC_OFFLOAD_ENABLED
+ {kKeyMinBlkSize , "min-block-size" , INT32},
+ {kKeyMaxBlkSize , "max-block-size" , INT32},
+ {kKeyMinFrmSize , "min-frame-size" , INT32},
+ {kKeyMaxFrmSize , "max-frame-size" , INT32},
+#endif
+
+
+ {kKeyBitRate , "bitrate" , INT32},
+ {kKeySampleRate , "sample-rate" , INT32},
+ {kKeyChannelCount , "channel-count" , INT32},
+ {kKeyRawCodecSpecificData , "raw-codec-specific-data", CSD},
+
+ {kKeyBitsPerSample , "bits-per-sample" , INT32},
+ {kKeyCodecId , "codec-id" , INT32},
+ {kKeySampleFormat , "sample-format" , INT32},
+ {kKeyBlockAlign , "block-align" , INT32},
+ {kKeyCodedSampleBits , "coded-sample-bits" , INT32},
+ {kKeyAACAOT , "aac-profile" , INT32},
+ {kKeyRVVersion , "rv-version" , INT32},
+ {kKeyWMAVersion , "wma-version" , INT32}, // int32_t
+ {kKeyWMVVersion , "wmv-version" , INT32},
+};
+
+status_t AVUtils::convertMetaDataToMessage(
+ const sp<MetaData> &meta, sp<AMessage> *format) {
+ const char * str_val;
+ int32_t int32_val;
+ int64_t int64_val;
+ uint32_t data_type;
+ const void * data;
+ size_t size;
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (MetaKeyTable[i].KeyType == INT32 &&
+ meta->findInt32(MetaKeyTable[i].MetaKey, &int32_val)) {
+ ALOGV("found metakey %s of type int32", MetaKeyTable[i].MsgKey);
+ format->get()->setInt32(MetaKeyTable[i].MsgKey, int32_val);
+ } else if (MetaKeyTable[i].KeyType == INT64 &&
+ meta->findInt64(MetaKeyTable[i].MetaKey, &int64_val)) {
+ ALOGV("found metakey %s of type int64", MetaKeyTable[i].MsgKey);
+ format->get()->setInt64(MetaKeyTable[i].MsgKey, int64_val);
+ } else if (MetaKeyTable[i].KeyType == STRING &&
+ meta->findCString(MetaKeyTable[i].MetaKey, &str_val)) {
+ ALOGV("found metakey %s of type string", MetaKeyTable[i].MsgKey);
+ format->get()->setString(MetaKeyTable[i].MsgKey, str_val);
+ } else if ( (MetaKeyTable[i].KeyType == DATA ||
+ MetaKeyTable[i].KeyType == CSD) &&
+ meta->findData(MetaKeyTable[i].MetaKey, &data_type, &data, &size)) {
+ ALOGV("found metakey %s of type data", MetaKeyTable[i].MsgKey);
+ if (MetaKeyTable[i].KeyType == CSD) {
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ if (strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-0", buffer);
+ } else {
+ const uint8_t *ptr = (const uint8_t *)data;
+ CHECK(size >= 8);
+ int seqLength = 0, picLength = 0;
+ for (size_t i = 4; i < (size - 4); i++)
+ {
+ if ((*(ptr + i) == 0) && (*(ptr + i + 1) == 0) &&
+ (*(ptr + i + 2) == 0) && (*(ptr + i + 3) == 1))
+ seqLength = i;
+ }
+ sp<ABuffer> buffer = new ABuffer(seqLength);
+ memcpy(buffer->data(), data, seqLength);
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-0", buffer);
+ picLength=size-seqLength;
+ sp<ABuffer> buffer1 = new ABuffer(picLength);
+ memcpy(buffer1->data(), (const uint8_t *)data + seqLength, picLength);
+ buffer1->meta()->setInt32("csd", true);
+ buffer1->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-1", buffer1);
+ }
+ } else {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+ format->get()->setBuffer(MetaKeyTable[i].MsgKey, buffer);
+ }
+ }
+ }
+ return OK;
+}
+
+struct mime_conv_t {
+ const char* mime;
+ audio_format_t format;
+};
+
+static const struct mime_conv_t mimeLookup[] = {
+ { MEDIA_MIMETYPE_AUDIO_MPEG, AUDIO_FORMAT_MP3 },
+ { MEDIA_MIMETYPE_AUDIO_RAW, AUDIO_FORMAT_PCM_16_BIT },
+ { MEDIA_MIMETYPE_AUDIO_AMR_NB, AUDIO_FORMAT_AMR_NB },
+ { MEDIA_MIMETYPE_AUDIO_AMR_WB, AUDIO_FORMAT_AMR_WB },
+ { MEDIA_MIMETYPE_AUDIO_AAC, AUDIO_FORMAT_AAC },
+ { MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
+ { MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
+#ifdef QCOM_HARDWARE
+ { MEDIA_MIMETYPE_AUDIO_AC3, AUDIO_FORMAT_AC3 },
+ { MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS, AUDIO_FORMAT_AMR_WB_PLUS },
+ { MEDIA_MIMETYPE_AUDIO_DTS, AUDIO_FORMAT_DTS },
+ { MEDIA_MIMETYPE_AUDIO_EAC3, AUDIO_FORMAT_E_AC3 },
+ { MEDIA_MIMETYPE_AUDIO_EVRC, AUDIO_FORMAT_EVRC },
+ { MEDIA_MIMETYPE_AUDIO_QCELP, AUDIO_FORMAT_QCELP },
+ { MEDIA_MIMETYPE_AUDIO_WMA, AUDIO_FORMAT_WMA },
+ { MEDIA_MIMETYPE_AUDIO_FLAC, AUDIO_FORMAT_FLAC },
+ { MEDIA_MIMETYPE_CONTAINER_QTIFLAC, AUDIO_FORMAT_FLAC },
+#ifdef DOLBY_UDC
+ { MEDIA_MIMETYPE_AUDIO_EAC3_JOC, AUDIO_FORMAT_E_AC3_JOC },
+#endif
+#endif
+ { 0, AUDIO_FORMAT_INVALID }
+};
+
+status_t AVUtils::mapMimeToAudioFormat(
+ audio_format_t& format, const char* mime) {
+ const struct mime_conv_t* p = &mimeLookup[0];
+ while (p->mime != NULL) {
+ if (0 == strcasecmp(mime, p->mime)) {
+ format = p->format;
+ return OK;
+ }
+ ++p;
+ }
+
+ return BAD_VALUE;
+}
+
+status_t AVUtils::sendMetaDataToHal(
+ const sp<MetaData>& meta, AudioParameter *param){
+#ifdef FLAC_OFFLOAD_ENABLED
+ int32_t minBlkSize, maxBlkSize, minFrmSize, maxFrmSize; //FLAC params
+ if (meta->findInt32(kKeyMinBlkSize, &minBlkSize)) {
+ param->addInt(String8(AUDIO_OFFLOAD_CODEC_FLAC_MIN_BLK_SIZE), minBlkSize);
+ }
+ if (meta->findInt32(kKeyMaxBlkSize, &maxBlkSize)) {
+ param->addInt(String8(AUDIO_OFFLOAD_CODEC_FLAC_MAX_BLK_SIZE), maxBlkSize);
+ }
+ if (meta->findInt32(kKeyMinFrmSize, &minFrmSize)) {
+ param->addInt(String8(AUDIO_OFFLOAD_CODEC_FLAC_MIN_FRAME_SIZE), minFrmSize);
+ }
+ if (meta->findInt32(kKeyMaxFrmSize, &maxFrmSize)) {
+ param->addInt(String8(AUDIO_OFFLOAD_CODEC_FLAC_MAX_FRAME_SIZE), maxFrmSize);
+ }
+#else
+ (void)meta;
+ (void)param;
+#endif
+ return OK;
+}
+
+bool AVUtils::is24bitPCMOffloadEnabled() {
+ char propPCMOfload[PROPERTY_VALUE_MAX] = {0};
+ property_get("audio.offload.pcm.24bit.enable", propPCMOfload, "0");
+ if (!strncmp(propPCMOfload, "true", 4) || atoi(propPCMOfload))
+ return true;
+ else
+ return false;
+}
+
+bool AVUtils::is16bitPCMOffloadEnabled() {
+ char propPCMOfload[PROPERTY_VALUE_MAX] = {0};
+ property_get("audio.offload.pcm.16bit.enable", propPCMOfload, "0");
+ if (!strncmp(propPCMOfload, "true", 4) || atoi(propPCMOfload))
+ return true;
+ else
+ return false;
+}
+
+
+int AVUtils::getAudioSampleBits(const sp<MetaData> &meta) {
+ int32_t bits = 16;
+ audio_format_t audioFormat = AUDIO_FORMAT_INVALID;
+ if (meta->findInt32('pfmt', (int32_t *)&audioFormat)) {
+ bits = audio_bytes_per_sample(audioFormat) * 8;
+ } else if (meta->findInt32(kKeyBitsPerSample, &bits)) {
+ return bits;
+ }
+ return bits;
+}
+
+int AVUtils::getAudioSampleBits(const sp<AMessage> &format) {
+ int32_t bits = 16;
+ audio_format_t audioFormat = AUDIO_FORMAT_INVALID;
+ if (format->findInt32("pcm-format", (int32_t *)&audioFormat)) {
+ bits = audio_bytes_per_sample(audioFormat) * 8;
+ } else if (format->findInt32("bits-per-sample", &bits)) {
+ return bits;
+ }
+ return bits;
+}
+
+void AVUtils::setPcmSampleBits(const sp<AMessage> &format, int32_t bitWidth) {
+ format->setInt32("bits-per-sample", bitWidth);
+}
+
+void AVUtils::setPcmSampleBits(const sp<MetaData> &meta, int32_t bitWidth) {
+ meta->setInt32(kKeyBitsPerSample, bitWidth);
+}
+
+audio_format_t AVUtils::updateAudioFormat(audio_format_t audioFormat,
+ const sp<MetaData> &meta){
+ int32_t bits = getAudioSampleBits(meta);
+
+ ALOGV("updateAudioFormat %x %d", audioFormat, bits);
+ meta->dumpToLog();
+
+ // Override audio format for PCM offload
+ if (audio_is_linear_pcm(audioFormat)) {
+ if (bits > 16 && is24bitPCMOffloadEnabled()) {
+ audioFormat = AUDIO_FORMAT_PCM_24_BIT_OFFLOAD;
+ meta->setInt32(kKeyBitsPerSample, 24);
+ } else if (bits == 16 && is16bitPCMOffloadEnabled()) {
+ audioFormat = AUDIO_FORMAT_PCM_16_BIT_OFFLOAD;
+ }
+ }
+
+ return audioFormat;
+}
+
+audio_format_t AVUtils::updateAudioFormat(audio_format_t audioFormat,
+ const sp<AMessage> &format){
+ int32_t bits = getAudioSampleBits(format);
+
+ ALOGV("updateAudioFormat %x %d %s", audioFormat, bits, format->debugString().c_str());
+
+ // Override audio format for PCM offload
+ if (audio_is_linear_pcm(audioFormat)) {
+ if (bits > 16 && is24bitPCMOffloadEnabled()) {
+ audioFormat = AUDIO_FORMAT_PCM_24_BIT_OFFLOAD;
+ format->setInt32("bits-per-sample", 24);
+ } else if (bits == 16 && is16bitPCMOffloadEnabled()) {
+ audioFormat = AUDIO_FORMAT_PCM_16_BIT_OFFLOAD;
+ }
+ }
+
+ return audioFormat;
+}
+
+static bool dumbSniffer(
+ const sp<DataSource> &, String8 *,
+ float *, sp<AMessage> *) {
+ return false;
+}
+
+DataSource::SnifferFunc AVUtils::getExtendedSniffer() {
+ return dumbSniffer;
+}
+
+sp<MediaCodec> AVUtils::createCustomComponentByName(
+ const sp<ALooper> &, const char* , bool, const sp<AMessage> &) {
+ return NULL;
+}
+
+bool AVUtils::canOffloadAPE(const sp<MetaData> &) {
+ return true;
+}
+
+int32_t AVUtils::getAudioMaxInputBufferSize(audio_format_t, const sp<AMessage> &) {
+ return 0;
+}
+
+bool AVUtils::mapAACProfileToAudioFormat(const sp<MetaData> &, audio_format_t &,
+ uint64_t /*eAacProfile*/) {
+ return false;
+}
+
+bool AVUtils::mapAACProfileToAudioFormat(const sp<AMessage> &, audio_format_t &,
+ uint64_t /*eAacProfile*/) {
+ return false;
+}
+
+bool AVUtils::isEnhancedExtension(const char *) {
+ return false;
+}
+
+bool AVUtils::HEVCMuxer::reassembleHEVCCSD(const AString &mime, sp<ABuffer> csd0, sp<MetaData> &meta) {
+ if (!isVideoHEVC(mime.c_str())) {
+ return false;
+ }
+ void *csd = NULL;
+ size_t size = 0;
+
+ if (makeHEVCCodecSpecificData(csd0->data(), csd0->size(), &csd, &size) == OK) {
+ meta->setData(kKeyHVCC, kTypeHVCC, csd, size);
+ free(csd);
+ return true;
+ }
+ ALOGE("Failed to reassemble HVCC data");
+ return false;
+}
+
+void AVUtils::HEVCMuxer::writeHEVCFtypBox(MPEG4Writer *writer) {
+ ALOGV("writeHEVCFtypBox called");
+ writer->writeFourcc("3gp5");
+ writer->writeInt32(0);
+ writer->writeFourcc("hvc1");
+ writer->writeFourcc("hev1");
+ writer->writeFourcc("3gp5");
+}
+
+status_t AVUtils::HEVCMuxer::makeHEVCCodecSpecificData(
+ const uint8_t *data, size_t size, void** codecSpecificData,
+ size_t *codecSpecificDataSize) {
+ ALOGV("makeHEVCCodecSpecificData called");
+
+ if (*codecSpecificData != NULL) {
+ ALOGE("Already have codec specific data");
+ return ERROR_MALFORMED;
+ }
+
+ if (size < 4) {
+ ALOGE("Codec specific data length too short: %zu", size);
+ return ERROR_MALFORMED;
+ }
+
+ // Data is in the form of HVCCodecSpecificData
+ if (memcmp("\x00\x00\x00\x01", data, 4)) {
+ // 23 byte fixed header
+ if (size < 23) {
+ ALOGE("Codec specific data length too short: %zu", size);
+ return ERROR_MALFORMED;
+ }
+
+ *codecSpecificData = malloc(size);
+
+ if (*codecSpecificData != NULL) {
+ *codecSpecificDataSize = size;
+ memcpy(*codecSpecificData, data, size);
+ return OK;
+ }
+
+ return NO_MEMORY;
+ }
+
+ List<HEVCParamSet> vidParamSets;
+ List<HEVCParamSet> seqParamSets;
+ List<HEVCParamSet> picParamSets;
+
+ if ((*codecSpecificDataSize = parseHEVCCodecSpecificData(data, size,
+ vidParamSets, seqParamSets, picParamSets)) == 0) {
+ ALOGE("cannot parser codec specific data, bailing out");
+ return ERROR_MALFORMED;
+ }
+
+ size_t numOfNALArray = 0;
+ bool doneWritingVPS = true, doneWritingSPS = true, doneWritingPPS = true;
+
+ if (!vidParamSets.empty()) {
+ doneWritingVPS = false;
+ ++numOfNALArray;
+ }
+
+ if (!seqParamSets.empty()) {
+ doneWritingSPS = false;
+ ++numOfNALArray;
+ }
+
+ if (!picParamSets.empty()) {
+ doneWritingPPS = false;
+ ++numOfNALArray;
+ }
+
+ //additional 23 bytes needed (22 bytes for hvc1 header + 1 byte for number of arrays)
+ *codecSpecificDataSize += 23;
+ //needed 3 bytes per NAL array
+ *codecSpecificDataSize += 3 * numOfNALArray;
+
+ int count = 0;
+ void *codecConfigData = malloc(*codecSpecificDataSize);
+ if (codecConfigData == NULL) {
+ ALOGE("Failed to allocate memory, bailing out");
+ return NO_MEMORY;
+ }
+
+ uint8_t *header = (uint8_t *)codecConfigData;
+ // 8 - bit version
+ header[0] = 1;
+ //Profile space 2 bit, tier flag 1 bit and profile IDC 5 bit
+ header[1] = 0x00;
+ // 32 - bit compatibility flag
+ header[2] = 0x00;
+ header[3] = 0x00;
+ header[4] = 0x00;
+ header[5] = 0x00;
+ // 48 - bit general constraint indicator flag
+ header[6] = header[7] = header[8] = 0x00;
+ header[9] = header[10] = header[11] = 0x00;
+ // 8 - bit general IDC level
+ header[12] = 0x00;
+ // 4 - bit reserved '1111'
+ // 12 - bit spatial segmentation idc
+ header[13] = 0xf0;
+ header[14] = 0x00;
+ // 6 - bit reserved '111111'
+ // 2 - bit parallelism Type
+ header[15] = 0xfc;
+ // 6 - bit reserved '111111'
+ // 2 - bit chromaFormat
+ header[16] = 0xfc;
+ // 5 - bit reserved '11111'
+ // 3 - bit DepthLumaMinus8
+ header[17] = 0xf8;
+ // 5 - bit reserved '11111'
+ // 3 - bit DepthChromaMinus8
+ header[18] = 0xf8;
+ // 16 - bit average frame rate
+ header[19] = header[20] = 0x00;
+ // 2 - bit constant frame rate
+ // 3 - bit num temporal layers
+ // 1 - bit temporal nested
+ // 2 - bit lengthSizeMinusOne
+ header[21] = 0x07;
+
+ // 8-bit number of NAL types
+ header[22] = (uint8_t)numOfNALArray;
+
+ header += 23;
+ count += 23;
+
+ bool ifProfileIDCAlreadyFilled = false;
+
+ if (!doneWritingVPS) {
+ doneWritingVPS = true;
+ ALOGV("Writing VPS");
+ //8-bit, last 6 bit for NAL type
+ header[0] = 0x20; // NAL type is VPS
+ //16-bit, number of nal Units
+ uint16_t vidParamSetLength = vidParamSets.size();
+ header[1] = vidParamSetLength >> 8;
+ header[2] = vidParamSetLength & 0xff;
+
+ header += 3;
+ count += 3;
+
+ for (List<HEVCParamSet>::iterator it = vidParamSets.begin();
+ it != vidParamSets.end(); ++it) {
+ // 16-bit video parameter set length
+ uint16_t vidParamSetLength = it->mLength;
+ header[0] = vidParamSetLength >> 8;
+ header[1] = vidParamSetLength & 0xff;
+
+ extractNALRBSPData(it->mData, it->mLength,
+ (uint8_t **)&codecConfigData,
+ &ifProfileIDCAlreadyFilled);
+
+ // VPS NAL unit (video parameter length bytes)
+ memcpy(&header[2], it->mData, vidParamSetLength);
+ header += (2 + vidParamSetLength);
+ count += (2 + vidParamSetLength);
+ }
+ }
+
+ if (!doneWritingSPS) {
+ doneWritingSPS = true;
+ ALOGV("Writting SPS");
+ //8-bit, last 6 bit for NAL type
+ header[0] = 0x21; // NAL type is SPS
+ //16-bit, number of nal Units
+ uint16_t seqParamSetLength = seqParamSets.size();
+ header[1] = seqParamSetLength >> 8;
+ header[2] = seqParamSetLength & 0xff;
+
+ header += 3;
+ count += 3;
+
+ for (List<HEVCParamSet>::iterator it = seqParamSets.begin();
+ it != seqParamSets.end(); ++it) {
+ // 16-bit sequence parameter set length
+ uint16_t seqParamSetLength = it->mLength;
+
+ // 16-bit number of NAL units of this type
+ header[0] = seqParamSetLength >> 8;
+ header[1] = seqParamSetLength & 0xff;
+
+ extractNALRBSPData(it->mData, it->mLength,
+ (uint8_t **)&codecConfigData,
+ &ifProfileIDCAlreadyFilled);
+
+ // SPS NAL unit (sequence parameter length bytes)
+ memcpy(&header[2], it->mData, seqParamSetLength);
+ header += (2 + seqParamSetLength);
+ count += (2 + seqParamSetLength);
+ }
+ }
+
+ if (!doneWritingPPS) {
+ doneWritingPPS = true;
+ ALOGV("writing PPS");
+ //8-bit, last 6 bit for NAL type
+ header[0] = 0x22; // NAL type is PPS
+ //16-bit, number of nal Units
+ uint16_t picParamSetLength = picParamSets.size();
+ header[1] = picParamSetLength >> 8;
+ header[2] = picParamSetLength & 0xff;
+
+ header += 3;
+ count += 3;
+
+ for (List<HEVCParamSet>::iterator it = picParamSets.begin();
+ it != picParamSets.end(); ++it) {
+ // 16-bit picture parameter set length
+ uint16_t picParamSetLength = it->mLength;
+ header[0] = picParamSetLength >> 8;
+ header[1] = picParamSetLength & 0xff;
+
+ // PPS Nal unit (picture parameter set length bytes)
+ memcpy(&header[2], it->mData, picParamSetLength);
+ header += (2 + picParamSetLength);
+ count += (2 + picParamSetLength);
+ }
+ }
+ *codecSpecificData = codecConfigData;
+ return OK;
+}
+
+const char *AVUtils::HEVCMuxer::getFourCCForMime(const char * mime) {
+ if (isVideoHEVC(mime)) {
+ return "hvc1";
+ }
+ return NULL;
+}
+
+void AVUtils::HEVCMuxer::writeHvccBox(MPEG4Writer *writer,
+ void *codecSpecificData, size_t codecSpecificDataSize,
+ bool useNalLengthFour) {
+ ALOGV("writeHvccBox called");
+ CHECK(codecSpecificData);
+ CHECK_GE(codecSpecificDataSize, 23);
+
+ // Patch hvcc's lengthSize field to match the number
+ // of bytes we use to indicate the size of a nal unit.
+ uint8_t *ptr = (uint8_t *)codecSpecificData;
+ ptr[21] = (ptr[21] & 0xfc) | (useNalLengthFour? 3 : 1);
+ writer->beginBox("hvcC");
+ writer->write(codecSpecificData, codecSpecificDataSize);
+ writer->endBox(); // hvcC
+}
+
+bool AVUtils::HEVCMuxer::isVideoHEVC(const char * mime) {
+ return (!strncasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC,
+ strlen(MEDIA_MIMETYPE_VIDEO_HEVC)));
+}
+
+status_t AVUtils::HEVCMuxer::extractNALRBSPData(const uint8_t *data,
+ size_t size,
+ uint8_t **header,
+ bool *alreadyFilled) {
+ ALOGV("extractNALRBSPData called");
+ CHECK_GE(size, 2);
+
+ uint8_t type = data[0] >> 1;
+ type = 0x3f & type;
+
+ //start parsing here
+ size_t rbspSize = 0;
+ uint8_t *rbspData = (uint8_t *) malloc(size);
+
+ if (rbspData == NULL) {
+ ALOGE("allocation failed");
+ return UNKNOWN_ERROR;
+ }
+
+    // Populate the RBSP data starting at offset 2 (past the 2-byte NAL unit header);
+    // whenever the emulation-prevention pattern 0x000003 is found, drop the 0x03 byte.
+ size_t itt = 2;
+ while (itt < size) {
+ if ((itt+2 < size) && (!memcmp("\x00\x00\x03", &data[itt], 3) )) {
+ rbspData[rbspSize++] = data[itt++];
+ rbspData[rbspSize++] = data[itt++];
+ itt++;
+ } else {
+ rbspData[rbspSize++] = data[itt++];
+ }
+ }
+
+ uint8_t maxSubLayerMinus1 = 0;
+
+ //parser profileTierLevel
+    if (type == kHEVCNalUnitTypeVidParamSet) { // if VPS
+        ALOGV("It's a VPS; profile_tier_level parsing starts at the 5th RBSP byte");
+ if (rbspSize < 5) {
+ free(rbspData);
+ return ERROR_MALFORMED;
+ }
+
+ maxSubLayerMinus1 = 0x0E & rbspData[1];
+ maxSubLayerMinus1 = maxSubLayerMinus1 >> 1;
+ parserProfileTierLevel(&rbspData[4], rbspSize - 4, header, alreadyFilled);
+
+ } else if (type == kHEVCNalUnitTypeSeqParamSet) {
+        ALOGV("It's an SPS; profile_tier_level parsing starts at the 2nd RBSP byte");
+ if (rbspSize < 2) {
+ free(rbspData);
+ return ERROR_MALFORMED;
+ }
+
+ maxSubLayerMinus1 = 0x0E & rbspData[0];
+ maxSubLayerMinus1 = maxSubLayerMinus1 >> 1;
+
+ parserProfileTierLevel(&rbspData[1], rbspSize - 1, header, alreadyFilled);
+ }
+ free(rbspData);
+ return OK;
+}
+
+status_t AVUtils::HEVCMuxer::parserProfileTierLevel(const uint8_t *data, size_t size,
+ uint8_t **header, bool *alreadyFilled) {
+ CHECK_GE(size, 12);
+ uint8_t *tmpHeader = *header;
+ ALOGV("parserProfileTierLevel called");
+ uint8_t generalProfileSpace; //2 bit
+ uint8_t generalTierFlag; //1 bit
+ uint8_t generalProfileIdc; //5 bit
+ uint8_t generalProfileCompatibilityFlag[4];
+ uint8_t generalConstraintIndicatorFlag[6];
+ uint8_t generalLevelIdc; //8 bit
+
+ // Need first 12 bytes
+
+ // First byte will give below info
+    generalProfileSpace = 0xC0 & data[0];
+    generalProfileSpace = generalProfileSpace >> 6;
+    generalTierFlag = 0x20 & data[0];
+    generalTierFlag = generalTierFlag >> 5;
+    generalProfileIdc = 0x1F & data[0];
+
+ // Next 4 bytes is compatibility flag
+ memcpy(&generalProfileCompatibilityFlag, &data[1], 4);
+
+ // Next 6 bytes is constraint indicator flag
+ memcpy(&generalConstraintIndicatorFlag, &data[5], 6);
+
+ // Next 1 byte is general Level IDC
+ generalLevelIdc = data[11];
+
+ if (*alreadyFilled) {
+ bool overwriteTierValue = false;
+
+ //find profile space
+        uint8_t prvGeneralProfileSpace; //2 bit
+        prvGeneralProfileSpace = 0xC0 & tmpHeader[1];
+        prvGeneralProfileSpace = prvGeneralProfileSpace >> 6;
+        //previous value needs to be the same as the current one
+        if (prvGeneralProfileSpace != generalProfileSpace) {
+            ALOGW("Profile space mismatch between parameter sets");
+        }
+
+        uint8_t prvGeneralTierFlag = 0x20 & tmpHeader[1];
+        prvGeneralTierFlag = prvGeneralTierFlag >> 5;
+
+ if (prvGeneralTierFlag < generalTierFlag) {
+ overwriteTierValue = true;
+ ALOGV("Found higher tier value, replacing old one");
+ }
+
+ uint8_t prvGeneralProfileIdc = 0x1F & tmpHeader[1];
+
+        if (prvGeneralProfileIdc != generalProfileIdc) {
+            ALOGW("Profile IDC mismatch between parameter sets");
+ }
+
+ if (overwriteTierValue) {
+ tmpHeader[1] = data[0];
+ }
+
+ //general level IDC should be set highest among all
+ if (tmpHeader[12] < data[11]) {
+ tmpHeader[12] = data[11];
+ ALOGV("Found higher level IDC value, replacing old one");
+ }
+
+ } else {
+ *alreadyFilled = true;
+ tmpHeader[1] = data[0];
+ memcpy(&tmpHeader[2], &data[1], 4);
+ memcpy(&tmpHeader[6], &data[5], 6);
+ tmpHeader[12] = data[11];
+ }
+
+ char printCodecConfig[PROPERTY_VALUE_MAX];
+ property_get("hevc.mux.print.codec.config", printCodecConfig, "0");
+
+ if (atoi(printCodecConfig)) {
+ //if property enabled, print these values
+ ALOGI("Start::-----------------");
+ ALOGI("generalProfileSpace = %2x", generalProfileSpace);
+ ALOGI("generalTierFlag = %2x", generalTierFlag);
+ ALOGI("generalProfileIdc = %2x", generalProfileIdc);
+ ALOGI("generalLevelIdc = %2x", generalLevelIdc);
+ ALOGI("generalProfileCompatibilityFlag = %2x %2x %2x %2x", generalProfileCompatibilityFlag[0],
+ generalProfileCompatibilityFlag[1], generalProfileCompatibilityFlag[2],
+ generalProfileCompatibilityFlag[3]);
+ ALOGI("generalConstraintIndicatorFlag = %2x %2x %2x %2x %2x %2x", generalConstraintIndicatorFlag[0],
+ generalConstraintIndicatorFlag[1], generalConstraintIndicatorFlag[2],
+ generalConstraintIndicatorFlag[3], generalConstraintIndicatorFlag[4],
+ generalConstraintIndicatorFlag[5]);
+ ALOGI("End::-----------------");
+ }
+
+ return OK;
+}
+static const uint8_t *findNextStartCode(
+ const uint8_t *data, size_t length) {
+ ALOGV("findNextStartCode: %p %zu", data, length);
+
+ size_t bytesLeft = length;
+
+ while (bytesLeft > 4 &&
+ memcmp("\x00\x00\x00\x01", &data[length - bytesLeft], 4)) {
+ --bytesLeft;
+ }
+
+ if (bytesLeft <= 4) {
+ bytesLeft = 0; // Last parameter set
+ }
+
+ return &data[length - bytesLeft];
+}
+
+const uint8_t *AVUtils::HEVCMuxer::parseHEVCParamSet(
+ const uint8_t *data, size_t length, List<HEVCParamSet> &paramSetList, size_t *paramSetLen) {
+ ALOGV("parseHEVCParamSet called");
+ const uint8_t *nextStartCode = findNextStartCode(data, length);
+ *paramSetLen = nextStartCode - data;
+ if (*paramSetLen == 0) {
+ ALOGE("Param set is malformed, since its length is 0");
+ return NULL;
+ }
+
+ HEVCParamSet paramSet(*paramSetLen, data);
+ paramSetList.push_back(paramSet);
+
+ return nextStartCode;
+}
+
+static void getHEVCNalUnitType(uint8_t byte, uint8_t* type) {
+    ALOGV("getHEVCNalUnitType: %d", (int)byte);
+ // nal_unit_type: 6-bit unsigned integer
+ *type = (byte & 0x7E) >> 1;
+}
+
+size_t AVUtils::HEVCMuxer::parseHEVCCodecSpecificData(
+        const uint8_t *data, size_t size, List<HEVCParamSet> &vidParamSet,
+        List<HEVCParamSet> &seqParamSet, List<HEVCParamSet> &picParamSet) {
+ ALOGV("parseHEVCCodecSpecificData called");
+ // Data starts with a start code.
+ // VPS, SPS and PPS are separated with start codes.
+ uint8_t type = kHEVCNalUnitTypeVidParamSet;
+ bool gotVps = false;
+ bool gotSps = false;
+ bool gotPps = false;
+ const uint8_t *tmp = data;
+ const uint8_t *nextStartCode = data;
+ size_t bytesLeft = size;
+ size_t paramSetLen = 0;
+ size_t codecSpecificDataSize = 0;
+ while (bytesLeft > 4 && !memcmp("\x00\x00\x00\x01", tmp, 4)) {
+ getHEVCNalUnitType(*(tmp + 4), &type);
+ if (type == kHEVCNalUnitTypeVidParamSet) {
+ nextStartCode = parseHEVCParamSet(tmp + 4, bytesLeft - 4, vidParamSet, &paramSetLen);
+ if (!gotVps) {
+ gotVps = true;
+ }
+ } else if (type == kHEVCNalUnitTypeSeqParamSet) {
+ nextStartCode = parseHEVCParamSet(tmp + 4, bytesLeft - 4, seqParamSet, &paramSetLen);
+ if (!gotSps) {
+ gotSps = true;
+ }
+
+ } else if (type == kHEVCNalUnitTypePicParamSet) {
+ nextStartCode = parseHEVCParamSet(tmp + 4, bytesLeft - 4, picParamSet, &paramSetLen);
+ if (!gotPps) {
+ gotPps = true;
+ }
+ } else {
+            ALOGE("Only VPS, SPS and PPS NAL units are expected");
+ return 0;
+ }
+
+ if (nextStartCode == NULL) {
+ ALOGE("Next start code is NULL");
+ return 0;
+ }
+
+ // Move on to find the next parameter set
+ bytesLeft -= nextStartCode - tmp;
+ tmp = nextStartCode;
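+        // each parameter set is stored with a 2-byte length prefix in the
+        // codec-specific data built later, hence the extra 2 bytes reserved per set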
+ codecSpecificDataSize += (2 + paramSetLen);
+ }
+
+#if 0
+// Not enforcing this check for now, but it might be needed:
+    if (!gotVps || !gotSps || !gotPps) {
+ return 0;
+ }
+#endif
+
+ return codecSpecificDataSize;
+}
+
+void AVUtils::HEVCMuxer::getHEVCCodecSpecificDataFromInputFormatIfPossible(
+ sp<MetaData> meta, void ** codecSpecificData,
+ size_t * codecSpecificDataSize, bool * gotAllCodecSpecificData) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ //kKeyHVCC needs to be populated
+ if (meta->findData(kKeyHVCC, &type, &data, &size)) {
+ *codecSpecificData = malloc(size);
+ CHECK(*codecSpecificData != NULL);
+ *codecSpecificDataSize = size;
+ memcpy(*codecSpecificData, data, size);
+ *gotAllCodecSpecificData = true;
+ } else {
+        ALOGW("getHEVCCodecSpecificDataFromInputFormatIfPossible: failed to find kKeyHVCC");
+ }
+}
+
+bool AVUtils::isAudioMuxFormatSupported(const char * mime) {
+ if (mime == NULL) {
+ ALOGE("NULL audio mime type");
+ return false;
+ }
+
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mime)
+ || !strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mime)
+ || !strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime)) {
+ return true;
+ }
+ return false;
+}
+
+void AVUtils::cacheCaptureBuffers(sp<ICamera> camera, video_encoder encoder) {
+ if (camera != NULL) {
+        char deviceName[PROPERTY_VALUE_MAX];
+        property_get("ro.board.platform", deviceName, "0");
+        if (!strncmp(deviceName, "msm8909", 7)) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ String8 s = camera->getParameters();
+ CameraParameters params(s);
+ const char *enable;
+ if (encoder == VIDEO_ENCODER_H263 ||
+ encoder == VIDEO_ENCODER_MPEG_4_SP) {
+ enable = "1";
+ } else {
+ enable = "0";
+ }
+ params.set("cache-video-buffers", enable);
+ if (camera->setParameters(params.flatten()) != OK) {
+                ALOGE("Failed to enable cached camera buffers");
+ }
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ }
+ }
+}
+
+const char *AVUtils::getCustomCodecsLocation() {
+ return "/etc/media_codecs.xml";
+}
+
+void AVUtils::setIntraPeriod(
+ int, int, const sp<IOMX>,
+ IOMX::node_id) {
+ return;
+}
+
+#ifdef QCOM_HARDWARE
+void AVUtils::HFR::setHFRIfEnabled(
+ const CameraParameters& params,
+ sp<MetaData> &meta) {
+ const char *hfrParam = params.get("video-hfr");
+ int32_t hfr = -1;
+ if (hfrParam != NULL) {
+ hfr = atoi(hfrParam);
+ if (hfr > 0) {
+ ALOGI("Enabling HFR @ %d fps", hfr);
+ meta->setInt32(kKeyHFR, hfr);
+ return;
+ } else {
+            ALOGI("Invalid HFR rate specified: %d", hfr);
+ }
+ }
+
+ const char *hsrParam = params.get("video-hsr");
+ int32_t hsr = -1;
+ if (hsrParam != NULL ) {
+ hsr = atoi(hsrParam);
+ if (hsr > 0) {
+ ALOGI("Enabling HSR @ %d fps", hsr);
+ meta->setInt32(kKeyHSR, hsr);
+ } else {
+            ALOGI("Invalid HSR rate specified: %d", hsr);
+ }
+ }
+}
+
+status_t AVUtils::HFR::initializeHFR(
+ const sp<MetaData> &meta, sp<AMessage> &format,
+ int64_t & /*maxFileDurationUs*/, video_encoder videoEncoder) {
+ status_t retVal = OK;
+
+ int32_t hsr = 0;
+ if (meta->findInt32(kKeyHSR, &hsr) && hsr > 0) {
+ ALOGI("HSR cue found. Override encode fps to %d", hsr);
+ format->setInt32("frame-rate", hsr);
+ return retVal;
+ }
+
+ int32_t hfr = 0;
+ if (!meta->findInt32(kKeyHFR, &hfr) || (hfr <= 0)) {
+ ALOGW("Invalid HFR rate specified");
+ return retVal;
+ }
+
+ int32_t width = 0, height = 0;
+ CHECK(meta->findInt32(kKeyWidth, &width));
+ CHECK(meta->findInt32(kKeyHeight, &height));
+
+    int maxW = 0, maxH = 0, maxFrameRate = 0, maxBitRate = 0;
+    if (getHFRCapabilities(videoEncoder,
+            maxW, maxH, maxFrameRate, maxBitRate) < 0) {
+        ALOGE("Failed to query HFR target capabilities");
+        return ERROR_UNSUPPORTED;
+    }
+
+    if ((width * height * hfr) > (maxW * maxH * maxFrameRate)) {
+        ALOGE("HFR request [%d x %d @%d fps] exceeds "
+                "[%d x %d @%d fps]. Will stay disabled",
+                width, height, hfr, maxW, maxH, maxFrameRate);
+ return ERROR_UNSUPPORTED;
+ }
+
+ int32_t frameRate = 0, bitRate = 0;
+ CHECK(meta->findInt32(kKeyFrameRate, &frameRate));
+ CHECK(format->findInt32("bitrate", &bitRate));
+
+ if (frameRate) {
+ // scale the bitrate proportional to the hfr ratio
+ // to maintain quality, but cap it to max-supported.
+ bitRate = (hfr * bitRate) / frameRate;
+ bitRate = bitRate > maxBitRate ? maxBitRate : bitRate;
+ format->setInt32("bitrate", bitRate);
+
+ int32_t hfrRatio = hfr / frameRate;
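+        // integer division: any fractional remainder of hfr/frameRate is dropped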
+ format->setInt32("frame-rate", hfr);
+ format->setInt32("hfr-ratio", hfrRatio);
+ } else {
+ ALOGE("HFR: Invalid framerate");
+ return BAD_VALUE;
+ }
+
+ return retVal;
+}
+
+void AVUtils::HFR::setHFRRatio(
+ sp<MetaData> &meta, const int32_t hfrRatio) {
+ if (hfrRatio > 0) {
+ meta->setInt32(kKeyHFR, hfrRatio);
+ }
+}
+
+int32_t AVUtils::HFR::getHFRRatio(
+ const sp<MetaData> &meta) {
+ int32_t hfrRatio = 0;
+ meta->findInt32(kKeyHFR, &hfrRatio);
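+    // a ratio of 1 (the fallback) means HFR is not in effect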
+ return hfrRatio ? hfrRatio : 1;
+}
+
+int32_t AVUtils::HFR::getHFRCapabilities(
+ video_encoder codec,
+ int& maxHFRWidth, int& maxHFRHeight, int& maxHFRFps,
+ int& maxBitRate) {
+ maxHFRWidth = maxHFRHeight = maxHFRFps = maxBitRate = 0;
+ MediaProfiles *profiles = MediaProfiles::getInstance();
+
+ if (profiles) {
+ maxHFRWidth = profiles->getVideoEncoderParamByName("enc.vid.hfr.width.max", codec);
+ maxHFRHeight = profiles->getVideoEncoderParamByName("enc.vid.hfr.height.max", codec);
+ maxHFRFps = profiles->getVideoEncoderParamByName("enc.vid.hfr.mode.max", codec);
+ maxBitRate = profiles->getVideoEncoderParamByName("enc.vid.bps.max", codec);
+ }
+
+ return (maxHFRWidth > 0) && (maxHFRHeight > 0) &&
+ (maxHFRFps > 0) && (maxBitRate > 0) ? 1 : -1;
+}
+#else
+void AVUtils::HFR::setHFRIfEnabled(
+ const CameraParameters& /*params*/,
+ sp<MetaData> & /*meta*/) {}
+
+status_t AVUtils::HFR::initializeHFR(
+ const sp<MetaData> & /*meta*/, sp<AMessage> & /*format*/,
+ int64_t & /*maxFileDurationUs*/, video_encoder /*videoEncoder*/) {
+ return OK;
+}
+
+void AVUtils::HFR::setHFRRatio(
+ sp<MetaData> & /*meta*/, const int32_t /*hfrRatio*/) {}
+
+int32_t AVUtils::HFR::getHFRRatio(
+ const sp<MetaData> & /*meta */) {
+ return 1;
+}
+
+int32_t AVUtils::HFR::getHFRCapabilities(
+ video_encoder /*codec*/,
+ int& /*maxHFRWidth*/, int& /*maxHFRHeight*/, int& /*maxHFRFps*/,
+ int& /*maxBitRate*/) {
+ return -1;
+}
+#endif
+
+void AVUtils::extractCustomCameraKeys(
+ const CameraParameters& params, sp<MetaData> &meta) {
+ mHFR.setHFRIfEnabled(params, meta);
+}
+
+// ----- NO TRESPASSING BEYOND THIS LINE ------
+AVUtils::AVUtils() {}
+
+AVUtils::~AVUtils() {}
+
+//static
+AVUtils *AVUtils::sInst =
+ ExtensionsLoader<AVUtils>::createInstance("createAVUtils");
+
+} //namespace android
+
diff --git a/media/libavextensions/stagefright/ExtendedMediaDefs.cpp b/media/libavextensions/stagefright/ExtendedMediaDefs.cpp
new file mode 100644
index 0000000..cf2683c
--- /dev/null
+++ b/media/libavextensions/stagefright/ExtendedMediaDefs.cpp
@@ -0,0 +1,68 @@
+/*Copyright (c) 2012 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stagefright/ExtendedMediaDefs.h>
+
+namespace android {
+
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+const char *MEDIA_MIMETYPE_VIDEO_WMV = "video/x-ms-wmv";
+const char *MEDIA_MIMETYPE_VIDEO_WMV_VC1 = "video/wvc1";
+const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
+const char *MEDIA_MIMETYPE_AUDIO_WMA_PRO = "audio/x-ms-wma-pro";
+const char *MEDIA_MIMETYPE_AUDIO_WMA_LOSSLESS = "audio/x-ms-wma-lossless";
+const char *MEDIA_MIMETYPE_CONTAINER_ASF = "video/x-ms-asf";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX = "video/divx";
+const char *MEDIA_MIMETYPE_CONTAINER_AAC = "audio/aac";
+const char *MEDIA_MIMETYPE_CONTAINER_QCP = "audio/vnd.qcelp";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX311 = "video/divx311";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX4 = "video/divx4";
+const char *MEDIA_MIMETYPE_CONTAINER_MPEG2 = "video/mp2";
+const char *MEDIA_MIMETYPE_CONTAINER_3G2 = "video/3g2";
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/dts";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_LBR = "audio/dts-lbr";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb-plus";
+const char *MEDIA_MIMETYPE_AUDIO_AIFF = "audio/x-aiff";
+const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
+const char *MEDIA_MIMETYPE_AUDIO_APE = "audio/x-ape";
+const char *MEDIA_MIMETYPE_CONTAINER_QCAMR_NB = "audio/qc-amr";
+const char *MEDIA_MIMETYPE_CONTAINER_QCAMR_WB = "audio/qc-amr-wb";
+const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG = "audio/qc-mpeg";
+const char *MEDIA_MIMETYPE_CONTAINER_QCWAV = "audio/qc-wav";
+const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG2TS = "video/qc-mp2ts";
+const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG2PS = "video/qc-mp2ps";
+const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG4 = "video/qc-mp4";
+const char *MEDIA_MIMETYPE_CONTAINER_QCMATROSKA = "video/qc-matroska";
+const char *MEDIA_MIMETYPE_CONTAINER_QCOGG = "video/qc-ogg";
+const char *MEDIA_MIMETYPE_CONTAINER_QCFLV = "video/qc-flv";
+const char *MEDIA_MIMETYPE_VIDEO_VPX = "video/x-vnd.on2.vp8"; //backward compatibility
+const char *MEDIA_MIMETYPE_CONTAINER_QTIFLAC = "audio/qti-flac";
+const char *MEDIA_MIMETYPE_VIDEO_MPEG4_DP = "video/mp4v-esdp";
+
+} // namespace android
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 4a41037..18059b2 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -624,9 +624,12 @@ int Downmix_Configure(downmix_module_t *pDwmModule, effect_config_t *pConfig, bo
pDownmixer->apply_volume_correction = false;
pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
} else {
- // when configuring the effect, do not allow a blank channel mask
- if (pConfig->inputCfg.channels == 0) {
- ALOGE("Downmix_Configure error: input channel mask can't be 0");
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ if ((pConfig->inputCfg.channels == 0) ||
+ (Downmix_foldGeneric(pConfig->inputCfg.channels,
+ NULL, NULL, 0, false) == false)) {
+ ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+ pConfig->inputCfg.channels);
return -EINVAL;
}
pDownmixer->input_channel_count =
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index db7865a..c0a1c57 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -456,7 +456,9 @@ int init() {
if (ignoreFxConfFiles) {
ALOGI("Audio effects in configuration files will be ignored");
} else {
- if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
+ if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE2, R_OK) == 0) {
+ loadEffectConfigFile(AUDIO_EFFECT_VENDOR_CONFIG_FILE2);
+ } else if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
loadEffectConfigFile(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
} else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
loadEffectConfigFile(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 14a1a74..f0afd39 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -29,6 +29,7 @@
#include "EffectBundle.h"
#include "math.h"
+#include <media/stagefright/foundation/ADebug.h>
// effect_handle_t interface implementation for bass boost
extern "C" const struct effect_interface_s gLvmEffectInterface;
@@ -226,7 +227,7 @@ extern "C" int EffectCreate(const effect_uuid_t *uuid,
pContext->pBundledContext->bVirtualizerTempDisabled = LVM_FALSE;
pContext->pBundledContext->nOutputDevice = AUDIO_DEVICE_NONE;
pContext->pBundledContext->nVirtualizerForcedDevice = AUDIO_DEVICE_NONE;
- pContext->pBundledContext->NumberEffectsEnabled = 0;
+ pContext->pBundledContext->EffectsBitMap = 0;
pContext->pBundledContext->NumberEffectsCalled = 0;
pContext->pBundledContext->firstVolume = LVM_TRUE;
pContext->pBundledContext->volume = 0;
@@ -366,28 +367,28 @@ extern "C" int EffectRelease(effect_handle_t handle){
ALOGV("\tEffectRelease LVM_BASS_BOOST Clearing global intstantiated flag");
pSessionContext->bBassInstantiated = LVM_FALSE;
if(pContext->pBundledContext->SamplesToExitCountBb > 0){
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_BASS_BOOST);
}
pContext->pBundledContext->SamplesToExitCountBb = 0;
} else if(pContext->EffectType == LVM_VIRTUALIZER) {
ALOGV("\tEffectRelease LVM_VIRTUALIZER Clearing global intstantiated flag");
pSessionContext->bVirtualizerInstantiated = LVM_FALSE;
if(pContext->pBundledContext->SamplesToExitCountVirt > 0){
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_VIRTUALIZER);
}
pContext->pBundledContext->SamplesToExitCountVirt = 0;
} else if(pContext->EffectType == LVM_EQUALIZER) {
ALOGV("\tEffectRelease LVM_EQUALIZER Clearing global intstantiated flag");
pSessionContext->bEqualizerInstantiated =LVM_FALSE;
if(pContext->pBundledContext->SamplesToExitCountEq > 0){
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_EQUALIZER);
}
pContext->pBundledContext->SamplesToExitCountEq = 0;
} else if(pContext->EffectType == LVM_VOLUME) {
ALOGV("\tEffectRelease LVM_VOLUME Clearing global intstantiated flag");
pSessionContext->bVolumeInstantiated = LVM_FALSE;
if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE){
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_VOLUME);
}
} else {
ALOGV("\tLVM_ERROR : EffectRelease : Unsupported effect\n\n\n\n\n\n\n");
@@ -563,6 +564,7 @@ int LvmBundle_init(EffectContext *pContext){
for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
if (MemTab.Region[i].Size != 0){
MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
+ CHECK(MemTab.Region[i].pBaseAddress != NULL);
if (MemTab.Region[i].pBaseAddress == LVM_NULL){
ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32
@@ -729,6 +731,7 @@ int LvmBundle_process(LVM_INT16 *pIn,
}
pContext->pBundledContext->workBuffer =
(LVM_INT16 *)malloc(frameCount * sizeof(LVM_INT16) * 2);
+ CHECK(pContext->pBundledContext->workBuffer != NULL);
pContext->pBundledContext->frameCount = frameCount;
}
pOutTmp = pContext->pBundledContext->workBuffer;
@@ -2753,7 +2756,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
- pContext->pBundledContext->NumberEffectsEnabled++;
+ pContext->pBundledContext->EffectsBitMap |= (1 << LVM_BASS_BOOST);
}
pContext->pBundledContext->SamplesToExitCountBb =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
@@ -2766,7 +2769,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
- pContext->pBundledContext->NumberEffectsEnabled++;
+ pContext->pBundledContext->EffectsBitMap |= (1 << LVM_EQUALIZER);
}
pContext->pBundledContext->SamplesToExitCountEq =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
@@ -2778,7 +2781,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
- pContext->pBundledContext->NumberEffectsEnabled++;
+ pContext->pBundledContext->EffectsBitMap |= (1 << LVM_VIRTUALIZER);
}
pContext->pBundledContext->SamplesToExitCountVirt =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
@@ -2790,7 +2793,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
return -EINVAL;
}
- pContext->pBundledContext->NumberEffectsEnabled++;
+ pContext->pBundledContext->EffectsBitMap |= (1 << LVM_VOLUME);
pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
break;
default:
@@ -2807,6 +2810,9 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
ALOGV("\tEffect_setEnabled() LVM_BASS_BOOST is already disabled");
return -EINVAL;
}
+ if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_BASS_BOOST);
+ }
pContext->pBundledContext->bBassEnabled = LVM_FALSE;
break;
case LVM_EQUALIZER:
@@ -2814,6 +2820,9 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
ALOGV("\tEffect_setEnabled() LVM_EQUALIZER is already disabled");
return -EINVAL;
}
+ if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_EQUALIZER);
+ }
pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
break;
case LVM_VIRTUALIZER:
@@ -2821,6 +2830,9 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
ALOGV("\tEffect_setEnabled() LVM_VIRTUALIZER is already disabled");
return -EINVAL;
}
+ if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_VIRTUALIZER);
+ }
pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
break;
case LVM_VOLUME:
@@ -2828,6 +2840,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
ALOGV("\tEffect_setEnabled() LVM_VOLUME is already disabled");
return -EINVAL;
}
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_VOLUME);
pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
break;
default:
@@ -2877,7 +2890,7 @@ int Effect_process(effect_handle_t self,
LVM_INT16 *out = (LVM_INT16 *)outBuffer->raw;
//ALOGV("\tEffect_process Start : Enabled = %d Called = %d (%8d %8d %8d)",
-//pContext->pBundledContext->NumberEffectsEnabled,pContext->pBundledContext->NumberEffectsCalled,
+//popcount(pContext->pBundledContext->EffectsBitMap), pContext->pBundledContext->NumberEffectsCalled,
// pContext->pBundledContext->SamplesToExitCountBb,
// pContext->pBundledContext->SamplesToExitCountVirt,
// pContext->pBundledContext->SamplesToExitCountEq);
@@ -2911,7 +2924,7 @@ int Effect_process(effect_handle_t self,
}
if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_BASS_BOOST);
ALOGV("\tEffect_process() this is the last frame for LVM_BASS_BOOST");
}
}
@@ -2919,7 +2932,7 @@ int Effect_process(effect_handle_t self,
(pContext->EffectType == LVM_VOLUME)){
//ALOGV("\tEffect_process() LVM_VOLUME Effect is not enabled");
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_VOLUME);
}
if ((pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE)&&
(pContext->EffectType == LVM_EQUALIZER)){
@@ -2931,7 +2944,7 @@ int Effect_process(effect_handle_t self,
}
if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_EQUALIZER);
ALOGV("\tEffect_process() this is the last frame for LVM_EQUALIZER");
}
}
@@ -2945,7 +2958,7 @@ int Effect_process(effect_handle_t self,
}
if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ pContext->pBundledContext->EffectsBitMap &= ~(1 << LVM_VIRTUALIZER);
ALOGV("\tEffect_process() this is the last frame for LVM_VIRTUALIZER");
}
}
@@ -2955,9 +2968,9 @@ int Effect_process(effect_handle_t self,
}
if(pContext->pBundledContext->NumberEffectsCalled ==
- pContext->pBundledContext->NumberEffectsEnabled){
+ popcount(pContext->pBundledContext->EffectsBitMap)){
//ALOGV("\tEffect_process Calling process with %d effects enabled, %d called: Effect %d",
- //pContext->pBundledContext->NumberEffectsEnabled,
+ //popcount(pContext->pBundledContext->EffectsBitMap),
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
if(status == -ENODATA){
@@ -2976,7 +2989,7 @@ int Effect_process(effect_handle_t self,
}
} else {
//ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
- //pContext->pBundledContext->NumberEffectsEnabled,
+ //popcount(pContext->pBundledContext->EffectsBitMap),
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
// 2 is for stereo input
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
@@ -3028,9 +3041,9 @@ int Effect_command(effect_handle_t self,
// called the number of effect called could be greater
// pContext->pBundledContext->NumberEffectsCalled = 0;
- //ALOGV("\tEffect_command NumberEffectsCalled = %d, NumberEffectsEnabled = %d",
- // pContext->pBundledContext->NumberEffectsCalled,
- // pContext->pBundledContext->NumberEffectsEnabled);
+ //ALOGV("\tEffect_command: Enabled = %d Called = %d",
+ // popcount(pContext->pBundledContext->EffectsBitMap),
+ // pContext->pBundledContext->NumberEffectsCalled);
switch (cmdCode){
case EFFECT_CMD_INIT:
@@ -3314,7 +3327,7 @@ int Effect_command(effect_handle_t self,
(device == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER)){
ALOGV("\tEFFECT_CMD_SET_DEVICE device is invalid for LVM_BASS_BOOST %d",
*(int32_t *)pCmdData);
- ALOGV("\tEFFECT_CMD_SET_DEVICE temporary disable LVM_BAS_BOOST");
+ ALOGV("\tEFFECT_CMD_SET_DEVICE temporary disable LVM_BASS_BOOST");
// If a device doesnt support bassboost the effect must be temporarily disabled
// the effect must still report its original state as this can only be changed
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 9459b87..5fa5668 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -75,8 +75,8 @@ struct BundledEffectContext{
bool bVirtualizerTempDisabled; /* Flag for effect to be re-enabled */
audio_devices_t nOutputDevice; /* Output device for the effect */
audio_devices_t nVirtualizerForcedDevice; /* Forced device virtualization mode*/
- int NumberEffectsEnabled; /* Effects in this session */
int NumberEffectsCalled; /* Effects called so far */
+ uint32_t EffectsBitMap; /* Effects enable bit mask */
bool firstVolume; /* No smoothing on first Vol change */
// Saved parameters for each effect */
// Bass Boost
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 9f836f0..efcd541 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -70,12 +70,21 @@ LOCAL_SRC_FILES:= \
StringArray.cpp \
AudioPolicy.cpp
+
+#QTI Resampler
+ifeq ($(call is-vendor-board-platform,QCOM), true)
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_EXTN_RESAMPLER)), true)
+LOCAL_CFLAGS += -DQTI_RESAMPLER
+endif
+endif
+#QTI Resampler
+
LOCAL_SHARED_LIBRARIES := \
libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
libcamera_client libstagefright_foundation \
libgui libdl libaudioutils libnbaio
-LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper libavmediaextentions
LOCAL_MODULE:= libmedia
@@ -85,6 +94,7 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/frameworks/av/include/media/ \
$(TOP)/frameworks/av/media/libstagefright \
+ $(TOP)/frameworks/av/media/libavextensions \
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 8c8cf45..535f459 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -32,6 +32,7 @@ const char * const AudioParameter::keyChannels = AUDIO_PARAMETER_STREAM_CHANNELS
const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
+const char * const AudioParameter::keyDevShutdown = AUDIO_PARAMETER_KEY_DEV_SHUTDOWN;
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 011b31f..40cad59 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -26,6 +26,7 @@
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
+#include "SeempLog.h"
#define WAIT_PERIOD_MS 10
@@ -66,7 +67,7 @@ status_t AudioRecord::getMinFrameCount(
// ---------------------------------------------------------------------------
AudioRecord::AudioRecord(const String16 &opPackageName)
- : mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+ : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
@@ -88,7 +89,8 @@ AudioRecord::AudioRecord(
int uid,
pid_t pid,
const audio_attributes_t* pAttributes)
- : mStatus(NO_INIT),
+ : mActive(false),
+ mStatus(NO_INIT),
mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -272,10 +274,14 @@ status_t AudioRecord::set(
}
mStatus = NO_ERROR;
- mActive = false;
mUserData = user;
// TODO: add audio hardware input latency here
- mLatency = (1000*mFrameCount) / sampleRate;
+ if (mTransfer == TRANSFER_CALLBACK ||
+ mTransfer == TRANSFER_SYNC) {
+ mLatency = (1000*mNotificationFramesAct) / sampleRate;
+ } else {
+ mLatency = (1000*mFrameCount) / sampleRate;
+ }
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
@@ -293,6 +299,7 @@ status_t AudioRecord::set(
status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
ALOGV("start, sync event %d trigger session %d", event, triggerSession);
+ SEEMPLOG_RECORD(71,"");
AutoMutex lock(mLock);
if (mActive) {
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 9d645f0..2e9fca9 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -37,7 +37,7 @@ sp<IAudioFlinger> AudioSystem::gAudioFlinger;
sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL;
-
+audio_session_callback AudioSystem::gAudioSessionCallback = NULL;
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
@@ -652,6 +652,17 @@ status_t AudioSystem::AudioFlingerClient::removeAudioDeviceCallback(
gDynPolicyCallback = cb;
}
+/*static*/ status_t AudioSystem::setAudioSessionCallback(audio_session_callback cb)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ Mutex::Autolock _l(gLock);
+ gAudioSessionCallback = cb;
+
+ return NO_ERROR;
+}
+
// client singleton for AudioPolicyService binder interface
// protected by gLockAPS
sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
@@ -1223,6 +1234,32 @@ void AudioSystem::AudioPolicyServiceClient::onDynamicPolicyMixStateUpdate(
}
}
+// ---------------------------------------------------------------------------
+
+status_t AudioSystem::listAudioSessions(audio_stream_type_t stream,
+ Vector< sp<AudioSessionInfo>> &sessions)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->listAudioSessions(stream, sessions);
+}
+
+void AudioSystem::AudioPolicyServiceClient::onOutputSessionEffectsUpdate(
+ sp<AudioSessionInfo>& info, bool added)
+{
+ ALOGV("AudioPolicyServiceClient::onOutputSessionEffectsUpdate(%d, %d, %d)",
+ info->mStream, info->mSessionId, added);
+ audio_session_callback cb = NULL;
+ {
+ Mutex::Autolock _l(AudioSystem::gLock);
+ cb = gAudioSessionCallback;
+ }
+
+ if (cb != NULL) {
+ cb(AUDIO_OUTPUT_SESSION_EFFECTS_UPDATE, info, added);
+ }
+}
+
void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
{
{
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index ff5fe1d..ae016ef 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -30,6 +30,8 @@
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>
+#include "media/AVMediaExtensions.h"
+#include <cutils/properties.h>
#define WAIT_PERIOD_MS 10
#define WAIT_STREAM_END_TIMEOUT_SEC 120
@@ -163,11 +165,13 @@ status_t AudioTrack::getMinFrameCount(
AudioTrack::AudioTrack()
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPlaybackRateSet(false)
{
mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
mAttributes.usage = AUDIO_USAGE_UNKNOWN;
@@ -193,11 +197,13 @@ AudioTrack::AudioTrack(
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPlaybackRateSet(false)
{
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
@@ -223,11 +229,13 @@ AudioTrack::AudioTrack(
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPlaybackRateSet(false)
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -473,7 +481,6 @@ status_t AudioTrack::set(
}
mStatus = NO_ERROR;
- mState = STATE_STOPPED;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -541,6 +548,12 @@ status_t AudioTrack::start()
// force refresh of remaining frames by processAudioBuffer() as last
// write before stop could be partial.
mRefreshRemaining = true;
+
+ // for static track, clear the old flags when start from stopped state
+ if (mSharedBuffer != 0)
+ android_atomic_and(
+ ~(CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
+ &mCblk->mFlags);
}
mNewPosition = mPosition + mUpdatePeriod;
int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
@@ -706,7 +719,7 @@ status_t AudioTrack::setVolume(float left, float right)
mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
- if (isOffloaded_l()) {
+ if (isOffloaded_l() && mAudioTrack != NULL) {
mAudioTrack->signal();
}
return NO_ERROR;
@@ -831,13 +844,13 @@ status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
}
// Check resampler ratios are within bounds
- if (effectiveRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
+ if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
playbackRate.mSpeed, playbackRate.mPitch);
return BAD_VALUE;
}
- if (effectiveRate * AUDIO_RESAMPLER_UP_RATIO_MAX < mSampleRate) {
+ if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
playbackRate.mSpeed, playbackRate.mPitch);
return BAD_VALUE;
@@ -846,6 +859,14 @@ status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
//set effective rates
mProxy->setPlaybackRate(playbackRateTemp);
mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
+
+ // fallback out of Direct PCM if setPlaybackRate is called on a track offloaded
+ // session. Do this by setting mPlaybackRateSet to true
+ if (mTrackOffloaded) {
+ mPlaybackRateSet = true;
+ android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ }
+
return NO_ERROR;
}
@@ -1001,10 +1022,18 @@ status_t AudioTrack::getPosition(uint32_t *position)
return NO_ERROR;
}
+ if (AVMediaUtils::get()->AudioTrackIsPcmOffloaded(mFormat) &&
+ AVMediaUtils::get()->AudioTrackGetPosition(this, position) == NO_ERROR) {
+ return NO_ERROR;
+ }
+
if (mOutput != AUDIO_IO_HANDLE_NONE) {
uint32_t halFrames; // actually unused
- (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
- // FIXME: on getRenderPosition() error, we return OK with frame position 0.
+ status_t status = AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
+ if (status != NO_ERROR) {
+ ALOGW("failed to getRenderPosition for offload session status %d", status);
+ return INVALID_OPERATION;
+ }
}
// FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
// due to hardware latency. We leave this behavior for now.
@@ -1129,11 +1158,16 @@ status_t AudioTrack::createTrack_l()
audio_stream_type_t streamType = mStreamType;
audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
- status_t status;
- status = AudioSystem::getOutputForAttr(attr, &output,
+ audio_offload_info_t tOffloadInfo = AUDIO_INFO_INITIALIZER;
+ if (mPlaybackRateSet == true && mOffloadInfo == NULL && mFormat == AUDIO_FORMAT_PCM_16_BIT) {
+ mOffloadInfo = &tOffloadInfo;
+ }
+ status_t status = AudioSystem::getOutputForAttr(attr, &output,
(audio_session_t)mSessionId, &streamType, mClientUid,
mSampleRate, mFormat, mChannelMask,
mFlags, mSelectedDeviceId, mOffloadInfo);
+ //reset offload info if forced
+ mOffloadInfo = (mOffloadInfo == &tOffloadInfo) ? NULL : mOffloadInfo;
if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
@@ -1141,6 +1175,7 @@ status_t AudioTrack::createTrack_l()
mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
return BAD_VALUE;
}
+ mTrackOffloaded = AVMediaUtils::get()->AudioTrackIsTrackOffloaded(output);
{
// Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
// we must release it ourselves if anything goes wrong.
@@ -1203,6 +1238,7 @@ status_t AudioTrack::createTrack_l()
frameCount = mSharedBuffer->size();
} else if (frameCount == 0) {
frameCount = mAfFrameCount;
+ frameCount = AVMediaUtils::get()->AudioTrackGetOffloadFrameCount(frameCount);
}
if (mNotificationFramesAct != frameCount) {
mNotificationFramesAct = frameCount;
@@ -1263,7 +1299,7 @@ status_t AudioTrack::createTrack_l()
trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
}
- if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ if ((mFlags & AUDIO_OUTPUT_FLAG_DIRECT) || mTrackOffloaded) {
trackFlags |= IAudioFlinger::TRACK_DIRECT;
}
@@ -1838,6 +1874,36 @@ nsecs_t AudioTrack::processAudioBuffer()
// get anchor time to account for callbacks.
const nsecs_t timeBeforeCallbacks = systemTime();
+ // perform callbacks while unlocked
+ if (newUnderrun) {
+ mCbf(EVENT_UNDERRUN, mUserData, NULL);
+ }
+ while (loopCountNotifications > 0) {
+ mCbf(EVENT_LOOP_END, mUserData, NULL);
+ --loopCountNotifications;
+ }
+ if (flags & CBLK_BUFFER_END) {
+ mCbf(EVENT_BUFFER_END, mUserData, NULL);
+ }
+ if (markerReached) {
+ mCbf(EVENT_MARKER, mUserData, &markerPosition);
+ }
+ while (newPosCount > 0) {
+ size_t temp = newPosition;
+ mCbf(EVENT_NEW_POS, mUserData, &temp);
+ newPosition += updatePeriod;
+ newPosCount--;
+ }
+
+ if (mObservedSequence != sequence) {
+ mObservedSequence = sequence;
+ mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+ // for offloaded tracks, just wait for the upper layers to recreate the track
+ if (isOffloadedOrDirect()) {
+ return NS_INACTIVE;
+ }
+ }
+
if (waitStreamEnd) {
// FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
// should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
@@ -1852,6 +1918,12 @@ nsecs_t AudioTrack::processAudioBuffer()
case NO_ERROR:
case DEAD_OBJECT:
case TIMED_OUT:
+ if (isOffloaded_l()) {
+ if (mCblk->mFlags & (CBLK_INVALID)){
+ // will trigger EVENT_STREAM_END in next iteration
+ return 0;
+ }
+ }
if (status != DEAD_OBJECT) {
// for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
// instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
@@ -1876,36 +1948,6 @@ nsecs_t AudioTrack::processAudioBuffer()
return 0;
}
- // perform callbacks while unlocked
- if (newUnderrun) {
- mCbf(EVENT_UNDERRUN, mUserData, NULL);
- }
- while (loopCountNotifications > 0) {
- mCbf(EVENT_LOOP_END, mUserData, NULL);
- --loopCountNotifications;
- }
- if (flags & CBLK_BUFFER_END) {
- mCbf(EVENT_BUFFER_END, mUserData, NULL);
- }
- if (markerReached) {
- mCbf(EVENT_MARKER, mUserData, &markerPosition);
- }
- while (newPosCount > 0) {
- size_t temp = newPosition;
- mCbf(EVENT_NEW_POS, mUserData, &temp);
- newPosition += updatePeriod;
- newPosCount--;
- }
-
- if (mObservedSequence != sequence) {
- mObservedSequence = sequence;
- mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
- // for offloaded tracks, just wait for the upper layers to recreate the track
- if (isOffloadedOrDirect()) {
- return NS_INACTIVE;
- }
- }
-
// if inactive, then don't run me again until re-started
if (!active) {
return NS_INACTIVE;
@@ -2161,8 +2203,7 @@ uint32_t AudioTrack::updateAndGetPosition_l()
{
// This is the sole place to read server consumed frames
uint32_t newServer = mProxy->getPosition();
- int32_t delta = newServer - mServer;
- mServer = newServer;
+ uint32_t delta = newServer > mServer ? newServer - mServer : 0;
// TODO There is controversy about whether there can be "negative jitter" in server position.
// This should be investigated further, and if possible, it should be addressed.
// A more definite failure mode is infrequent polling by client.
@@ -2171,11 +2212,12 @@ uint32_t AudioTrack::updateAndGetPosition_l()
// That should ensure delta never goes negative for infrequent polling
// unless the server has more than 2^31 frames in its buffer,
// in which case the use of uint32_t for these counters has bigger issues.
- if (delta < 0) {
- ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
- delta = 0;
+ if (newServer < mServer) {
+ ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d",
+ (int32_t) newServer - mServer);
}
- return mPosition += (uint32_t) delta;
+ mServer = newServer;
+ return mPosition += delta;
}
bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
@@ -2237,14 +2279,22 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
}
}
- // The presented frame count must always lag behind the consumed frame count.
- // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
- status_t status = mAudioTrack->getTimestamp(timestamp);
- if (status != NO_ERROR) {
- ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
- return status;
+ status_t status = UNKNOWN_ERROR;
+ //call Timestamp only if its NOT PCM offloaded and NOT Track Offloaded
+ if (!AVMediaUtils::get()->AudioTrackIsPcmOffloaded(mFormat) && !mTrackOffloaded) {
+ // The presented frame count must always lag behind the consumed frame count.
+ // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
+
+ status = mAudioTrack->getTimestamp(timestamp);
+ if (status != NO_ERROR) {
+ ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
+ return status;
+ }
+
}
- if (isOffloadedOrDirect_l()) {
+
+ if (isOffloadedOrDirect_l() && !AVMediaUtils::get()->AudioTrackIsPcmOffloaded(mFormat)
+ && !mTrackOffloaded) {
if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
// use cached paused position in case another offloaded track is running.
timestamp.mPosition = mPausedPosition;
@@ -2302,6 +2352,11 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
}
} else {
// Update the mapping between local consumed (mPosition) and server consumed (mServer)
+
+ if (AVMediaUtils::get()->AudioTrackGetTimestamp(this, &timestamp) == NO_ERROR) {
+ return NO_ERROR;
+ }
+
(void) updateAndGetPosition_l();
// Server consumed (mServer) and presented both use the same server time base,
// and server consumed is always >= presented.
@@ -2315,9 +2370,9 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
// Convert timestamp position from server time base to client time base.
// TODO The following code should work OK now because timestamp.mPosition is 32-bit.
// But if we change it to 64-bit then this could fail.
- // If (mPosition - mServer) can be negative then should use:
- // (int32_t)(mPosition - mServer)
- timestamp.mPosition += mPosition - mServer;
+ // Split this out instead of using += to prevent unsigned overflow
+ // checks in the outer sum.
+ timestamp.mPosition = timestamp.mPosition + static_cast<int32_t>(mPosition) - mServer;
// Immediately after a call to getPosition_l(), mPosition and
// mServer both represent the same frame position. mPosition is
// in client's point of view, and mServer is in server's point of
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index caa84fb..a6eab12 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -38,7 +38,7 @@ size_t clampToSize(T x) {
// In general, this means (new_self) returned is max(self, other) + 1.
static uint32_t incrementSequence(uint32_t self, uint32_t other) {
- int32_t diff = self - other;
+ int32_t diff = (int32_t) self - (int32_t) other;
if (diff >= 0 && diff < INT32_MAX) {
return self + 1; // we're already ahead of other.
}
@@ -240,6 +240,7 @@ status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *reques
errno = 0;
(void) syscall(__NR_futex, &cblk->mFutex,
mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
+ status_t error = errno; // clock_gettime can affect errno
// update total elapsed time spent waiting
if (measure) {
struct timespec after;
@@ -257,7 +258,7 @@ status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *reques
before = after;
beforeIsValid = true;
}
- switch (errno) {
+ switch (error) {
case 0: // normal wakeup by server, or by binderDied()
case EWOULDBLOCK: // benign race condition with server
case EINTR: // wait was interrupted by signal or other spurious wakeup
@@ -265,7 +266,7 @@ status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *reques
// FIXME these error/non-0 status are being dropped
break;
default:
- status = errno;
+ status = error;
ALOGE("%s unexpected error %s", __func__, strerror(status));
goto end;
}
@@ -893,7 +894,7 @@ ssize_t StaticAudioTrackServerProxy::pollPosition()
if (mObserver.poll(state)) {
StaticAudioTrackState trystate = mState;
bool result;
- const int32_t diffSeq = state.mLoopSequence - state.mPositionSequence;
+ const int32_t diffSeq = (int32_t) state.mLoopSequence - (int32_t) state.mPositionSequence;
if (diffSeq < 0) {
result = updateStateWithLoop(&trystate, state) == OK &&
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 76b5924..abae614 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -74,6 +74,7 @@ enum {
START_AUDIO_SOURCE,
STOP_AUDIO_SOURCE,
SET_AUDIO_PORT_CALLBACK_ENABLED,
+ LIST_AUDIO_SESSIONS,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -767,6 +768,30 @@ public:
status = (status_t)reply.readInt32();
return status;
}
+
+ virtual status_t listAudioSessions(audio_stream_type_t streams,
+ Vector< sp<AudioSessionInfo>> &sessions)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(streams);
+ status_t status = remote()->transact(LIST_AUDIO_SESSIONS, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ status = reply.readInt32();
+ if (status == NO_ERROR) {
+ size_t size = (size_t)reply.readUint32();
+ for (size_t i = 0; i < size && reply.dataAvail() > 0; i++) {
+ sp<AudioSessionInfo> info = new AudioSessionInfo();
+ info->readFromParcel(reply);
+ sessions.push_back(info);
+ }
+ }
+ return status;
+ }
+
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1238,6 +1263,23 @@ status_t BnAudioPolicyService::onTransact(
return NO_ERROR;
} break;
+ case LIST_AUDIO_SESSIONS: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_stream_type_t streams = (audio_stream_type_t)data.readInt32();
+
+ Vector< sp<AudioSessionInfo>> sessions;
+ status_t status = listAudioSessions(streams, sessions);
+
+ reply->writeInt32(status);
+ if (status == NO_ERROR) {
+ reply->writeUint32(static_cast<uint32_t>(sessions.size()));
+ for (size_t i = 0; i < sessions.size(); i++) {
+ sessions[i]->writeToParcel(reply);
+ }
+ }
+ return NO_ERROR;
+ }
+
case ACQUIRE_SOUNDTRIGGER_SESSION: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
diff --git a/media/libmedia/IAudioPolicyServiceClient.cpp b/media/libmedia/IAudioPolicyServiceClient.cpp
index 65cc7d6..a325996 100644
--- a/media/libmedia/IAudioPolicyServiceClient.cpp
+++ b/media/libmedia/IAudioPolicyServiceClient.cpp
@@ -30,7 +30,8 @@ namespace android {
enum {
PORT_LIST_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
PATCH_LIST_UPDATE,
- MIX_STATE_UPDATE
+ MIX_STATE_UPDATE,
+ OUTPUT_SESSION_EFFECTS_UPDATE
};
class BpAudioPolicyServiceClient : public BpInterface<IAudioPolicyServiceClient>
@@ -63,6 +64,19 @@ public:
data.writeInt32(state);
remote()->transact(MIX_STATE_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
}
+
+ void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
+ data.writeInt32(info->mStream);
+ data.writeInt32(info->mSessionId);
+ data.writeInt32(info->mFlags);
+ data.writeInt32(info->mChannelMask);
+ data.writeInt32(info->mUid);
+ data.writeInt32(added ? 1 : 0);
+ remote()->transact(OUTPUT_SESSION_EFFECTS_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
+ }
};
IMPLEMENT_META_INTERFACE(AudioPolicyServiceClient, "android.media.IAudioPolicyServiceClient");
@@ -90,6 +104,20 @@ status_t BnAudioPolicyServiceClient::onTransact(
onDynamicPolicyMixStateUpdate(regId, state);
return NO_ERROR;
}
+ case OUTPUT_SESSION_EFFECTS_UPDATE: {
+ CHECK_INTERFACE(IAudioPolicyServiceClient, data, reply);
+ audio_stream_type_t stream = static_cast<audio_stream_type_t>(data.readInt32());
+ audio_session_t sessionId = static_cast<audio_session_t>(data.readInt32());
+ audio_output_flags_t flags = static_cast<audio_output_flags_t>(data.readInt32());
+ audio_channel_mask_t channelMask = static_cast<audio_channel_mask_t>(data.readInt32());
+ uid_t uid = static_cast<uid_t>(data.readInt32());
+ bool added = data.readInt32() > 0;
+
+ sp<AudioSessionInfo> info = new AudioSessionInfo(
+ sessionId, stream, flags, channelMask, uid);
+ onOutputSessionEffectsUpdate(info, added);
+ return NO_ERROR;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 22f8af7..bc696ca 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -24,6 +24,7 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
+#include <media/AVMediaExtensions.h>
namespace android {
@@ -136,6 +137,7 @@ struct BpCrypto : public BpInterface<ICrypto> {
if (secure) {
data.writeInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(dstPtr)));
+ AVMediaUtils::get()->writeCustomData(&data, dstPtr);
}
remote()->transact(DECRYPT, data, &reply);
@@ -235,6 +237,7 @@ status_t BnCrypto::onTransact(
if (opaqueSize > 0) {
opaqueData = malloc(opaqueSize);
+ CHECK(opaqueData != NULL);
data.read(opaqueData, opaqueSize);
}
@@ -296,6 +299,7 @@ status_t BnCrypto::onTransact(
void *secureBufferId, *dstPtr;
if (secure) {
secureBufferId = reinterpret_cast<void *>(static_cast<uintptr_t>(data.readInt64()));
+ AVMediaUtils::get()->readCustomData(&data, &secureBufferId);
} else {
dstPtr = calloc(1, totalSize);
}
@@ -350,6 +354,8 @@ status_t BnCrypto::onTransact(
}
free(dstPtr);
dstPtr = NULL;
+ } else {
+ AVMediaUtils::get()->closeFileDescriptor(secureBufferId);
}
delete[] subSamples;
diff --git a/media/libmedia/IEffectClient.cpp b/media/libmedia/IEffectClient.cpp
index 1322e72..531f767 100644
--- a/media/libmedia/IEffectClient.cpp
+++ b/media/libmedia/IEffectClient.cpp
@@ -22,6 +22,8 @@
#include <sys/types.h>
#include <media/IEffectClient.h>
+#include <media/stagefright/foundation/ADebug.h>
+
namespace android {
enum {
@@ -117,12 +119,14 @@ status_t BnEffectClient::onTransact(
char *cmd = NULL;
if (cmdSize) {
cmd = (char *)malloc(cmdSize);
+ CHECK(cmd != NULL);
data.read(cmd, cmdSize);
}
uint32_t replySize = data.readInt32();
char *resp = NULL;
if (replySize) {
resp = (char *)malloc(replySize);
+ CHECK(resp != NULL);
data.read(resp, replySize);
}
commandExecuted(cmdCode, cmdSize, cmd, replySize, resp);
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index 0dda0be..23fa084 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -124,6 +124,14 @@ struct BpMediaHTTPConnection : public BpInterface<IMediaHTTPConnection> {
ALOGE("got %zu, but memory has %zu", len, mMemory->size());
return ERROR_OUT_OF_RANGE;
}
+ if (buffer == NULL) {
+ ALOGE("readAt got a NULL buffer");
+ return UNKNOWN_ERROR;
+ }
+ if (mMemory->pointer() == NULL) {
+ ALOGE("readAt got a NULL mMemory->pointer()");
+ return UNKNOWN_ERROR;
+ }
memcpy(buffer, mMemory->pointer(), len);
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 9765f0d..dbf524e 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -240,7 +240,7 @@ status_t BnMediaMetadataRetriever::onTransact(
} break;
case SET_DATA_SOURCE_FD: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
- int fd = dup(data.readFileDescriptor());
+ int fd = data.readFileDescriptor();
int64_t offset = data.readInt64();
int64_t length = data.readInt64();
reply->writeInt32(setDataSource(fd, offset, length));
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 942aec3..bad84b7 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -67,6 +67,8 @@ enum {
SET_RETRANSMIT_ENDPOINT,
GET_RETRANSMIT_ENDPOINT,
SET_NEXT_PLAYER,
+ SUSPEND,
+ RESUME,
};
class BpMediaPlayer: public BpInterface<IMediaPlayer>
@@ -419,6 +421,22 @@ public:
return err;
}
+
+ status_t suspend()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ remote()->transact(SUSPEND, data, &reply);
+ return reply.readInt32();
+ }
+
+ status_t resume()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ remote()->transact(RESUME, data, &reply);
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -682,6 +700,18 @@ status_t BnMediaPlayer::onTransact(
return NO_ERROR;
} break;
+ case SUSPEND: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ status_t ret = suspend();
+ reply->writeInt32(ret);
+ return NO_ERROR;
+ } break;
+ case RESUME: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ status_t ret = resume();
+ reply->writeInt32(ret);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index ee3b584..4711c1b 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -39,6 +39,7 @@ enum {
QUERY_SURFACE_MEDIASOURCE,
RESET,
STOP,
+ PAUSE,
START,
PREPARE,
GET_MAX_AMPLITUDE,
@@ -258,6 +259,15 @@ public:
return reply.readInt32();
}
+ status_t pause()
+ {
+ ALOGV("pause");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ remote()->transact(PAUSE, data, &reply);
+ return reply.readInt32();
+ }
+
status_t stop()
{
ALOGV("stop");
@@ -334,6 +344,12 @@ status_t BnMediaRecorder::onTransact(
reply->writeInt32(stop());
return NO_ERROR;
} break;
+ case PAUSE: {
+ ALOGV("PAUSE");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ reply->writeInt32(pause());
+ return NO_ERROR;
+ } break;
case START: {
ALOGV("START");
CHECK_INTERFACE(IMediaRecorder, data, reply);
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index 365d9ac..7e951c9 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -24,6 +24,7 @@
#include <binder/Parcel.h>
#include <media/IOMX.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/AVMediaExtensions.h>
#include <media/openmax/OMX_IndexExt.h>
namespace android {
@@ -475,6 +476,7 @@ public:
*buffer = (buffer_id)reply.readInt32();
*buffer_data = (void *)reply.readInt64();
+ AVMediaUtils::get()->readCustomData(&reply, buffer_data);
return err;
}
@@ -1045,6 +1047,7 @@ status_t BnOMX::onTransact(
if (err == OK) {
reply->writeInt32((int32_t)buffer);
reply->writeInt64((uintptr_t)buffer_data);
+ AVMediaUtils::get()->writeCustomData(reply, buffer_data);
}
return NO_ERROR;
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index c5790fb..52da7ed 100644..100755
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -1,4 +1,6 @@
/*
+** Copyright (c) 2014, The Linux Foundation. All rights reserved.
+** Not a Contribution.
**
** Copyright 2010, The Android Open Source Project
**
@@ -37,7 +39,8 @@ MediaProfiles *MediaProfiles::sInstance = NULL;
const MediaProfiles::NameToTagMap MediaProfiles::sVideoEncoderNameMap[] = {
{"h263", VIDEO_ENCODER_H263},
{"h264", VIDEO_ENCODER_H264},
- {"m4v", VIDEO_ENCODER_MPEG_4_SP}
+ {"m4v", VIDEO_ENCODER_MPEG_4_SP},
+ {"h265", VIDEO_ENCODER_H265}
};
const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
@@ -45,7 +48,8 @@ const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
{"amrwb", AUDIO_ENCODER_AMR_WB},
{"aac", AUDIO_ENCODER_AAC},
{"heaac", AUDIO_ENCODER_HE_AAC},
- {"aaceld", AUDIO_ENCODER_AAC_ELD}
+ {"aaceld", AUDIO_ENCODER_AAC_ELD},
+ {"lpcm", AUDIO_ENCODER_LPCM},
};
const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
@@ -88,6 +92,19 @@ const MediaProfiles::NameToTagMap MediaProfiles::sCamcorderQualityNameMap[] = {
{"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
{"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
{"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P},
+
+ // Vendor-specific profiles
+ {"vga", CAMCORDER_QUALITY_VGA},
+ {"4kdci", CAMCORDER_QUALITY_4KDCI},
+ {"timelapsevga", CAMCORDER_QUALITY_TIME_LAPSE_VGA},
+ {"timelapse4kdci", CAMCORDER_QUALITY_TIME_LAPSE_4KDCI},
+ {"highspeedcif", CAMCORDER_QUALITY_HIGH_SPEED_CIF},
+ {"highspeedvga", CAMCORDER_QUALITY_HIGH_SPEED_VGA},
+ {"highspeed4kdci", CAMCORDER_QUALITY_HIGH_SPEED_4KDCI},
+ {"qhd", CAMCORDER_QUALITY_QHD},
+ {"2k", CAMCORDER_QUALITY_2k},
+ {"timelapseqhd", CAMCORDER_QUALITY_TIME_LAPSE_QHD},
+ {"timelapse2k", CAMCORDER_QUALITY_TIME_LAPSE_2k},
};
#if LOG_NDEBUG
@@ -126,6 +143,8 @@ MediaProfiles::logVideoEncoderCap(const MediaProfiles::VideoEncoderCap& cap UNUS
ALOGV("frame width: min = %d and max = %d", cap.mMinFrameWidth, cap.mMaxFrameWidth);
ALOGV("frame height: min = %d and max = %d", cap.mMinFrameHeight, cap.mMaxFrameHeight);
ALOGV("frame rate: min = %d and max = %d", cap.mMinFrameRate, cap.mMaxFrameRate);
+ ALOGV("max HFR width: = %d max HFR height: = %d", cap.mMaxHFRFrameWidth, cap.mMaxHFRFrameHeight);
+ ALOGV("max HFR mode: = %d", cap.mMaxHFRMode);
}
/*static*/ void
@@ -261,10 +280,23 @@ MediaProfiles::createVideoEncoderCap(const char **atts)
const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]);
CHECK(codec != -1);
+ int maxHFRWidth = 0, maxHFRHeight = 0, maxHFRMode = 0;
+ // Check if there are enough (start through end) attributes in the
+ // 0-terminated list, to include our additional HFR params. Then check
+ // whether each of them matches the expected name.
+ if (atts[20] && atts[21] && !strcmp("maxHFRFrameWidth", atts[20]) &&
+ atts[22] && atts[23] && !strcmp("maxHFRFrameHeight", atts[22]) &&
+ atts[24] && atts[25] && !strcmp("maxHFRMode", atts[24])) {
+ maxHFRWidth = atoi(atts[21]);
+ maxHFRHeight = atoi(atts[23]);
+ maxHFRMode = atoi(atts[25]);
+ }
+
MediaProfiles::VideoEncoderCap *cap =
new MediaProfiles::VideoEncoderCap(static_cast<video_encoder>(codec),
atoi(atts[5]), atoi(atts[7]), atoi(atts[9]), atoi(atts[11]), atoi(atts[13]),
- atoi(atts[15]), atoi(atts[17]), atoi(atts[19]));
+ atoi(atts[15]), atoi(atts[17]), atoi(atts[19]),
+ maxHFRWidth, maxHFRHeight, maxHFRMode);
logVideoEncoderCap(*cap);
return cap;
}
@@ -423,8 +455,10 @@ MediaProfiles::startElementHandler(void *userData, const char *name, const char
}
static bool isCamcorderProfile(camcorder_quality quality) {
- return quality >= CAMCORDER_QUALITY_LIST_START &&
- quality <= CAMCORDER_QUALITY_LIST_END;
+ return (quality >= CAMCORDER_QUALITY_LIST_START &&
+ quality <= CAMCORDER_QUALITY_LIST_END) ||
+ (quality >= CAMCORDER_QUALITY_VENDOR_START &&
+ quality <= CAMCORDER_QUALITY_VENDOR_END);
}
static bool isTimelapseProfile(camcorder_quality quality) {
@@ -616,14 +650,14 @@ MediaProfiles::getInstance()
MediaProfiles::createDefaultH263VideoEncoderCap()
{
return new MediaProfiles::VideoEncoderCap(
- VIDEO_ENCODER_H263, 192000, 420000, 176, 352, 144, 288, 1, 20);
+ VIDEO_ENCODER_H263, 192000, 420000, 176, 352, 144, 288, 1, 20, 0, 0, 0);
}
/*static*/ MediaProfiles::VideoEncoderCap*
MediaProfiles::createDefaultM4vVideoEncoderCap()
{
return new MediaProfiles::VideoEncoderCap(
- VIDEO_ENCODER_MPEG_4_SP, 192000, 420000, 176, 352, 144, 288, 1, 20);
+ VIDEO_ENCODER_MPEG_4_SP, 192000, 420000, 176, 352, 144, 288, 1, 20, 0, 0, 0);
}
@@ -778,6 +812,8 @@ MediaProfiles::createDefaultCamcorderProfiles(MediaProfiles *profiles)
MediaProfiles::createDefaultAudioEncoders(MediaProfiles *profiles)
{
profiles->mAudioEncoders.add(createDefaultAmrNBEncoderCap());
+ profiles->mAudioEncoders.add(createDefaultAacEncoderCap());
+ profiles->mAudioEncoders.add(createDefaultLpcmEncoderCap());
}
/*static*/ void
@@ -812,6 +848,20 @@ MediaProfiles::createDefaultAmrNBEncoderCap()
AUDIO_ENCODER_AMR_NB, 5525, 12200, 8000, 8000, 1, 1);
}
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultAacEncoderCap()
+{
+ return new MediaProfiles::AudioEncoderCap(
+ AUDIO_ENCODER_AAC, 64000, 156000, 8000, 48000, 1, 2);
+}
+
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultLpcmEncoderCap()
+{
+ return new MediaProfiles::AudioEncoderCap(
+ AUDIO_ENCODER_LPCM, 768000, 4608000, 8000, 48000, 1, 6);
+}
+
/*static*/ void
MediaProfiles::createDefaultImageEncodingQualityLevels(MediaProfiles *profiles)
{
@@ -927,6 +977,9 @@ int MediaProfiles::getVideoEncoderParamByName(const char *name, video_encoder co
if (!strcmp("enc.vid.bps.max", name)) return mVideoEncoders[index]->mMaxBitRate;
if (!strcmp("enc.vid.fps.min", name)) return mVideoEncoders[index]->mMinFrameRate;
if (!strcmp("enc.vid.fps.max", name)) return mVideoEncoders[index]->mMaxFrameRate;
+ if (!strcmp("enc.vid.hfr.width.max", name)) return mVideoEncoders[index]->mMaxHFRFrameWidth;
+ if (!strcmp("enc.vid.hfr.height.max", name)) return mVideoEncoders[index]->mMaxHFRFrameHeight;
+ if (!strcmp("enc.vid.hfr.mode.max", name)) return mVideoEncoders[index]->mMaxHFRMode;
ALOGE("The given video encoder param name %s is not found", name);
return -1;
diff --git a/media/libmedia/MediaScanner.cpp b/media/libmedia/MediaScanner.cpp
index dcbb769..dac0a9e 100644
--- a/media/libmedia/MediaScanner.cpp
+++ b/media/libmedia/MediaScanner.cpp
@@ -24,6 +24,8 @@
#include <sys/stat.h>
#include <dirent.h>
+#include <media/stagefright/foundation/ADebug.h>
+
namespace android {
MediaScanner::MediaScanner()
@@ -240,6 +242,7 @@ MediaScanResult MediaScanner::doProcessDirectoryEntry(
MediaAlbumArt *MediaAlbumArt::clone() {
size_t byte_size = this->size() + sizeof(MediaAlbumArt);
MediaAlbumArt *result = reinterpret_cast<MediaAlbumArt *>(malloc(byte_size));
+ CHECK(result != NULL);
result->mSize = this->size();
memcpy(&result->mData[0], &this->mData[0], this->size());
return result;
@@ -253,6 +256,7 @@ void MediaAlbumArt::init(MediaAlbumArt *instance, int32_t dataSize, const void *
MediaAlbumArt *MediaAlbumArt::fromData(int32_t dataSize, const void* data) {
size_t byte_size = sizeof(MediaAlbumArt) + dataSize;
MediaAlbumArt *result = reinterpret_cast<MediaAlbumArt *>(malloc(byte_size));
+ CHECK(result != NULL);
init(result, dataSize, data);
return result;
}
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 6da5348..af75e0f 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -698,7 +698,11 @@ const ToneGenerator::ToneDescriptor ToneGenerator::sToneDescriptors[] = {
{ .segments = { { .duration = 0, .waveFreq = { 0 }, 0, 0 }},
.repeatCnt = 0,
.repeatSegment = 0 }, // TONE_CDMA_SIGNAL_OFF
-
+ { .segments = { { .duration = 15000, .waveFreq = { 0 }, 0, 0 },
+ { .duration = 500, .waveFreq = { 450, 0 }, 0, 0 },
+ { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
+ .repeatCnt = ToneGenerator::TONEGEN_INF,
+ .repeatSegment = 0 }, // TONE_HOLD_RECALL
{ .segments = { { .duration = ToneGenerator::TONEGEN_INF, .waveFreq = { 350, 440, 0 }, 0, 0 },
{ .duration = 0 , .waveFreq = { 0 }, 0, 0}},
.repeatCnt = ToneGenerator::TONEGEN_INF,
@@ -804,6 +808,12 @@ ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool
ALOGE("Unable to marshal AudioFlinger");
return;
}
+
+ if (mSamplingRate > 48000) {
+ ALOGW("mSamplingRate %d . limit to 48k", mSamplingRate);
+ mSamplingRate = 48000;
+ }
+
mThreadCanCallJava = threadCanCallJava;
mStreamType = streamType;
mVolume = volume;
@@ -1046,7 +1056,7 @@ bool ToneGenerator::initAudioTrack() {
ALOGV("Create Track: %p", mpAudioTrack.get());
mpAudioTrack->set(mStreamType,
- 0, // sampleRate
+ mSamplingRate,
AUDIO_FORMAT_PCM_16_BIT,
AUDIO_CHANNEL_OUT_MONO,
0, // frameCount
@@ -1580,7 +1590,8 @@ void ToneGenerator::WaveGenerator::getSamples(short *outBuffer,
}
long dec = lAmplitude/count;
// loop generation
- while (count--) {
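+ // Decrement inside the body so count is left at 0 (rather than wrapped past it) when the loop exits.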
+ while (count) {
+ count--;
Sample = ((lA1 * lS1) >> S_Q14) - lS2;
// shift delay
lS2 = lS1;
@@ -1591,7 +1602,8 @@ void ToneGenerator::WaveGenerator::getSamples(short *outBuffer,
}
} else {
// loop generation
- while (count--) {
+ while (count) {
+ count--;
Sample = ((lA1 * lS1) >> S_Q14) - lS2;
// shift delay
lS2 = lS1;
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index f5c1b1f..8d83c1b 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -94,6 +94,14 @@ status_t Visualizer::setEnabled(bool enabled)
return status;
}
+void Visualizer::cancelCaptureCallBack()
+{
+ sp<CaptureThread> t = mCaptureThread;
+ if (t != 0) {
+ t->requestExitAndWait();
+ }
+}
+
status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
uint32_t rate, bool force)
{
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index dcce3cc..1057fc0 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -333,6 +333,9 @@ status_t MediaPlayer::start()
ALOGV("playback completed immediately following start()");
}
}
+ } else if ( (mPlayer != 0) && (mCurrentState & MEDIA_PLAYER_SUSPENDED) ) {
+ ALOGV("start while suspended, so ignore this start");
+ ret = NO_ERROR;
} else {
ALOGE("start called in state %d", mCurrentState);
ret = INVALID_OPERATION;
@@ -395,6 +398,10 @@ bool MediaPlayer::isPlaying()
ALOGE("internal/external state mismatch corrected");
mCurrentState = MEDIA_PLAYER_STARTED;
}
+ if ((mCurrentState & MEDIA_PLAYER_PLAYBACK_COMPLETE) && temp) {
+ ALOGE("internal/external state mismatch corrected");
+ mCurrentState = MEDIA_PLAYER_STARTED;
+ }
return temp;
}
ALOGV("isPlaying: no active player");
@@ -484,7 +491,8 @@ status_t MediaPlayer::getDuration_l(int *msec)
{
ALOGV("getDuration_l");
bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED |
- MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE));
+ MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE |
+ MEDIA_PLAYER_SUSPENDED));
if (mPlayer != 0 && isValidState) {
int durationMs;
status_t ret = mPlayer->getDuration(&durationMs);
@@ -514,7 +522,7 @@ status_t MediaPlayer::seekTo_l(int msec)
{
ALOGV("seekTo %d", msec);
if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
- MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
+ MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_SUSPENDED) ) ) {
if ( msec < 0 ) {
ALOGW("Attempt to seek to invalid position: %d", msec);
msec = 0;
@@ -935,4 +943,54 @@ status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) {
return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
}
-} // namespace android
+status_t MediaPlayer::suspend() {
+ ALOGV("MediaPlayer::suspend()");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGE("mPlayer = NULL");
+ return NO_INIT;
+ }
+
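+ // suspend() is accepted from any prepared/playing/paused/stopped state, including an already-suspended player.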
+ bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_SUSPENDED));
+
+ if (!isValidState) {
+ ALOGE("suspend while in a invalid state = %d", mCurrentState);
+ return UNKNOWN_ERROR;
+ }
+
+ status_t ret = mPlayer->suspend();
+
+ if (OK != ret) {
+ ALOGE("MediaPlayer::suspend() return with error ret = %d", ret);
+ return ret;
+ }
+ mCurrentState = MEDIA_PLAYER_SUSPENDED;
+ return OK;
+}
+
+status_t MediaPlayer::resume() {
+ ALOGV("MediaPlayer::resume()");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGE("mPlayer == NULL");
+ return NO_INIT;
+ }
+
+ bool isValidState = (mCurrentState == MEDIA_PLAYER_SUSPENDED);
+
+ if (!isValidState) {
+ ALOGE("resume while in a invalid state = %d", mCurrentState);
+ return UNKNOWN_ERROR;
+ }
+
+ status_t ret = mPlayer->resume();
+
+ if (OK != ret) {
+ ALOGE("MediaPlayer::resume() return with error ret = %d", ret);
+ return ret;
+ }
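+ // A successful resume() returns the player to the prepared state; the client then calls start() to continue.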
+ mCurrentState = MEDIA_PLAYER_PREPARED;
+ return OK;
+}
+
+}; // namespace android
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 0eb7de5..144654c 100644..100755
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -482,7 +482,7 @@ status_t MediaRecorder::start()
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
- if (!(mCurrentState & MEDIA_RECORDER_PREPARED)) {
+ if (!(mCurrentState & (MEDIA_RECORDER_PREPARED | MEDIA_RECORDER_PAUSED))) {
ALOGE("start called in an invalid state: %d", mCurrentState);
return INVALID_OPERATION;
}
@@ -497,6 +497,29 @@ status_t MediaRecorder::start()
return ret;
}
+status_t MediaRecorder::pause()
+{
+ ALOGV("pause");
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ if (!(mCurrentState & MEDIA_RECORDER_RECORDING)) {
+ ALOGE("pause called in an invalid state: %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ status_t ret = mMediaRecorder->pause();
+ if (OK != ret) {
+ ALOGE("pause failed: %d", ret);
+ mCurrentState = MEDIA_RECORDER_ERROR;
+ return ret;
+ }
+
+ mCurrentState = MEDIA_RECORDER_PAUSED;
+ return ret;
+}
+
status_t MediaRecorder::stop()
{
ALOGV("stop");
@@ -504,7 +527,7 @@ status_t MediaRecorder::stop()
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
- if (!(mCurrentState & MEDIA_RECORDER_RECORDING)) {
+ if (!(mCurrentState & (MEDIA_RECORDER_RECORDING | MEDIA_RECORDER_PAUSED))) {
ALOGE("stop called in an invalid state: %d", mCurrentState);
return INVALID_OPERATION;
}
@@ -540,6 +563,7 @@ status_t MediaRecorder::reset()
ret = OK;
break;
+ case MEDIA_RECORDER_PAUSED:
case MEDIA_RECORDER_RECORDING:
case MEDIA_RECORDER_DATASOURCE_CONFIGURED:
case MEDIA_RECORDER_PREPARED:
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 4d1b587..6575625 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -18,7 +18,6 @@ LOCAL_SRC_FILES:= \
MetadataRetrieverClient.cpp \
RemoteDisplay.cpp \
SharedLibrary.cpp \
- StagefrightPlayer.cpp \
StagefrightRecorder.cpp \
TestPlayerStub.cpp \
@@ -41,11 +40,15 @@ LOCAL_SHARED_LIBRARIES := \
libstagefright_wfd \
libutils \
libvorbisidec \
+ libaudioutils \
LOCAL_STATIC_LIBRARIES := \
libstagefright_nuplayer \
libstagefright_rtsp \
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libavmediaserviceextensions \
+
LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/include \
$(TOP)/frameworks/av/media/libstagefright/rtsp \
@@ -53,13 +56,18 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/webm \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/tremolo/Tremolo \
+ $(TOP)/frameworks/av/media/libavextensions \
-LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall #-DLOG_NDEBUG=0
LOCAL_CLANG := true
LOCAL_MODULE:= libmediaplayerservice
-LOCAL_32_BIT_ONLY := true
+#LOCAL_32_BIT_ONLY := true
+
+ifeq ($(TARGET_BOARD_PLATFORM),msm8974)
+ LOCAL_CFLAGS += -DTARGET_8974
+endif
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index d5d12f7..f0afc5a 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -31,8 +31,8 @@
#include "MediaPlayerFactory.h"
#include "TestPlayerStub.h"
-#include "StagefrightPlayer.h"
#include "nuplayer/NuPlayerDriver.h"
+#include <mediaplayerservice/AVMediaServiceExtensions.h>
namespace android {
@@ -64,12 +64,6 @@ status_t MediaPlayerFactory::registerFactory_l(IFactory* factory,
}
static player_type getDefaultPlayerType() {
- char value[PROPERTY_VALUE_MAX];
- if (property_get("media.stagefright.use-awesome", value, NULL)
- && (!strcmp("1", value) || !strcasecmp("true", value))) {
- return STAGEFRIGHT_PLAYER;
- }
-
return NU_PLAYER;
}
@@ -87,7 +81,7 @@ void MediaPlayerFactory::unregisterFactory(player_type type) {
#define GET_PLAYER_TYPE_IMPL(a...) \
Mutex::Autolock lock_(&sLock); \
\
- player_type ret = STAGEFRIGHT_PLAYER; \
+ player_type ret = NU_PLAYER; \
float bestScore = 0.0; \
\
for (size_t i = 0; i < sFactoryMap.size(); ++i) { \
@@ -176,63 +170,6 @@ sp<MediaPlayerBase> MediaPlayerFactory::createPlayer(
* *
*****************************************************************************/
-class StagefrightPlayerFactory :
- public MediaPlayerFactory::IFactory {
- public:
- virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
- int fd,
- int64_t offset,
- int64_t length,
- float /*curScore*/) {
- if (legacyDrm()) {
- sp<DataSource> source = new FileSource(dup(fd), offset, length);
- String8 mimeType;
- float confidence;
- if (SniffWVM(source, &mimeType, &confidence, NULL /* format */)) {
- return 1.0;
- }
- }
-
- if (getDefaultPlayerType() == STAGEFRIGHT_PLAYER) {
- char buf[20];
- lseek(fd, offset, SEEK_SET);
- read(fd, buf, sizeof(buf));
- lseek(fd, offset, SEEK_SET);
-
- uint32_t ident = *((uint32_t*)buf);
-
- // Ogg vorbis?
- if (ident == 0x5367674f) // 'OggS'
- return 1.0;
- }
-
- return 0.0;
- }
-
- virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
- const char* url,
- float /*curScore*/) {
- if (legacyDrm() && !strncasecmp("widevine://", url, 11)) {
- return 1.0;
- }
- return 0.0;
- }
-
- virtual sp<MediaPlayerBase> createPlayer(pid_t /* pid */) {
- ALOGV(" create StagefrightPlayer");
- return new StagefrightPlayer();
- }
- private:
- bool legacyDrm() {
- char value[PROPERTY_VALUE_MAX];
- if (property_get("persist.sys.media.legacy-drm", value, NULL)
- && (!strcmp("1", value) || !strcasecmp("true", value))) {
- return true;
- }
- return false;
- }
-};
-
class NuPlayerFactory : public MediaPlayerFactory::IFactory {
public:
virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
@@ -305,14 +242,20 @@ class TestPlayerFactory : public MediaPlayerFactory::IFactory {
};
void MediaPlayerFactory::registerBuiltinFactories() {
+
+ MediaPlayerFactory::IFactory* pCustomFactory = NULL;
Mutex::Autolock lock_(&sLock);
if (sInitComplete)
return;
- registerFactory_l(new StagefrightPlayerFactory(), STAGEFRIGHT_PLAYER);
registerFactory_l(new NuPlayerFactory(), NU_PLAYER);
registerFactory_l(new TestPlayerFactory(), TEST_PLAYER);
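+ // Ask the vendor extension for an optional DASH player factory and register it when one is provided.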
+ AVMediaServiceUtils::get()->getDashPlayerFactory(pCustomFactory, DASH_PLAYER);
+ if (pCustomFactory != NULL) {
+ ALOGV("Registering DASH_PLAYER");
+ registerFactory_l(pCustomFactory, DASH_PLAYER);
+ }
sInitComplete = true;
}
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index f802de1..61afe99 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -21,6 +21,7 @@
#define LOG_TAG "MediaPlayerService"
#include <utils/Log.h>
+#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
@@ -73,7 +74,6 @@
#include "MediaPlayerFactory.h"
#include "TestPlayerStub.h"
-#include "StagefrightPlayer.h"
#include "nuplayer/NuPlayerDriver.h"
#include <OMX.h>
@@ -148,7 +148,7 @@ bool unmarshallFilter(const Parcel& p,
if (p.dataAvail() < size)
{
- ALOGE("Filter too short expected %d but got %d", size, p.dataAvail());
+ ALOGE("Filter too short expected %zu but got %zu", size, p.dataAvail());
*status = NOT_ENOUGH_DATA;
return false;
}
@@ -740,7 +740,7 @@ status_t MediaPlayerService::Client::setDataSource(
status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length)
{
- ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
+ ALOGV("setDataSource fd=%d, offset=%" PRId64 ", length=%" PRId64 "", fd, offset, length);
struct stat sb;
int ret = fstat(fd, &sb);
if (ret != 0) {
@@ -748,20 +748,19 @@ status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64
return UNKNOWN_ERROR;
}
- ALOGV("st_dev = %llu", static_cast<uint64_t>(sb.st_dev));
+ ALOGV("st_dev = %" PRIu64 "", static_cast<uint64_t>(sb.st_dev));
ALOGV("st_mode = %u", sb.st_mode);
ALOGV("st_uid = %lu", static_cast<unsigned long>(sb.st_uid));
ALOGV("st_gid = %lu", static_cast<unsigned long>(sb.st_gid));
- ALOGV("st_size = %llu", sb.st_size);
+ ALOGV("st_size = %" PRId64 "", sb.st_size);
if (offset >= sb.st_size) {
ALOGE("offset error");
- ::close(fd);
return UNKNOWN_ERROR;
}
if (offset + length > sb.st_size) {
length = sb.st_size - offset;
- ALOGV("calculated length = %lld", length);
+ ALOGV("calculated length = %" PRId64 "", length);
}
player_type playerType = MediaPlayerFactory::getPlayerType(this,
@@ -1260,8 +1259,17 @@ void MediaPlayerService::Client::notify(
if (msg == MEDIA_PLAYBACK_COMPLETE && client->mNextClient != NULL) {
if (client->mAudioOutput != NULL)
client->mAudioOutput->switchToNextOutput();
- client->mNextClient->start();
- client->mNextClient->mClient->notify(MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+ ALOGD("gapless:current track played back");
+ ALOGD("gapless:try to do a gapless switch to next track");
+ status_t ret;
+ ret = client->mNextClient->start();
+ if (ret == NO_ERROR) {
+ client->mNextClient->mClient->notify(MEDIA_INFO,
+ MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+ } else {
+ client->mClient->notify(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN , 0, obj);
+ ALOGW("gapless:start playback for next track failed");
+ }
}
}
@@ -1308,6 +1316,22 @@ void MediaPlayerService::Client::addNewMetadataUpdate(media::Metadata::Type meta
}
}
+status_t MediaPlayerService::Client::suspend()
+{
+ ALOGV("[%d] suspend", mConnId);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == NULL) return NO_INIT;
+ return p->suspend();
+}
+
+status_t MediaPlayerService::Client::resume()
+{
+ ALOGV("[%d] resume", mConnId);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == NULL) return NO_INIT;
+ return p->resume();
+}
+
#if CALLBACK_ANTAGONIZER
const int Antagonizer::interval = 10000; // 10 msecs
@@ -1377,6 +1401,7 @@ MediaPlayerService::AudioOutput::AudioOutput(int sessionId, int uid, int pid,
}
setMinBufferCount();
+ mBitWidth = 16;
}
MediaPlayerService::AudioOutput::~AudioOutput()
@@ -1633,6 +1658,15 @@ status_t MediaPlayerService::AudioOutput::open(
} else if (mRecycledTrack->format() != format) {
reuse = false;
}
+
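+ // An offloaded track cannot be recycled across a bit-width change, so compare against the new offload info.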
+ if (bothOffloaded) {
+ if (mBitWidth != offloadInfo->bit_width) {
+ ALOGV("output bit width differs %d v/s %d",
+ mBitWidth, offloadInfo->bit_width);
+ reuse = false;
+ }
+ }
+
} else {
ALOGV("no track available to recycle");
}
@@ -1713,7 +1747,7 @@ status_t MediaPlayerService::AudioOutput::open(
if (!bothOffloaded) {
if (mRecycledTrack->frameCount() != t->frameCount()) {
- ALOGV("framecount differs: %u/%u frames",
+ ALOGV("framecount differs: %zu/%zu frames",
mRecycledTrack->frameCount(), t->frameCount());
reuse = false;
}
@@ -1748,6 +1782,13 @@ status_t MediaPlayerService::AudioOutput::open(
mFlags = flags;
mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
mFrameSize = t->frameSize();
+
+ if (offloadInfo) {
+ mBitWidth = offloadInfo->bit_width;
+ } else {
+ mBitWidth = 16;
+ }
+
uint32_t pos;
if (t->getPosition(&pos) == OK) {
mBytesWritten = uint64_t(pos) * mFrameSize;
@@ -1848,6 +1889,7 @@ void MediaPlayerService::AudioOutput::switchToNextOutput() {
mNextOutput->mBytesWritten = mBytesWritten;
mNextOutput->mFlags = mFlags;
mNextOutput->mFrameSize = mFrameSize;
+ mNextOutput->mBitWidth = mBitWidth;
close_l();
mCallbackData = NULL; // destruction handled by mNextOutput
} else {
@@ -1905,8 +1947,13 @@ void MediaPlayerService::AudioOutput::pause()
void MediaPlayerService::AudioOutput::close()
{
ALOGV("close");
- Mutex::Autolock lock(mLock);
- close_l();
+ sp<AudioTrack> track;
+ {
+ Mutex::Autolock lock(mLock);
+ track = mTrack;
+ close_l(); // clears mTrack
+ }
+ // destruction of the track occurs outside of mutex.
}
void MediaPlayerService::AudioOutput::setVolume(float left, float right)
@@ -2112,6 +2159,7 @@ bool CallbackThread::threadLoop() {
if (mBuffer == NULL) {
mBufferSize = sink->bufferSize();
mBuffer = malloc(mBufferSize);
+ CHECK(mBuffer != NULL);
}
size_t actualSize =
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index e8e5360..2bd7ec5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -160,6 +160,7 @@ class MediaPlayerService : public BnMediaPlayerService
// static variables below not protected by mutex
static bool mIsOnEmulator;
static int mMinBufferCount; // 12 for emulator; otherwise 4
+ uint16_t mBitWidth;
// CallbackData is what is passed to the AudioTrack as the "user" data.
// We need to be able to target this to a different Output on the fly,
@@ -334,6 +335,9 @@ private:
int getAudioSessionId() { return mAudioSessionId; }
+ virtual status_t suspend();
+ virtual status_t resume();
+
private:
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index f761dec..1e112c8 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "MediaRecorderService"
#include <utils/Log.h>
+#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
@@ -39,6 +40,7 @@
#include "StagefrightRecorder.h"
#include <gui/IGraphicBufferProducer.h>
+#include "mediaplayerservice/AVMediaServiceExtensions.h"
namespace android {
@@ -166,7 +168,7 @@ status_t MediaRecorderClient::setAudioEncoder(int ae)
status_t MediaRecorderClient::setOutputFile(int fd, int64_t offset, int64_t length)
{
- ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
+ ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
Mutex::Autolock lock(mLock);
if (mRecorder == NULL) {
ALOGE("recorder is not initialized");
@@ -242,6 +244,17 @@ status_t MediaRecorderClient::start()
}
+status_t MediaRecorderClient::pause()
+{
+ ALOGV("pause");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder == NULL) {
+ ALOGE("recorder is not initialized");
+ return NO_INIT;
+ }
+ return mRecorder->pause();
+}
+
status_t MediaRecorderClient::stop()
{
ALOGV("stop");
@@ -305,7 +318,7 @@ MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service,
{
ALOGV("Client constructor");
mPid = pid;
- mRecorder = new StagefrightRecorder(opPackageName);
+ mRecorder = AVMediaServiceFactory::get()->createStagefrightRecorder(opPackageName);
mMediaPlayerService = service;
}
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 05130d4..cfe332c 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -71,8 +71,11 @@ private:
Mutex mLock;
MediaRecorderBase *mRecorder;
sp<MediaPlayerService> mMediaPlayerService;
-};
+public:
+ virtual status_t pause();
+
+};
}; // namespace android
#endif // ANDROID_MEDIARECORDERCLIENT_H
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index f6acdf6..f725b90 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "MetadataRetrieverClient"
#include <utils/Log.h>
+#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
@@ -84,7 +85,6 @@ static sp<MediaMetadataRetrieverBase> createRetriever(player_type playerType)
{
sp<MediaMetadataRetrieverBase> p;
switch (playerType) {
- case STAGEFRIGHT_PLAYER:
case NU_PLAYER:
{
p = new StagefrightMetadataRetriever;
@@ -133,7 +133,7 @@ status_t MetadataRetrieverClient::setDataSource(
status_t MetadataRetrieverClient::setDataSource(int fd, int64_t offset, int64_t length)
{
- ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
+ ALOGV("setDataSource fd=%d, offset=%" PRId64 ", length=%" PRId64 "", fd, offset, length);
Mutex::Autolock lock(mLock);
struct stat sb;
int ret = fstat(fd, &sb);
@@ -141,20 +141,19 @@ status_t MetadataRetrieverClient::setDataSource(int fd, int64_t offset, int64_t
ALOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
return BAD_VALUE;
}
- ALOGV("st_dev = %llu", static_cast<uint64_t>(sb.st_dev));
+ ALOGV("st_dev = %" PRIu64 "", static_cast<uint64_t>(sb.st_dev));
ALOGV("st_mode = %u", sb.st_mode);
ALOGV("st_uid = %lu", static_cast<unsigned long>(sb.st_uid));
ALOGV("st_gid = %lu", static_cast<unsigned long>(sb.st_gid));
- ALOGV("st_size = %llu", sb.st_size);
+ ALOGV("st_size = %" PRIu64 "", sb.st_size);
if (offset >= sb.st_size) {
- ALOGE("offset (%lld) bigger than file size (%llu)", offset, sb.st_size);
- ::close(fd);
+ ALOGE("offset (%" PRId64 ") bigger than file size (%" PRIu64 ")", offset, sb.st_size);
return BAD_VALUE;
}
if (offset + length > sb.st_size) {
length = sb.st_size - offset;
- ALOGV("calculated length = %lld", length);
+ ALOGV("calculated length = %" PRId64 "", length);
}
player_type playerType =
@@ -165,12 +164,10 @@ status_t MetadataRetrieverClient::setDataSource(int fd, int64_t offset, int64_t
ALOGV("player type = %d", playerType);
sp<MediaMetadataRetrieverBase> p = createRetriever(playerType);
if (p == NULL) {
- ::close(fd);
return NO_INIT;
}
status_t status = p->setDataSource(fd, offset, length);
if (status == NO_ERROR) mRetriever = p;
- ::close(fd);
return status;
}
@@ -195,7 +192,7 @@ Mutex MetadataRetrieverClient::sLock;
sp<IMemory> MetadataRetrieverClient::getFrameAtTime(int64_t timeUs, int option)
{
- ALOGV("getFrameAtTime: time(%lld us) option(%d)", timeUs, option);
+ ALOGV("getFrameAtTime: time(%" PRId64 " us) option(%d)", timeUs, option);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
mThumbnail.clear();
@@ -217,7 +214,7 @@ sp<IMemory> MetadataRetrieverClient::getFrameAtTime(int64_t timeUs, int option)
}
mThumbnail = new MemoryBase(heap, 0, size);
if (mThumbnail == NULL) {
- ALOGE("not enough memory for VideoFrame size=%u", size);
+ ALOGE("not enough memory for VideoFrame size=%zu", size);
delete frame;
return NULL;
}
@@ -259,7 +256,7 @@ sp<IMemory> MetadataRetrieverClient::extractAlbumArt()
}
mAlbumArt = new MemoryBase(heap, 0, size);
if (mAlbumArt == NULL) {
- ALOGE("not enough memory for MediaAlbumArt size=%u", size);
+ ALOGE("not enough memory for MediaAlbumArt size=%zu", size);
delete albumArt;
return NULL;
}
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
deleted file mode 100644
index 3fedd9b..0000000
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "StagefrightPlayer"
-#include <utils/Log.h>
-
-#include "StagefrightPlayer.h"
-
-#include "AwesomePlayer.h"
-
-#include <media/Metadata.h>
-#include <media/stagefright/MediaExtractor.h>
-
-namespace android {
-
-StagefrightPlayer::StagefrightPlayer()
- : mPlayer(new AwesomePlayer) {
- ALOGV("StagefrightPlayer");
-
- mPlayer->setListener(this);
-}
-
-StagefrightPlayer::~StagefrightPlayer() {
- ALOGV("~StagefrightPlayer");
- reset();
-
- delete mPlayer;
- mPlayer = NULL;
-}
-
-status_t StagefrightPlayer::initCheck() {
- ALOGV("initCheck");
- return OK;
-}
-
-status_t StagefrightPlayer::setUID(uid_t uid) {
- mPlayer->setUID(uid);
-
- return OK;
-}
-
-status_t StagefrightPlayer::setDataSource(
- const sp<IMediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers) {
- return mPlayer->setDataSource(httpService, url, headers);
-}
-
-// Warning: The filedescriptor passed into this method will only be valid until
-// the method returns, if you want to keep it, dup it!
-status_t StagefrightPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
- ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
- return mPlayer->setDataSource(dup(fd), offset, length);
-}
-
-status_t StagefrightPlayer::setDataSource(const sp<IStreamSource> &source) {
- return mPlayer->setDataSource(source);
-}
-
-status_t StagefrightPlayer::setVideoSurfaceTexture(
- const sp<IGraphicBufferProducer> &bufferProducer) {
- ALOGV("setVideoSurfaceTexture");
-
- return mPlayer->setSurfaceTexture(bufferProducer);
-}
-
-status_t StagefrightPlayer::prepare() {
- return mPlayer->prepare();
-}
-
-status_t StagefrightPlayer::prepareAsync() {
- return mPlayer->prepareAsync();
-}
-
-status_t StagefrightPlayer::start() {
- ALOGV("start");
-
- return mPlayer->play();
-}
-
-status_t StagefrightPlayer::stop() {
- ALOGV("stop");
-
- return pause(); // what's the difference?
-}
-
-status_t StagefrightPlayer::pause() {
- ALOGV("pause");
-
- return mPlayer->pause();
-}
-
-bool StagefrightPlayer::isPlaying() {
- ALOGV("isPlaying");
- return mPlayer->isPlaying();
-}
-
-status_t StagefrightPlayer::seekTo(int msec) {
- ALOGV("seekTo %.2f secs", msec / 1E3);
-
- status_t err = mPlayer->seekTo((int64_t)msec * 1000);
-
- return err;
-}
-
-status_t StagefrightPlayer::getCurrentPosition(int *msec) {
- ALOGV("getCurrentPosition");
-
- int64_t positionUs;
- status_t err = mPlayer->getPosition(&positionUs);
-
- if (err != OK) {
- return err;
- }
-
- *msec = (positionUs + 500) / 1000;
-
- return OK;
-}
-
-status_t StagefrightPlayer::getDuration(int *msec) {
- ALOGV("getDuration");
-
- int64_t durationUs;
- status_t err = mPlayer->getDuration(&durationUs);
-
- if (err != OK) {
- *msec = 0;
- return OK;
- }
-
- *msec = (durationUs + 500) / 1000;
-
- return OK;
-}
-
-status_t StagefrightPlayer::reset() {
- ALOGV("reset");
-
- mPlayer->reset();
-
- return OK;
-}
-
-status_t StagefrightPlayer::setLooping(int loop) {
- ALOGV("setLooping");
-
- return mPlayer->setLooping(loop);
-}
-
-player_type StagefrightPlayer::playerType() {
- ALOGV("playerType");
- return STAGEFRIGHT_PLAYER;
-}
-
-status_t StagefrightPlayer::invoke(const Parcel &request, Parcel *reply) {
- ALOGV("invoke()");
- return mPlayer->invoke(request, reply);
-}
-
-void StagefrightPlayer::setAudioSink(const sp<AudioSink> &audioSink) {
- MediaPlayerInterface::setAudioSink(audioSink);
-
- mPlayer->setAudioSink(audioSink);
-}
-
-status_t StagefrightPlayer::setParameter(int key, const Parcel &request) {
- ALOGV("setParameter(key=%d)", key);
- return mPlayer->setParameter(key, request);
-}
-
-status_t StagefrightPlayer::getParameter(int key, Parcel *reply) {
- ALOGV("getParameter");
- return mPlayer->getParameter(key, reply);
-}
-
-status_t StagefrightPlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
- return mPlayer->setPlaybackSettings(rate);
-}
-
-status_t StagefrightPlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
- return mPlayer->getPlaybackSettings(rate);
-}
-
-status_t StagefrightPlayer::getMetadata(
- const media::Metadata::Filter& /* ids */, Parcel *records) {
- using media::Metadata;
-
- uint32_t flags = mPlayer->flags();
-
- Metadata metadata(records);
-
- metadata.appendBool(
- Metadata::kPauseAvailable,
- flags & MediaExtractor::CAN_PAUSE);
-
- metadata.appendBool(
- Metadata::kSeekBackwardAvailable,
- flags & MediaExtractor::CAN_SEEK_BACKWARD);
-
- metadata.appendBool(
- Metadata::kSeekForwardAvailable,
- flags & MediaExtractor::CAN_SEEK_FORWARD);
-
- metadata.appendBool(
- Metadata::kSeekAvailable,
- flags & MediaExtractor::CAN_SEEK);
-
- return OK;
-}
-
-status_t StagefrightPlayer::dump(int fd, const Vector<String16> &args) const {
- return mPlayer->dump(fd, args);
-}
-
-} // namespace android
diff --git a/media/libmediaplayerservice/StagefrightPlayer.h b/media/libmediaplayerservice/StagefrightPlayer.h
deleted file mode 100644
index 96013df..0000000
--- a/media/libmediaplayerservice/StagefrightPlayer.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-**
-** Copyright 2009, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_STAGEFRIGHTPLAYER_H
-#define ANDROID_STAGEFRIGHTPLAYER_H
-
-#include <media/MediaPlayerInterface.h>
-
-namespace android {
-
-struct AwesomePlayer;
-
-class StagefrightPlayer : public MediaPlayerInterface {
-public:
- StagefrightPlayer();
- virtual ~StagefrightPlayer();
-
- virtual status_t initCheck();
-
- virtual status_t setUID(uid_t uid);
-
- virtual status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers);
-
- virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
-
- virtual status_t setDataSource(const sp<IStreamSource> &source);
-
- virtual status_t setVideoSurfaceTexture(
- const sp<IGraphicBufferProducer> &bufferProducer);
- virtual status_t prepare();
- virtual status_t prepareAsync();
- virtual status_t start();
- virtual status_t stop();
- virtual status_t pause();
- virtual bool isPlaying();
- virtual status_t seekTo(int msec);
- virtual status_t getCurrentPosition(int *msec);
- virtual status_t getDuration(int *msec);
- virtual status_t reset();
- virtual status_t setLooping(int loop);
- virtual player_type playerType();
- virtual status_t invoke(const Parcel &request, Parcel *reply);
- virtual void setAudioSink(const sp<AudioSink> &audioSink);
- virtual status_t setParameter(int key, const Parcel &request);
- virtual status_t getParameter(int key, Parcel *reply);
- virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
- virtual status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
-
- virtual status_t getMetadata(
- const media::Metadata::Filter& ids, Parcel *records);
-
- virtual status_t dump(int fd, const Vector<String16> &args) const;
-
-private:
- AwesomePlayer *mPlayer;
-
- StagefrightPlayer(const StagefrightPlayer &);
- StagefrightPlayer &operator=(const StagefrightPlayer &);
-};
-
-} // namespace android
-
-#endif // ANDROID_STAGEFRIGHTPLAYER_H
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e521fae..442dba1 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -19,6 +19,7 @@
#include <inttypes.h>
#include <utils/Log.h>
+#include <inttypes.h>
#include "WebmWriter.h"
#include "StagefrightRecorder.h"
@@ -43,6 +44,7 @@
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/WAVEWriter.h>
#include <media/MediaProfiles.h>
#include <camera/ICamera.h>
#include <camera/CameraParameters.h>
@@ -55,9 +57,12 @@
#include <system/audio.h>
#include "ARTPWriter.h"
+#include <stagefright/AVExtensions.h>
namespace android {
+static const int64_t kMax32BitFileSize = 0x00ffffffffLL; // 4GB
+
// To collect the encoder usage for the battery app
static void addBatteryData(uint32_t params) {
sp<IBinder> binder =
@@ -75,7 +80,8 @@ StagefrightRecorder::StagefrightRecorder(const String16 &opPackageName)
mOutputFd(-1),
mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
- mStarted(false) {
+ mStarted(false),
+ mRecPaused(false) {
ALOGV("Constructor");
reset();
@@ -179,7 +185,8 @@ status_t StagefrightRecorder::setAudioEncoder(audio_encoder ae) {
status_t StagefrightRecorder::setVideoEncoder(video_encoder ve) {
ALOGV("setVideoEncoder: %d", ve);
if (ve < VIDEO_ENCODER_DEFAULT ||
- ve >= VIDEO_ENCODER_LIST_END) {
+ (ve >= VIDEO_ENCODER_LIST_END && ve <= VIDEO_ENCODER_LIST_VENDOR_START) ||
+ ve >= VIDEO_ENCODER_LIST_VENDOR_END) {
ALOGE("Invalid video encoder: %d", ve);
return BAD_VALUE;
}
@@ -249,7 +256,7 @@ status_t StagefrightRecorder::setInputSurface(
}
status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
- ALOGV("setOutputFile: %d, %lld, %lld", fd, offset, length);
+ ALOGV("setOutputFile: %d, %" PRId64 ", %" PRId64 "", fd, offset, length);
// These don't make any sense, do they?
CHECK_EQ(offset, 0ll);
CHECK_EQ(length, 0ll);
@@ -364,7 +371,7 @@ status_t StagefrightRecorder::setParamAudioSamplingRate(int32_t sampleRate) {
status_t StagefrightRecorder::setParamAudioNumberOfChannels(int32_t channels) {
ALOGV("setParamAudioNumberOfChannels: %d", channels);
- if (channels <= 0 || channels >= 3) {
+ if (channels <= 0 || channels >= 7) {
ALOGE("Invalid number of audio channels: %d", channels);
return BAD_VALUE;
}
@@ -416,42 +423,46 @@ status_t StagefrightRecorder::setParamVideoRotation(int32_t degrees) {
}
status_t StagefrightRecorder::setParamMaxFileDurationUs(int64_t timeUs) {
- ALOGV("setParamMaxFileDurationUs: %lld us", timeUs);
+ ALOGV("setParamMaxFileDurationUs: %" PRId64 " us", timeUs);
// This is meant for backward compatibility for MediaRecorder.java
if (timeUs <= 0) {
- ALOGW("Max file duration is not positive: %lld us. Disabling duration limit.", timeUs);
+ ALOGW("Max file duration is not positive: %" PRId64 " us. Disabling duration limit.", timeUs);
timeUs = 0; // Disable the duration limit for zero or negative values.
} else if (timeUs <= 100000LL) { // XXX: 100 milli-seconds
- ALOGE("Max file duration is too short: %lld us", timeUs);
+ ALOGE("Max file duration is too short: %" PRId64 " us", timeUs);
return BAD_VALUE;
}
if (timeUs <= 15 * 1000000LL) {
- ALOGW("Target duration (%lld us) too short to be respected", timeUs);
+ ALOGW("Target duration (%" PRId64 " us) too short to be respected", timeUs);
}
mMaxFileDurationUs = timeUs;
return OK;
}
status_t StagefrightRecorder::setParamMaxFileSizeBytes(int64_t bytes) {
- ALOGV("setParamMaxFileSizeBytes: %lld bytes", bytes);
+ ALOGV("setParamMaxFileSizeBytes: %" PRId64 " bytes", bytes);
// This is meant for backward compatibility for MediaRecorder.java
if (bytes <= 0) {
- ALOGW("Max file size is not positive: %lld bytes. "
+ ALOGW("Max file size is not positive: %" PRId64 " bytes. "
"Disabling file size limit.", bytes);
bytes = 0; // Disable the file size limit for zero or negative values.
} else if (bytes <= 1024) { // XXX: 1 kB
- ALOGE("Max file size is too small: %lld bytes", bytes);
+ ALOGE("Max file size is too small: %" PRId64 " bytes", bytes);
return BAD_VALUE;
}
if (bytes <= 100 * 1024) {
- ALOGW("Target file size (%lld bytes) is too small to be respected", bytes);
+ ALOGW("Target file size (%" PRId64 " bytes) is too small to be respected", bytes);
}
mMaxFileSizeBytes = bytes;
+
+ // If requested size is >4GB, force 64-bit offsets
+ mUse64BitFileOffset |= (bytes >= kMax32BitFileSize);
+
return OK;
}
@@ -500,9 +511,9 @@ status_t StagefrightRecorder::setParamVideoCameraId(int32_t cameraId) {
}
status_t StagefrightRecorder::setParamTrackTimeStatus(int64_t timeDurationUs) {
- ALOGV("setParamTrackTimeStatus: %lld", timeDurationUs);
+ ALOGV("setParamTrackTimeStatus: %" PRId64 "", timeDurationUs);
if (timeDurationUs < 20000) { // Infeasible if shorter than 20 ms?
- ALOGE("Tracking time duration too short: %lld us", timeDurationUs);
+ ALOGE("Tracking time duration too short: %" PRId64 " us", timeDurationUs);
return BAD_VALUE;
}
mTrackEveryTimeDurationUs = timeDurationUs;
@@ -581,11 +592,11 @@ status_t StagefrightRecorder::setParamCaptureFpsEnable(int32_t captureFpsEnable)
status_t StagefrightRecorder::setParamCaptureFps(float fps) {
ALOGV("setParamCaptureFps: %.2f", fps);
- int64_t timeUs = (int64_t) (1000000.0 / fps + 0.5f);
+ int64_t timeUs = (int64_t) (1000000.0f / fps + 0.5f);
// Not allowing time more than a day
if (timeUs <= 0 || timeUs > 86400*1E6) {
- ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", timeUs);
+ ALOGE("Time between frame capture (%" PRId64 ") is out of range [0, 1 Day]", timeUs);
return BAD_VALUE;
}
@@ -813,9 +824,15 @@ status_t StagefrightRecorder::prepareInternal() {
status = setupMPEG2TSRecording();
break;
+ case OUTPUT_FORMAT_WAVE:
+ status = setupWAVERecording();
+ break;
+
default:
- ALOGE("Unsupported output file format: %d", mOutputFormat);
- status = UNKNOWN_ERROR;
+ if (handleCustomRecording() != OK) {
+ ALOGE("Unsupported output file format: %d", mOutputFormat);
+ status = UNKNOWN_ERROR;
+ }
break;
}
@@ -836,6 +853,22 @@ status_t StagefrightRecorder::start() {
return INVALID_OPERATION;
}
+ if (mRecPaused == true) {
+ status_t err = mWriter->start();
+ if (err != OK) {
+ ALOGE("Writer start in StagefrightRecorder pause failed");
+ return err;
+ }
+
+ err = setSourcePause(false);
+ if (err != OK) {
+ ALOGE("Source start after pause failed");
+ return err;
+ }
+
+ mRecPaused = false;
+ return OK;
+ }
status_t status = OK;
if (mVideoSource != VIDEO_SOURCE_SURFACE) {
@@ -872,6 +905,7 @@ status_t StagefrightRecorder::start() {
case OUTPUT_FORMAT_AAC_ADTS:
case OUTPUT_FORMAT_RTP_AVP:
case OUTPUT_FORMAT_MPEG2TS:
+ case OUTPUT_FORMAT_WAVE:
{
status = mWriter->start();
break;
@@ -879,8 +913,10 @@ status_t StagefrightRecorder::start() {
default:
{
- ALOGE("Unsupported output file format: %d", mOutputFormat);
- status = UNKNOWN_ERROR;
+ if (handleCustomOutputFormats() != OK) {
+ ALOGE("Unsupported output file format: %d", mOutputFormat);
+ status = UNKNOWN_ERROR;
+ }
break;
}
}
@@ -927,13 +963,36 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
}
}
- sp<AudioSource> audioSource =
- new AudioSource(
- mAudioSource,
- mOpPackageName,
- sourceSampleRate,
- mAudioChannels,
- mSampleRate);
+ // If using QCOM extension (Camera 1 HAL) for slow motion recording
+ // mCaptureFpsEnable and mCaptureFps will not be set via setCaptureRate
+ // We need to query from AVUtil, in order to support slow motion audio recording
+ if (mVideoSourceNode != NULL) {
+ int hfrRatio = AVUtils::get()->HFRUtils().getHFRRatio(mVideoSourceNode->getFormat());
+ if (hfrRatio != 1) {
+ // Upscale the sample rate for slow motion recording.
+ // Fail audio source creation if source sample rate is too high, as it could
+ // cause out-of-memory due to large input buffer size. And audio recording
+ // probably doesn't make sense in the scenario, since the slow-down factor
+ // is probably huge (eg. mSampleRate=48K, hfrRatio=240, mFrameRate=1).
+ const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
+ sourceSampleRate =
+ (mSampleRate * hfrRatio + mFrameRate / 2) / mFrameRate;
+ if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
+ ALOGE("source sample rate out of range! "
+ "(mSampleRate %d, hfrRatio %d, mFrameRate %d",
+ mSampleRate, hfrRatio, mFrameRate);
+ return NULL;
+ }
+ }
+ }
+
+ sp<AudioSource> audioSource = AVFactory::get()->createAudioSource(
+ mAudioSource,
+ mOpPackageName,
+ sourceSampleRate,
+ mAudioChannels,
+ mSampleRate);
status_t err = audioSource->initCheck();
@@ -963,10 +1022,15 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
format->setInt32("aac-profile", OMX_AUDIO_AACObjectELD);
break;
+ case AUDIO_ENCODER_LPCM:
+ format->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
+ break;
default:
- ALOGE("Unknown audio encoder: %d", mAudioEncoder);
- return NULL;
+ if (handleCustomAudioSource(format) != OK) {
+ ALOGE("Unknown audio encoder: %d", mAudioEncoder);
+ return NULL;
+ }
}
int32_t maxInputSize;
@@ -984,12 +1048,20 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
sp<MediaSource> audioEncoder =
MediaCodecSource::Create(mLooper, format, audioSource);
+ // If encoder could not be created (as in LPCM), then
+ // use the AudioSource directly as the MediaSource.
+ if (audioEncoder == NULL &&
+ mAudioEncoder == AUDIO_ENCODER_LPCM) {
+ ALOGD("No encoder is needed for linear PCM format");
+ audioEncoder = audioSource;
+ }
mAudioSourceNode = audioSource;
if (audioEncoder == NULL) {
ALOGE("Failed to create audio encoder");
}
+ mAudioEncoderOMX = audioEncoder;
return audioEncoder;
}
@@ -1155,6 +1227,15 @@ status_t StagefrightRecorder::setupMPEG2TSRecording() {
return OK;
}
+status_t StagefrightRecorder::setupWAVERecording() {
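+ // WAVE output carries raw LPCM, so the AudioSource feeds the WAVEWriter directly (no encoder; see createAudioSource()).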
+ CHECK(mOutputFormat == OUTPUT_FORMAT_WAVE);
+ CHECK(mAudioEncoder == AUDIO_ENCODER_LPCM);
+ CHECK(mAudioSource != AUDIO_SOURCE_CNT);
+
+ mWriter = new WAVEWriter(mOutputFd);
+ return setupRawAudioRecording();
+}
+
void StagefrightRecorder::clipVideoFrameRate() {
ALOGV("clipVideoFrameRate: encoder %d", mVideoEncoder);
if (mFrameRate == -1) {
@@ -1222,7 +1303,8 @@ status_t StagefrightRecorder::checkVideoEncoderCapabilities() {
(mVideoEncoder == VIDEO_ENCODER_H263 ? MEDIA_MIMETYPE_VIDEO_H263 :
mVideoEncoder == VIDEO_ENCODER_MPEG_4_SP ? MEDIA_MIMETYPE_VIDEO_MPEG4 :
mVideoEncoder == VIDEO_ENCODER_VP8 ? MEDIA_MIMETYPE_VIDEO_VP8 :
- mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
+ mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC :
+ mVideoEncoder == VIDEO_ENCODER_H265 ? MEDIA_MIMETYPE_VIDEO_HEVC : ""),
false /* decoder */, true /* hwCodec */, &codecs);
if (!mCaptureFpsEnable) {
@@ -1309,8 +1391,10 @@ void StagefrightRecorder::setDefaultVideoEncoderIfNecessary() {
int videoCodec = mEncoderProfiles->getCamcorderProfileParamByName(
"vid.codec", mCameraId, CAMCORDER_QUALITY_LOW);
- if (videoCodec > VIDEO_ENCODER_DEFAULT &&
- videoCodec < VIDEO_ENCODER_LIST_END) {
+ if ((videoCodec > VIDEO_ENCODER_DEFAULT &&
+ videoCodec < VIDEO_ENCODER_LIST_END) ||
+ (videoCodec > VIDEO_ENCODER_LIST_VENDOR_START &&
+ videoCodec < VIDEO_ENCODER_LIST_VENDOR_END)) {
mVideoEncoder = (video_encoder)videoCodec;
} else {
// default to H.264 if camcorder profile not available
@@ -1440,22 +1524,23 @@ status_t StagefrightRecorder::setupCameraSource(
videoSize.height = mVideoHeight;
if (mCaptureFpsEnable) {
if (mTimeBetweenCaptureUs < 0) {
- ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
+ ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %" PRId64 "",
mTimeBetweenCaptureUs);
return BAD_VALUE;
}
- mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
+ mCameraSourceTimeLapse = AVFactory::get()->CreateCameraSourceTimeLapseFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate, mPreviewSurface,
mTimeBetweenCaptureUs);
*cameraSource = mCameraSourceTimeLapse;
} else {
- *cameraSource = CameraSource::CreateFromCamera(
+ *cameraSource = AVFactory::get()->CreateCameraSourceFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate,
mPreviewSurface);
}
+ AVUtils::get()->cacheCaptureBuffers(mCamera, mVideoEncoder);
mCamera.clear();
mCameraProxy.clear();
if (*cameraSource == NULL) {
@@ -1487,6 +1572,15 @@ status_t StagefrightRecorder::setupCameraSource(
return OK;
}
+bool StagefrightRecorder::setCustomVideoEncoderMime(const video_encoder videoEncoder,
+ sp<AMessage> format) {
+ if (videoEncoder == VIDEO_ENCODER_H265) {
+ format->setString("mime", MEDIA_MIMETYPE_VIDEO_HEVC);
+ return true;
+ }
+ return false;
+}
+
status_t StagefrightRecorder::setupVideoEncoder(
sp<MediaSource> cameraSource,
sp<MediaSource> *source) {
@@ -1512,6 +1606,9 @@ status_t StagefrightRecorder::setupVideoEncoder(
break;
default:
+ if (setCustomVideoEncoderMime(mVideoEncoder, format)) {
+ break;
+ }
CHECK(!"Should not be here, unsupported video encoding.");
break;
}
@@ -1535,13 +1632,13 @@ status_t StagefrightRecorder::setupVideoEncoder(
format->setInt32("width", mVideoWidth);
format->setInt32("height", mVideoHeight);
format->setInt32("stride", mVideoWidth);
- format->setInt32("slice-height", mVideoWidth);
+ format->setInt32("slice-height", mVideoHeight);
format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
// set up time lapse/slow motion for surface source
if (mCaptureFpsEnable) {
if (mTimeBetweenCaptureUs <= 0) {
- ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
+ ALOGE("Invalid mTimeBetweenCaptureUs value: %" PRId64 "",
mTimeBetweenCaptureUs);
return BAD_VALUE;
}
@@ -1553,6 +1650,10 @@ status_t StagefrightRecorder::setupVideoEncoder(
format->setInt32("frame-rate", mFrameRate);
format->setInt32("i-frame-interval", mIFramesIntervalSec);
+ if (cameraSource != NULL) {
+ setupCustomVideoEncoderParams(cameraSource, format);
+ }
+
if (mVideoTimeScale > 0) {
format->setInt32("time-scale", mVideoTimeScale);
}
@@ -1594,6 +1695,8 @@ status_t StagefrightRecorder::setupVideoEncoder(
mGraphicBufferProducer = encoder->getGraphicBufferProducer();
}
+ mVideoSourceNode = cameraSource;
+ mVideoEncoderOMX = encoder;
*source = encoder;
return OK;
@@ -1611,11 +1714,14 @@ status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
case AUDIO_ENCODER_AAC:
case AUDIO_ENCODER_HE_AAC:
case AUDIO_ENCODER_AAC_ELD:
+ case AUDIO_ENCODER_LPCM:
break;
default:
- ALOGE("Unsupported audio encoder: %d", mAudioEncoder);
- return UNKNOWN_ERROR;
+ if (handleCustomAudioEncoder() != OK) {
+ ALOGE("Unsupported audio encoder: %d", mAudioEncoder);
+ return UNKNOWN_ERROR;
+ }
}
sp<MediaSource> audioEncoder = createAudioSource();
@@ -1637,7 +1743,7 @@ status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
writer = new WebmWriter(mOutputFd);
} else {
- writer = mp4writer = new MPEG4Writer(mOutputFd);
+ writer = mp4writer = AVFactory::get()->CreateMPEG4Writer(mOutputFd);
}
if (mVideoSource < VIDEO_SOURCE_LIST_END) {
@@ -1708,6 +1814,8 @@ status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
void StagefrightRecorder::setupMPEG4orWEBMMetaData(sp<MetaData> *meta) {
int64_t startTimeUs = systemTime() / 1000;
(*meta)->setInt64(kKeyTime, startTimeUs);
+ int64_t startTimeBootUs = systemTime(SYSTEM_TIME_BOOTTIME) / 1000;
+ (*meta)->setInt64(kKeyTimeBoot, startTimeBootUs);
(*meta)->setInt32(kKeyFileType, mOutputFormat);
(*meta)->setInt32(kKeyBitRate, mTotalBitRate);
if (mMovieTimeScale > 0) {
@@ -1726,10 +1834,23 @@ void StagefrightRecorder::setupMPEG4orWEBMMetaData(sp<MetaData> *meta) {
status_t StagefrightRecorder::pause() {
ALOGV("pause");
+ status_t err = OK;
if (mWriter == NULL) {
return UNKNOWN_ERROR;
}
- mWriter->pause();
+ err = setSourcePause(true);
+ if (err != OK) {
+ ALOGE("StagefrightRecorder pause failed");
+ return err;
+ }
+
+ err = mWriter->pause();
+ if (err != OK) {
+ ALOGE("Writer pause failed");
+ return err;
+ }
+
+ mRecPaused = true;
if (mStarted) {
mStarted = false;
@@ -1758,6 +1879,16 @@ status_t StagefrightRecorder::stop() {
mCameraSourceTimeLapse = NULL;
}
+ if (mRecPaused) {
+ status_t err = setSourcePause(false);
+ if (err != OK) {
+ ALOGE("Source start after pause in StagefrightRecorder stop failed");
+ return err;
+ }
+
+ mRecPaused = false;
+ }
+
if (mWriter != NULL) {
err = mWriter->stop();
mWriter.clear();
@@ -1927,4 +2058,89 @@ status_t StagefrightRecorder::dump(
::write(fd, result.string(), result.size());
return OK;
}
+
+status_t StagefrightRecorder::setSourcePause(bool pause) {
+ status_t err = OK;
+ if (pause) {
+ if (mVideoEncoderOMX != NULL) {
+ err = mVideoEncoderOMX->pause();
+ if (err != OK) {
+ ALOGE("OMX VideoEncoder pause failed");
+ return err;
+ }
+ }
+ if (mAudioEncoderOMX != NULL) {
+ if (mAudioEncoderOMX != mAudioSourceNode) {
+ err = mAudioEncoderOMX->pause();
+ if (err != OK) {
+ ALOGE("OMX AudioEncoder pause failed");
+ return err;
+ }
+ } else {
+ // If AudioSource is the same as MediaSource (as in LPCM),
+ // bypass omx encoder pause() call.
+ ALOGV("OMX AudioEncoder->pause() bypassed");
+ }
+ }
+ if (mVideoSourceNode != NULL) {
+ err = mVideoSourceNode->pause();
+ if (err != OK) {
+ ALOGE("OMX VideoSourceNode pause failed");
+ return err;
+ }
+ }
+ if (mAudioSourceNode != NULL) {
+ err = mAudioSourceNode->pause();
+ if (err != OK) {
+ ALOGE("OMX AudioSourceNode pause failed");
+ return err;
+ }
+ }
+ } else {
+ if (mVideoSourceNode != NULL) {
+ err = mVideoSourceNode->start();
+ if (err != OK) {
+ ALOGE("OMX VideoSourceNode start failed");
+ return err;
+ }
+ }
+ if (mAudioSourceNode != NULL) {
+ err = mAudioSourceNode->start();
+ if (err != OK) {
+ ALOGE("OMX AudioSourceNode start failed");
+ return err;
+ }
+ }
+ if (mVideoEncoderOMX != NULL) {
+ err = mVideoEncoderOMX->start();
+ if (err != OK) {
+ ALOGE("OMX VideoEncoder start failed");
+ return err;
+ }
+ }
+ if (mAudioEncoderOMX != NULL) {
+ if (mAudioEncoderOMX != mAudioSourceNode) {
+ err = mAudioEncoderOMX->start();
+ if (err != OK) {
+ ALOGE("OMX AudioEncoder start failed");
+ return err;
+ }
+ } else {
+ // If AudioSource is the same as MediaSource (as in LPCM),
+ // bypass omx encoder start() call.
+ ALOGV("OMX AudioEncoder->start() bypassed");
+ }
+ }
+ }
+ return err;
+}
+
+void StagefrightRecorder::setupCustomVideoEncoderParams(sp<MediaSource> cameraSource,
+ sp<AMessage> &format) {
+
+ // Setup HFR if needed
+ AVUtils::get()->HFRUtils().initializeHFR(cameraSource->getFormat(), format,
+ mMaxFileDurationUs, mVideoEncoder);
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index da00bc7..d93fc3b 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -39,6 +39,7 @@ class IGraphicBufferConsumer;
class IGraphicBufferProducer;
class SurfaceMediaSource;
struct ALooper;
+struct AMessage;
struct StagefrightRecorder : public MediaRecorderBase {
StagefrightRecorder(const String16 &opPackageName);
@@ -70,7 +71,7 @@ struct StagefrightRecorder : public MediaRecorderBase {
// Querying a SurfaceMediaSourcer
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const;
-private:
+protected:
sp<ICamera> mCamera;
sp<ICameraRecordingProxy> mCameraProxy;
sp<IGraphicBufferProducer> mPreviewSurface;
@@ -79,6 +80,9 @@ private:
String16 mClientName;
uid_t mClientUid;
sp<MediaWriter> mWriter;
+ sp<MediaSource> mVideoEncoderOMX;
+ sp<MediaSource> mAudioEncoderOMX;
+ sp<MediaSource> mVideoSourceNode;
int mOutputFd;
sp<AudioSource> mAudioSourceNode;
@@ -122,6 +126,7 @@ private:
MediaProfiles *mEncoderProfiles;
bool mStarted;
+ bool mRecPaused;
// Needed when GLFrames are encoded.
// An <IGraphicBufferProducer> pointer
// will be sent to the client side using which the
@@ -131,7 +136,7 @@ private:
static const int kMaxHighSpeedFps = 1000;
- status_t prepareInternal();
+ virtual status_t prepareInternal();
status_t setupMPEG4orWEBMRecording();
void setupMPEG4orWEBMMetaData(sp<MetaData> *meta);
status_t setupAMRRecording();
@@ -139,8 +144,8 @@ private:
status_t setupRawAudioRecording();
status_t setupRTPRecording();
status_t setupMPEG2TSRecording();
- sp<MediaSource> createAudioSource();
- status_t checkVideoEncoderCapabilities();
+ virtual sp<MediaSource> createAudioSource();
+ virtual status_t checkVideoEncoderCapabilities();
status_t checkAudioEncoderCapabilities();
// Generic MediaSource set-up. Returns the appropriate
// source (CameraSource or SurfaceMediaSource)
@@ -148,10 +153,13 @@ private:
status_t setupMediaSource(sp<MediaSource> *mediaSource);
status_t setupCameraSource(sp<CameraSource> *cameraSource);
status_t setupAudioEncoder(const sp<MediaWriter>& writer);
- status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaSource> *source);
+ virtual status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaSource> *source);
+ virtual void setupCustomVideoEncoderParams(sp<MediaSource> cameraSource,
+ sp<AMessage> &format);
+ virtual bool setCustomVideoEncoderMime(const video_encoder videoEncoder, sp<AMessage> format);
// Encoding parameter handling utilities
- status_t setParameter(const String8 &key, const String8 &value);
+ virtual status_t setParameter(const String8 &key, const String8 &value);
status_t setParamAudioEncodingBitRate(int32_t bitRate);
status_t setParamAudioNumberOfChannels(int32_t channles);
status_t setParamAudioSamplingRate(int32_t sampleRate);
@@ -181,11 +189,19 @@ private:
void clipAudioSampleRate();
void clipNumberOfAudioChannels();
void setDefaultProfileIfNecessary();
- void setDefaultVideoEncoderIfNecessary();
+ virtual void setDefaultVideoEncoderIfNecessary();
+ virtual status_t handleCustomOutputFormats() {return UNKNOWN_ERROR;}
+ virtual status_t handleCustomRecording() {return UNKNOWN_ERROR;}
+ virtual status_t handleCustomAudioSource(sp<AMessage> /*format*/) {return UNKNOWN_ERROR;}
+ virtual status_t handleCustomAudioEncoder() {return UNKNOWN_ERROR;}
StagefrightRecorder(const StagefrightRecorder &);
StagefrightRecorder &operator=(const StagefrightRecorder &);
+
+ status_t setupWAVERecording();
+public:
+ virtual status_t setSourcePause(bool pause);
};
} // namespace android
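
The StagefrightRecorder.h hunks above turn the recorder into an extension point: members move from private to protected, several methods become virtual, and the new handleCustom*() hooks default to UNKNOWN_ERROR so the stock error paths still run unless a subclass claims the case. Below is a minimal, illustrative sketch of such a subclass; the class name, the AUDIO_ENCODER_VENDOR value and the MIME string are hypothetical, and only the hook signatures are taken from the header above.

    // Sketch only, not part of this patch. Assumes the protected members and
    // virtual hooks declared in StagefrightRecorder.h above.
    struct ExtendedRecorder : public StagefrightRecorder {
        explicit ExtendedRecorder(const String16 &opPackageName)
            : StagefrightRecorder(opPackageName) {}

    protected:
        // Claim an encoder value the base class rejects; returning OK keeps
        // setupAudioEncoder()/createAudioSource() from bailing out.
        virtual status_t handleCustomAudioEncoder() {
            return (mAudioEncoder == AUDIO_ENCODER_VENDOR) ? OK : UNKNOWN_ERROR;
        }
        virtual status_t handleCustomAudioSource(sp<AMessage> format) {
            if (mAudioEncoder != AUDIO_ENCODER_VENDOR) {          // hypothetical enum value
                return UNKNOWN_ERROR;
            }
            format->setString("mime", "audio/x-vendor-example");  // hypothetical MIME type
            return OK;
        }
    };
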
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index cd20837..ff2a202 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -23,15 +23,25 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/rtsp \
$(TOP)/frameworks/av/media/libstagefright/timedtext \
$(TOP)/frameworks/av/media/libmediaplayerservice \
- $(TOP)/frameworks/native/include/media/openmax
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/av/include/media \
-LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CFLAGS += -Werror -Wall #-DLOG_NDEBUG=0
# enable experiments only in userdebug and eng builds
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
endif
+ifeq ($(TARGET_BOARD_PLATFORM),msm8974)
+LOCAL_CFLAGS += -DTARGET_8974
+endif
+
+ifeq ($(TARGET_NUPLAYER_CANNOT_SET_SURFACE_WITHOUT_A_FLUSH),true)
+LOCAL_CFLAGS += -DCANNOT_SET_SURFACE_WITHOUT_A_FLUSH
+endif
+
LOCAL_CLANG := true
LOCAL_MODULE:= libstagefright_nuplayer
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index e8c28d5..949c12f 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -37,6 +37,7 @@
#include "../../libstagefright/include/NuCachedSource2.h"
#include "../../libstagefright/include/WVMExtractor.h"
#include "../../libstagefright/include/HTTPBase.h"
+#include "mediaplayerservice/AVNuExtensions.h"
namespace android {
@@ -60,6 +61,7 @@ NuPlayer::GenericSource::GenericSource(
mAudioIsVorbis(false),
mIsWidevine(false),
mIsSecure(false),
+ mUseSetBuffers(false),
mIsStreaming(false),
mUIDValid(uidValid),
mUID(uid),
@@ -126,6 +128,7 @@ status_t NuPlayer::GenericSource::setDataSource(
status_t NuPlayer::GenericSource::setDataSource(const sp<DataSource>& source) {
resetDataSource();
+ Mutex::Autolock _l(mSourceLock);
mDataSource = source;
return OK;
}
@@ -138,14 +141,14 @@ status_t NuPlayer::GenericSource::initFromDataSource() {
sp<MediaExtractor> extractor;
String8 mimeType;
float confidence;
- sp<AMessage> dummy;
+ sp<AMessage> meta;
bool isWidevineStreaming = false;
CHECK(mDataSource != NULL);
if (mIsWidevine) {
isWidevineStreaming = SniffWVM(
- mDataSource, &mimeType, &confidence, &dummy);
+ mDataSource, &mimeType, &confidence, &meta);
if (!isWidevineStreaming ||
strcasecmp(
mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
@@ -153,7 +156,12 @@ status_t NuPlayer::GenericSource::initFromDataSource() {
return UNKNOWN_ERROR;
}
} else if (mIsStreaming) {
- if (!mDataSource->sniff(&mimeType, &confidence, &dummy)) {
+ sp<DataSource> dataSource;
+ {
+ Mutex::Autolock _l(mSourceLock);
+ dataSource = mDataSource;
+ }
+ if (!dataSource->sniff(&mimeType, &confidence, &meta)) {
return UNKNOWN_ERROR;
}
isWidevineStreaming = !strcasecmp(
@@ -171,8 +179,14 @@ status_t NuPlayer::GenericSource::initFromDataSource() {
}
extractor = mWVMExtractor;
} else {
+#ifndef TARGET_8974
+ int32_t flags = AVNuUtils::get()->getFlags();
+#else
+ int32_t flags = 0;
+#endif
extractor = MediaExtractor::Create(mDataSource,
- mimeType.isEmpty() ? NULL : mimeType.string());
+ mimeType.isEmpty() ? NULL : mimeType.string(),
+ mIsStreaming ? 0 : flags, &meta);
}
if (extractor == NULL) {
@@ -202,6 +216,13 @@ status_t NuPlayer::GenericSource::initFromDataSource() {
}
}
+#ifndef TARGET_8974
+ if (AVNuUtils::get()->canUseSetBuffers(mFileMeta)) {
+ mUseSetBuffers = true;
+ ALOGI("setBuffers mode enabled");
+ }
+#endif
+
int32_t totalBitrate = 0;
size_t numtracks = extractor->countTracks();
@@ -346,7 +367,7 @@ void NuPlayer::GenericSource::prepareAsync() {
if (mLooper == NULL) {
mLooper = new ALooper;
mLooper->setName("generic");
- mLooper->start();
+ mLooper->start(false, false, PRIORITY_AUDIO);
mLooper->registerHandler(this);
}
@@ -378,13 +399,20 @@ void NuPlayer::GenericSource::onPrepareAsync() {
}
}
- mDataSource = DataSource::CreateFromURI(
+ sp<DataSource> dataSource;
+ dataSource = DataSource::CreateFromURI(
mHTTPService, uri, &mUriHeaders, &contentType,
- static_cast<HTTPBase *>(mHttpSource.get()));
+ static_cast<HTTPBase *>(mHttpSource.get()),
+ true /*use extended cache*/);
+ Mutex::Autolock _l(mSourceLock);
+ mDataSource = dataSource;
} else {
mIsWidevine = false;
- mDataSource = new FileSource(mFd, mOffset, mLength);
+ sp<DataSource> dataSource;
+ dataSource = new FileSource(mFd, mOffset, mLength);
+ Mutex::Autolock _l(mSourceLock);
+ mDataSource = dataSource;
mFd = -1;
}
@@ -433,7 +461,8 @@ void NuPlayer::GenericSource::onPrepareAsync() {
| FLAG_CAN_PAUSE
| FLAG_CAN_SEEK_BACKWARD
| FLAG_CAN_SEEK_FORWARD
- | FLAG_CAN_SEEK);
+ | FLAG_CAN_SEEK
+ | (mUseSetBuffers ? FLAG_USE_SET_BUFFERS : 0));
if (mIsSecure) {
// secure decoders must be instantiated before starting widevine source
@@ -1030,7 +1059,8 @@ status_t NuPlayer::GenericSource::dequeueAccessUnit(
// start pulling in more buffers if we only have one (or no) buffer left
// so that decoder has less chance of being starved
- if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
+ if ((track->mPackets->getAvailableBufferCount(&finalResult) < 2)
+ && !mUseSetBuffers) {
postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
}
@@ -1374,7 +1404,7 @@ sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer(
}
sp<ABuffer> ab;
- if (mIsSecure && !audio) {
+ if ((mIsSecure || mUseSetBuffers) && !audio) {
// data is already provided in the buffer
ab = new ABuffer(NULL, mb->range_length());
mb->add_ref();
@@ -1489,7 +1519,9 @@ void NuPlayer::GenericSource::readBuffer(
break;
case MEDIA_TRACK_TYPE_AUDIO:
track = &mAudioTrack;
- if (mIsWidevine) {
+ if (mHttpSource != NULL && getTrackCount() == 1) {
+ maxBuffers = 16;
+ } else if (mIsWidevine || (mHttpSource != NULL)) {
maxBuffers = 8;
} else {
maxBuffers = 64;
@@ -1520,9 +1552,10 @@ void NuPlayer::GenericSource::readBuffer(
if (seekTimeUs >= 0) {
options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
seeking = true;
+ track->mPackets->clear();
}
- if (mIsWidevine) {
+ if (mIsWidevine || mUseSetBuffers) {
options.setNonBlocking();
}
@@ -1544,7 +1577,8 @@ void NuPlayer::GenericSource::readBuffer(
queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
sp<ABuffer> buffer = mediaBufferToABuffer(
- mbuf, trackType, seekTimeUs, actualTimeUs);
+ mbuf, trackType, seekTimeUs,
+ numBuffers == 0 ? actualTimeUs : NULL);
track->mPackets->queueAccessUnit(buffer);
formatChange = false;
seeking = false;
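
The GenericSource changes above all apply the same copy-under-lock discipline to mDataSource: the sp<> is assigned or copied while mSourceLock is held, and the slow work (sniffing, extractor creation) then runs on the local copy outside the lock. A condensed illustration of the pattern, using only names that appear in the diff above:

    // Illustration of the locking pattern used above; not an additional change.
    sp<DataSource> dataSource;
    {
        Mutex::Autolock _l(mSourceLock);  // protect only the shared member
        dataSource = mDataSource;         // grab a strong reference
    }
    if (dataSource != NULL) {
        // long-running call happens on the local sp<>, not under the lock
        dataSource->sniff(&mimeType, &confidence, &meta);
    }
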
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index ac980ef..5deb61e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -84,7 +84,7 @@ protected:
virtual sp<MetaData> getFormatMeta(bool audio);
-private:
+protected:
enum {
kWhatPrepareAsync,
kWhatFetchSubtitleData,
@@ -126,6 +126,7 @@ private:
bool mAudioIsVorbis;
bool mIsWidevine;
bool mIsSecure;
+ bool mUseSetBuffers;
bool mIsStreaming;
bool mUIDValid;
uid_t mUID;
@@ -136,6 +137,7 @@ private:
int64_t mOffset;
int64_t mLength;
+ Mutex mSourceLock;
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<DataSource> mHttpSource;
@@ -164,7 +166,7 @@ private:
int64_t getLastReadPosition();
void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
- void notifyPreparedAndCleanup(status_t err);
+ virtual void notifyPreparedAndCleanup(status_t err);
void onSecureDecodersInstantiated(status_t err);
void finishPrepareAsync();
status_t startSources();
@@ -181,7 +183,7 @@ private:
void onSeek(sp<AMessage> msg);
status_t doSeek(int64_t seekTimeUs);
- void onPrepareAsync();
+ virtual void onPrepareAsync();
void fetchTextData(
uint32_t what, media_track_type type,
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 126625a..a57fdc1 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -30,6 +30,8 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/Utils.h>
+
namespace android {
@@ -118,6 +120,19 @@ sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
return format;
}
+sp<MetaData> NuPlayer::HTTPLiveSource::getFormatMeta(bool audio) {
+ sp<AMessage> format = getFormat(audio);
+
+ if (format == NULL) {
+ return NULL;
+ }
+
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(format, meta);
+ return meta;
+}
+
+
status_t NuPlayer::HTTPLiveSource::feedMoreTSData() {
return OK;
}
@@ -197,7 +212,11 @@ status_t NuPlayer::HTTPLiveSource::selectTrack(size_t trackIndex, bool select, i
}
status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) {
- return mLiveSession->seekTo(seekTimeUs);
+ if (mLiveSession->isSeekable()) {
+ return mLiveSession->seekTo(seekTimeUs);
+ } else {
+ return INVALID_OPERATION;
+ }
}
void NuPlayer::HTTPLiveSource::pollForRawData(
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 9e0ec2f..388156c 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -39,6 +39,7 @@ struct NuPlayer::HTTPLiveSource : public NuPlayer::Source {
virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
virtual sp<AMessage> getFormat(bool audio);
+ virtual sp<MetaData> getFormatMeta(bool audio);
virtual status_t feedMoreTSData();
virtual status_t getDuration(int64_t *durationUs);
@@ -53,7 +54,6 @@ protected:
virtual void onMessageReceived(const sp<AMessage> &msg);
-private:
enum Flags {
// Don't log any URLs.
kFlagIncognito = 1,
@@ -78,7 +78,7 @@ private:
bool mHasMetadata;
bool mMetadataSelected;
- void onSessionNotify(const sp<AMessage> &msg);
+ virtual void onSessionNotify(const sp<AMessage> &msg);
void pollForRawData(
const sp<AMessage> &msg, int32_t currentGeneration,
LiveSession::StreamType fetchType, int32_t pushWhat);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 26532d7..ef2e6ec 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -56,6 +56,7 @@
#include "ESDS.h"
#include <media/stagefright/Utils.h>
+#include "mediaplayerservice/AVNuExtensions.h"
namespace android {
@@ -130,6 +131,23 @@ private:
DISALLOW_EVIL_CONSTRUCTORS(FlushDecoderAction);
};
+struct NuPlayer::InstantiateDecoderAction : public Action {
+ InstantiateDecoderAction(bool audio, sp<DecoderBase> *decoder)
+ : mAudio(audio),
+ mdecoder(decoder) {
+ }
+
+ virtual void execute(NuPlayer *player) {
+ player->instantiateDecoder(mAudio, mdecoder);
+ }
+
+private:
+ bool mAudio;
+ sp<DecoderBase> *mdecoder;
+
+ DISALLOW_EVIL_CONSTRUCTORS(InstantiateDecoderAction);
+};
+
struct NuPlayer::PostMessageAction : public Action {
PostMessageAction(const sp<AMessage> &msg)
: mMessage(msg) {
@@ -171,6 +189,7 @@ NuPlayer::NuPlayer(pid_t pid)
mPID(pid),
mSourceFlags(0),
mOffloadAudio(false),
+ mOffloadDecodedPCM(false),
mAudioDecoderGeneration(0),
mVideoDecoderGeneration(0),
mRendererGeneration(0),
@@ -188,6 +207,7 @@ NuPlayer::NuPlayer(pid_t pid)
mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
mVideoFpsHint(-1.f),
mStarted(false),
+ mResetting(false),
mSourceStarted(false),
mPaused(false),
mPausedByClient(false),
@@ -216,7 +236,7 @@ void NuPlayer::setDataSourceAsync(const sp<IStreamSource> &source) {
msg->post();
}
-static bool IsHTTPLiveURL(const char *url) {
+bool NuPlayer::IsHTTPLiveURL(const char *url) {
if (!strncasecmp("http://", url, 7)
|| !strncasecmp("https://", url, 8)
|| !strncasecmp("file://", url, 7)) {
@@ -640,7 +660,10 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
// When mStarted is true, mSource must have been set.
if (mSource == NULL || !mStarted || mSource->getFormat(false /* audio */) == NULL
// NOTE: mVideoDecoder's mSurface is always non-null
- || (mVideoDecoder != NULL && mVideoDecoder->setVideoSurface(surface) == OK)) {
+#ifndef CANNOT_SET_SURFACE_WITHOUT_A_FLUSH
+ || (mVideoDecoder != NULL && mVideoDecoder->setVideoSurface(surface) == OK)
+#endif
+ ) {
performSetSurface(surface);
break;
}
@@ -658,10 +681,9 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
// If the video decoder is not set (perhaps audio only in this case)
// do not perform a seek as it is not needed.
int64_t currentPositionUs = 0;
- if (getCurrentPosition(&currentPositionUs) == OK) {
- mDeferredActions.push_back(
- new SeekAction(currentPositionUs));
- }
+ getCurrentPosition(&currentPositionUs);
+ mDeferredActions.push_back(
+ new SeekAction(currentPositionUs));
}
// If there is a new surface texture, instantiate decoders
@@ -912,10 +934,21 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
audio ? "audio" : "video", formatChange);
if (formatChange) {
- mDeferredActions.push_back(
- new FlushDecoderAction(
+ int32_t seamlessChange = 0;
+ if (msg->findInt32("video-seamlessChange", &seamlessChange) && seamlessChange) {
+ ALOGE("video decoder seamlessChange in smooth streaming mode, "
+ "flush the video decoder");
+ mDeferredActions.push_back(
+ new FlushDecoderAction(FLUSH_CMD_NONE, FLUSH_CMD_FLUSH));
+ mDeferredActions.push_back(new ResumeDecoderAction(false));
+ processDeferredActions();
+ break;
+ } else {
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
audio ? FLUSH_CMD_NONE : FLUSH_CMD_SHUTDOWN));
+ }
}
mDeferredActions.push_back(
@@ -1098,6 +1131,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
int32_t reason;
CHECK(msg->findInt32("reason", &reason));
ALOGV("Tear down audio with reason %d.", reason);
+ mAudioDecoder->pause();
mAudioDecoder.clear();
++mAudioDecoderGeneration;
bool needsToCreateAudioDecoder = true;
@@ -1122,6 +1156,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
mRenderer->flush(
false /* audio */, false /* notifyComplete */);
}
+ mRenderer->signalAudioTearDownComplete();
int64_t positionUs;
if (!msg->findInt64("positionUs", &positionUs)) {
@@ -1145,10 +1180,17 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
{
ALOGV("kWhatReset");
+ mResetting = true;
+
+ if (mAudioDecoder != NULL && mFlushingAudio == NONE) {
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ FLUSH_CMD_SHUTDOWN /* audio */,
+ FLUSH_CMD_SHUTDOWN /* video */));
+ }
+
mDeferredActions.push_back(
- new FlushDecoderAction(
- FLUSH_CMD_SHUTDOWN /* audio */,
- FLUSH_CMD_SHUTDOWN /* video */));
+ new SimpleAction(&NuPlayer::closeAudioSink));
mDeferredActions.push_back(
new SimpleAction(&NuPlayer::performReset));
@@ -1227,7 +1269,8 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
}
void NuPlayer::onResume() {
- if (!mPaused) {
+ if (!mPaused || mResetting) {
+ ALOGD_IF(mResetting, "resetting, onResume discarded");
return;
}
mPaused = false;
@@ -1236,6 +1279,13 @@ void NuPlayer::onResume() {
} else {
ALOGW("resume called when source is gone or not set");
}
+ if (mOffloadAudio && !mOffloadDecodedPCM) {
+ // Resuming after a pause timed out event, check if can continue with offload
+ sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+ sp<AMessage> format = mSource->getFormat(true /*audio*/);
+ const bool hasVideo = (videoFormat != NULL);
+ tryOpenAudioSinkForOffload(format, hasVideo);
+ }
// |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
// needed.
if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
@@ -1290,6 +1340,7 @@ void NuPlayer::onStart(int64_t startPositionUs) {
}
mOffloadAudio = false;
+ mOffloadDecodedPCM = false;
mAudioEOS = false;
mVideoEOS = false;
mStarted = true;
@@ -1300,7 +1351,10 @@ void NuPlayer::onStart(int64_t startPositionUs) {
flags |= Renderer::FLAG_REAL_TIME;
}
+ ALOGV("onStart");
sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ AVNuUtils::get()->setSourcePCMFormat(audioMeta);
+
audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
if (mAudioSink != NULL) {
streamType = mAudioSink->getAudioStreamType();
@@ -1310,6 +1364,10 @@ void NuPlayer::onStart(int64_t startPositionUs) {
mOffloadAudio =
canOffloadStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType);
+ if (!mOffloadAudio && (audioMeta != NULL)) {
+ mOffloadDecodedPCM = mOffloadAudio = canOffloadDecodedPCMStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType);
+ }
+
if (mOffloadAudio) {
flags |= Renderer::FLAG_OFFLOAD_AUDIO;
}
@@ -1317,7 +1375,7 @@ void NuPlayer::onStart(int64_t startPositionUs) {
sp<AMessage> notify = new AMessage(kWhatRendererNotify, this);
++mRendererGeneration;
notify->setInt32("generation", mRendererGeneration);
- mRenderer = new Renderer(mAudioSink, notify, flags);
+ mRenderer = AVNuFactory::get()->createRenderer(mAudioSink, notify, flags);
mRendererLooper = new ALooper;
mRendererLooper->setName("NuPlayerRenderer");
mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
@@ -1444,15 +1502,22 @@ void NuPlayer::postScanSources() {
void NuPlayer::tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo) {
// Note: This is called early in NuPlayer to determine whether offloading
// is possible; otherwise the decoders call the renderer openAudioSink directly.
-
+ sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ sp<AMessage> pcmFormat;
+ if (mOffloadDecodedPCM) {
+ sp<MetaData> pcm = AVNuUtils::get()->createPCMMetaFromSource(audioMeta);
+ audioMeta = pcm;
+ convertMetaDataToMessage(pcm, &pcmFormat);
+ }
status_t err = mRenderer->openAudioSink(
- format, true /* offloadOnly */, hasVideo, AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio);
+ mOffloadDecodedPCM ? pcmFormat : format,
+ true /* offloadOnly */, hasVideo, AUDIO_OUTPUT_FLAG_NONE,
+ &mOffloadAudio, mSource->isStreaming());
if (err != OK) {
// Any failure we turn off mOffloadAudio.
mOffloadAudio = false;
+ mOffloadDecodedPCM = false;
} else if (mOffloadAudio) {
- sp<MetaData> audioMeta =
- mSource->getFormatMeta(true /* audio */);
sendMetaDataToHal(mAudioSink, audioMeta);
}
}
@@ -1469,6 +1534,7 @@ void NuPlayer::determineAudioModeChange() {
if (mRenderer == NULL) {
ALOGW("No renderer can be used to determine audio mode. Use non-offload for safety.");
mOffloadAudio = false;
+ mOffloadDecodedPCM = false;
return;
}
@@ -1476,8 +1542,11 @@ void NuPlayer::determineAudioModeChange() {
sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
const bool hasVideo = (videoFormat != NULL);
- const bool canOffload = canOffloadStream(
+ bool canOffload = canOffloadStream(
audioMeta, hasVideo, mSource->isStreaming(), streamType);
+ if (!canOffload) {
+ mOffloadDecodedPCM = canOffload = canOffloadDecodedPCMStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType);
+ }
if (canOffload) {
if (!mOffloadAudio) {
mRenderer->signalEnableOffloadAudio();
@@ -1499,7 +1568,10 @@ status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
return OK;
}
-
+ if (mSource == NULL) {
+ ALOGD("%s Ignore instantiate decoder after clearing source", __func__);
+ return INVALID_OPERATION;
+ }
sp<AMessage> format = mSource->getFormat(audio);
if (format == NULL) {
@@ -1537,12 +1609,17 @@ status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
notify->setInt32("generation", mAudioDecoderGeneration);
determineAudioModeChange();
- if (mOffloadAudio) {
+
+ if (AVNuUtils::get()->isRAWFormat(format)) {
+ AVNuUtils::get()->setPCMFormat(format,
+ AVNuUtils::get()->getKeyPCMFormat(mSource->getFormatMeta(true /* audio */)));
+ }
+ if (mOffloadAudio && !ifDecodedPCMOffload()) {
const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
format->setInt32("has-video", hasVideo);
- *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
+ *decoder = AVNuFactory::get()->createPassThruDecoder(notify, mSource, mRenderer);
} else {
- *decoder = new Decoder(notify, mSource, mPID, mRenderer);
+ *decoder = AVNuFactory::get()->createDecoder(notify, mSource, mPID, mRenderer);
}
} else {
sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
@@ -1567,7 +1644,8 @@ status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
(*decoder)->configure(format);
// allocate buffers to decrypt widevine source buffers
- if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
+ if (!audio && ((mSourceFlags & Source::FLAG_SECURE) ||
+ (mSourceFlags & Source::FLAG_USE_SET_BUFFERS))) {
Vector<sp<ABuffer> > inputBufs;
CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
@@ -1680,6 +1758,18 @@ void NuPlayer::flushDecoder(bool audio, bool needShutdown) {
return;
}
+ FlushStatus *state = audio ? &mFlushingAudio : &mFlushingVideo;
+
+ bool inShutdown = *state != NONE &&
+ *state != FLUSHING_DECODER &&
+ *state != FLUSHED;
+
+ // Reject flush if the decoder state is not one of the above
+ if (inShutdown) {
+ ALOGI("flush %s called while in shutdown", audio ? "audio" : "video");
+ return;
+ }
+
// Make sure we don't continue to scan sources until we finish flushing.
++mScanSourcesGeneration;
if (mScanSourcesPending) {
@@ -1898,9 +1988,6 @@ void NuPlayer::performDecoderFlush(FlushCommand audio, FlushCommand video) {
void NuPlayer::performReset() {
ALOGV("performReset");
- CHECK(mAudioDecoder == NULL);
- CHECK(mVideoDecoder == NULL);
-
cancelPollDuration();
++mScanSourcesGeneration;
@@ -1930,6 +2017,7 @@ void NuPlayer::performReset() {
}
mStarted = false;
+ mResetting = false;
mSourceStarted = false;
}
@@ -2190,7 +2278,7 @@ void NuPlayer::onSourceNotify(const sp<AMessage> &msg) {
int posMs;
int64_t timeUs, posUs;
driver->getCurrentPosition(&posMs);
- posUs = posMs * 1000;
+ posUs = (int64_t) posMs * 1000ll;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
if (posUs < timeUs) {
@@ -2223,6 +2311,13 @@ void NuPlayer::onSourceNotify(const sp<AMessage> &msg) {
break;
}
+ case Source::kWhatRTCPByeReceived:
+ {
+ ALOGV("notify the client that Bye message is received");
+ notifyListener(MEDIA_INFO, 2000, 0);
+ break;
+ }
+
default:
TRESPASS();
}
@@ -2368,4 +2463,32 @@ void NuPlayer::Source::onMessageReceived(const sp<AMessage> & /* msg */) {
TRESPASS();
}
+bool NuPlayer::ifDecodedPCMOffload() {
+ return mOffloadDecodedPCM;
+}
+
+void NuPlayer::setDecodedPcmOffload(bool decodePcmOffload) {
+ mOffloadDecodedPCM = decodePcmOffload;
+}
+
+bool NuPlayer::canOffloadDecodedPCMStream(const sp<MetaData> audioMeta,
+ bool hasVideo, bool isStreaming, audio_stream_type_t streamType) {
+ const char *mime = NULL;
+
+ //For offloading decoded content
+ if (!mOffloadAudio && (audioMeta != NULL)) {
+ audioMeta->findCString(kKeyMIMEType, &mime);
+ sp<MetaData> audioPCMMeta =
+ AVNuUtils::get()->createPCMMetaFromSource(audioMeta);
+
+ ALOGI("canOffloadDecodedPCMStream");
+ audioPCMMeta->dumpToLog();
+ mOffloadDecodedPCM =
+ ((mime && !AVNuUtils::get()->pcmOffloadException(audioMeta)) &&
+ canOffloadStream(audioPCMMeta, hasVideo, isStreaming, streamType));
+ ALOGI("PCM offload decided: %d", mOffloadDecodedPCM);
+ }
+ return mOffloadDecodedPCM;
+}
+
} // namespace android
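
The InstantiateDecoderAction added near the top of NuPlayer.cpp follows the same deferred-action pattern as SeekAction and FlushDecoderAction; its call sites are outside this excerpt, so the following is only an illustrative sketch of how such an action is queued:

    // Illustrative only: defer decoder creation until earlier flush/shutdown
    // actions have been processed on the NuPlayer handler thread.
    mDeferredActions.push_back(
            new InstantiateDecoderAction(true /* audio */, &mAudioDecoder));
    mDeferredActions.push_back(
            new InstantiateDecoderAction(false /* audio */, &mVideoDecoder));
    processDeferredActions();
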
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index c9f0bbd..725a1b2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -41,7 +41,7 @@ struct NuPlayer : public AHandler {
void setDataSourceAsync(const sp<IStreamSource> &source);
- void setDataSourceAsync(
+ virtual void setDataSourceAsync(
const sp<IMediaHTTPService> &httpService,
const char *url,
const KeyedVector<String8, String8> *headers);
@@ -86,12 +86,16 @@ protected:
virtual ~NuPlayer();
virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual bool ifDecodedPCMOffload();
+ virtual void setDecodedPcmOffload(bool decodePcmOffload);
+ virtual bool canOffloadDecodedPCMStream(const sp<MetaData> meta,
+ bool hasVideo, bool isStreaming, audio_stream_type_t streamType);
+ static bool IsHTTPLiveURL(const char *url);
public:
struct NuPlayerStreamListener;
struct Source;
-private:
struct Decoder;
struct DecoderBase;
struct DecoderPassThrough;
@@ -106,9 +110,11 @@ private:
struct SetSurfaceAction;
struct ResumeDecoderAction;
struct FlushDecoderAction;
+ struct InstantiateDecoderAction;
struct PostMessageAction;
struct SimpleAction;
+protected:
enum {
kWhatSetDataSource = '=DaS',
kWhatPrepare = 'prep',
@@ -146,6 +152,7 @@ private:
sp<MediaPlayerBase::AudioSink> mAudioSink;
sp<DecoderBase> mVideoDecoder;
bool mOffloadAudio;
+ bool mOffloadDecodedPCM;
sp<DecoderBase> mAudioDecoder;
sp<CCDecoder> mCCDecoder;
sp<Renderer> mRenderer;
@@ -197,6 +204,7 @@ private:
AVSyncSettings mSyncSettings;
float mVideoFpsHint;
bool mStarted;
+ bool mResetting;
bool mSourceStarted;
// Actual pause state, either as requested by client or due to buffering.
@@ -221,11 +229,11 @@ private:
mFlushComplete[1][1] = false;
}
- void tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo);
+ virtual void tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo);
void closeAudioSink();
void determineAudioModeChange();
- status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
+ virtual status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
status_t onInstantiateSecureDecoders();
@@ -233,13 +241,13 @@ private:
const sp<AMessage> &inputFormat,
const sp<AMessage> &outputFormat = NULL);
- void notifyListener(int msg, int ext1, int ext2, const Parcel *in = NULL);
+ virtual void notifyListener(int msg, int ext1, int ext2, const Parcel *in = NULL);
void handleFlushComplete(bool audio, bool isDecoder);
void finishFlushIfPossible();
void onStart(int64_t startPositionUs = -1);
- void onResume();
+ virtual void onResume();
void onPause();
bool audioDecoderStillNeeded();
@@ -256,14 +264,14 @@ private:
void processDeferredActions();
- void performSeek(int64_t seekTimeUs);
+ virtual void performSeek(int64_t seekTimeUs);
void performDecoderFlush(FlushCommand audio, FlushCommand video);
void performReset();
void performScanSources();
void performSetSurface(const sp<Surface> &wrapper);
void performResumeDecoders(bool needNotify);
- void onSourceNotify(const sp<AMessage> &msg);
+ virtual void onSourceNotify(const sp<AMessage> &msg);
void onClosedCaptionNotify(const sp<AMessage> &msg);
void queueDecoderShutdown(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index ac3c6b6..2c07f28 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -235,6 +235,12 @@ bool NuPlayer::CCDecoder::parseSEINalUnit(
payload_size += last_byte;
} while (last_byte == 0xFF);
+ if (payload_size > SIZE_MAX / 8
+ || !br.atLeastNumBitsLeft(payload_size * 8)) {
+ ALOGV("Malformed SEI payload");
+ break;
+ }
+
// sei_payload()
if (payload_type == 4) {
bool isCC = false;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index c005f3f..a18e1da 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -34,10 +34,14 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
+#include <stagefright/AVExtensions.h>
+#include <stagefright/FFMPEGSoftCodec.h>
#include <gui/Surface.h>
#include "avc_utils.h"
#include "ATSParser.h"
+#include "mediaplayerservice/AVNuExtensions.h"
+
namespace android {
@@ -69,7 +73,7 @@ NuPlayer::Decoder::Decoder(
mIsSecure(false),
mFormatChangePending(false),
mTimeChangePending(false),
- mPaused(true),
+ mVideoFormatChangeDoFlushOnly(false),
mResumePending(false),
mComponentName("decoder") {
mCodecLooper = new ALooper;
@@ -78,7 +82,9 @@ NuPlayer::Decoder::Decoder(
}
NuPlayer::Decoder::~Decoder() {
- mCodec->release();
+ if (mCodec != NULL) {
+ mCodec->release();
+ }
releaseAndResetMediaBuffers();
}
@@ -238,6 +244,7 @@ void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
mFormatChangePending = false;
mTimeChangePending = false;
+ mVideoFormatChangeDoFlushOnly = false;
++mBufferGeneration;
@@ -251,8 +258,17 @@ void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
mComponentName.append(" decoder");
ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
- mCodec = MediaCodec::CreateByType(
- mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid);
+ mCodec = AVUtils::get()->createCustomComponentByName(mCodecLooper, mime.c_str(), false /* encoder */, format);
+ FFMPEGSoftCodec::overrideComponentName(0, format, &mComponentName, &mime, false);
+
+ if (mCodec == NULL) {
+ if (!mComponentName.startsWith(mime.c_str())) {
+ mCodec = MediaCodec::CreateByComponentName(mCodecLooper, mComponentName.c_str());
+ } else {
+ mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+ }
+ }
+
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
@@ -357,7 +373,14 @@ void NuPlayer::Decoder::onResume(bool notifyComplete) {
if (notifyComplete) {
mResumePending = true;
}
- mCodec->start();
+
+ if (mCodec != NULL) {
+ mCodec->start();
+ } else {
+ ALOGW("Decoder %s onResume without a valid codec object",
+ mComponentName.c_str());
+ handleError(NO_INIT);
+ }
}
void NuPlayer::Decoder::doFlush(bool notifyComplete) {
@@ -558,6 +581,11 @@ bool NuPlayer::Decoder::handleAnOutputBuffer(
sp<ABuffer> buffer;
mCodec->getOutputBuffer(index, &buffer);
+ if (buffer == NULL) {
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
if (index >= mOutputBuffers.size()) {
for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
mOutputBuffers.add();
@@ -569,6 +597,10 @@ bool NuPlayer::Decoder::handleAnOutputBuffer(
buffer->setRange(offset, size);
buffer->meta()->clear();
buffer->meta()->setInt64("timeUs", timeUs);
+ setPcmFormat(buffer->meta());
+#ifdef TARGET_8974
+ AVNuUtils::get()->addFlagsInMeta(buffer, flags, mIsAudio);
+#endif
bool eos = flags & MediaCodec::BUFFER_FLAG_EOS;
// we do not expect CODECCONFIG or SYNCFRAME for decoder
@@ -592,6 +624,12 @@ bool NuPlayer::Decoder::handleAnOutputBuffer(
}
mSkipRenderingUntilMediaTimeUs = -1;
+ } else if ((flags & MediaCodec::BUFFER_FLAG_DATACORRUPT) &&
+ AVNuUtils::get()->dropCorruptFrame()) {
+ ALOGV("[%s] dropping corrupt buffer at time %lld as requested.",
+ mComponentName.c_str(), (long long)timeUs);
+ reply->post();
+ return true;
}
mNumFramesTotal += !mIsAudio;
@@ -636,7 +674,7 @@ void NuPlayer::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
}
status_t err = mRenderer->openAudioSink(
- format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaed */);
+ format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaded */, mSource->isStreaming());
if (err != OK) {
handleError(err);
}
@@ -711,6 +749,7 @@ status_t NuPlayer::Decoder::fetchInputData(sp<AMessage> &reply) {
// treat seamless format change separately
formatChange = !seamlessFormatChange;
}
+ AVNuUtils::get()->checkFormatChange(&formatChange, accessUnit);
// For format or time change, return EOS to queue EOS input,
// then wait for EOS on output.
@@ -722,9 +761,20 @@ status_t NuPlayer::Decoder::fetchInputData(sp<AMessage> &reply) {
mTimeChangePending = true;
err = ERROR_END_OF_STREAM;
} else if (seamlessFormatChange) {
- // reuse existing decoder and don't flush
- rememberCodecSpecificData(newFormat);
- continue;
+ if (!mIsAudio &&
+ newFormat != NULL &&
+ newFormat->contains("prefer-adaptive-playback")) {
+ ALOGV("in smooth streaming mode, "
+ "do video flush in video seamless format change");
+ mFormatChangePending = true;
+ mVideoFormatChangeDoFlushOnly = true;
+ err = ERROR_END_OF_STREAM;
+ } else {
+ // reuse existing decoder and don't flush
+ rememberCodecSpecificData(newFormat);
+ continue;
+ }
+
} else {
// This stream is unaffected by the discontinuity
return -EWOULDBLOCK;
@@ -952,10 +1002,14 @@ void NuPlayer::Decoder::finishHandleDiscontinuity(bool flushOnTimeChange) {
sp<AMessage> msg = mNotify->dup();
msg->setInt32("what", kWhatInputDiscontinuity);
msg->setInt32("formatChange", mFormatChangePending);
+ if (mVideoFormatChangeDoFlushOnly) {
+ msg->setInt32("video-seamlessChange", mVideoFormatChangeDoFlushOnly);
+ }
msg->post();
mFormatChangePending = false;
mTimeChangePending = false;
+ mVideoFormatChangeDoFlushOnly = false;
}
bool NuPlayer::Decoder::supportsSeamlessAudioFormatChange(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index eeb4af4..1fbefda 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -17,9 +17,13 @@
#ifndef NUPLAYER_DECODER_H_
#define NUPLAYER_DECODER_H_
-#include "NuPlayer.h"
+#include <media/stagefright/foundation/AMessage.h>
+#include "NuPlayer.h"
#include "NuPlayerDecoderBase.h"
+#include "NuPlayerSource.h"
+
+#include "mediaplayerservice/AVNuExtensions.h"
namespace android {
@@ -49,8 +53,9 @@ protected:
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
virtual bool doRequestBuffers();
+ virtual void setPcmFormat(const sp<AMessage> &format) { format->setInt32("pcm-format",
+ AVNuUtils::get()->getKeyPCMFormat(mSource->getFormatMeta(true))); }
-private:
enum {
kWhatCodecNotify = 'cdcN',
kWhatRenderBuffer = 'rndr',
@@ -90,8 +95,8 @@ private:
bool mIsSecure;
bool mFormatChangePending;
bool mTimeChangePending;
+ bool mVideoFormatChangeDoFlushOnly;
- bool mPaused;
bool mResumePending;
AString mComponentName;
@@ -103,7 +108,7 @@ private:
size_t size,
int64_t timeUs,
int32_t flags);
- void handleOutputFormatChange(const sp<AMessage> &format);
+ virtual void handleOutputFormatChange(const sp<AMessage> &format);
void releaseAndResetMediaBuffers();
void requestCodecNotification();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index 7e76842..04bb61c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -31,6 +31,7 @@ namespace android {
NuPlayer::DecoderBase::DecoderBase(const sp<AMessage> &notify)
: mNotify(notify),
mBufferGeneration(0),
+ mPaused(false),
mStats(new AMessage),
mRequestInputBuffersPending(false) {
// Every decoder has its own looper because MediaCodec operations
@@ -83,6 +84,13 @@ void NuPlayer::DecoderBase::setRenderer(const sp<Renderer> &renderer) {
msg->post();
}
+void NuPlayer::DecoderBase::pause() {
+ sp<AMessage> msg = new AMessage(kWhatPause, this);
+
+ sp<AMessage> response;
+ PostAndAwaitResponse(msg, &response);
+}
+
status_t NuPlayer::DecoderBase::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, this);
msg->setPointer("buffers", buffers);
@@ -146,6 +154,17 @@ void NuPlayer::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatPause:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ mPaused = true;
+
+ (new AMessage)->postReply(replyID);
+ break;
+ }
+
case kWhatGetInputBuffers:
{
sp<AReplyToken> replyID;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index b0dc01d..a334ec5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -36,6 +36,9 @@ struct NuPlayer::DecoderBase : public AHandler {
void init();
void setParameters(const sp<AMessage> &params);
+ // Synchronous call to ensure decoder will not request or send out data.
+ void pause();
+
void setRenderer(const sp<Renderer> &renderer);
virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
@@ -78,6 +81,7 @@ protected:
sp<AMessage> mNotify;
int32_t mBufferGeneration;
+ bool mPaused;
sp<AMessage> mStats;
private:
@@ -85,6 +89,7 @@ private:
kWhatConfigure = 'conf',
kWhatSetParameters = 'setP',
kWhatSetRenderer = 'setR',
+ kWhatPause = 'paus',
kWhatGetInputBuffers = 'gInB',
kWhatRequestInputBuffers = 'reqB',
kWhatFlush = 'flus',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 30146c4..b8b0505 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -29,14 +29,12 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaErrors.h>
+#include "mediaplayerservice/AVNuExtensions.h"
#include "ATSParser.h"
namespace android {
-// TODO optimize buffer size for power consumption
-// The offload read buffer size is 32 KB but 24 KB uses less power.
-static const size_t kAggregateBufferSizeBytes = 24 * 1024;
static const size_t kMaxCachedBytes = 200000;
NuPlayer::DecoderPassThrough::DecoderPassThrough(
@@ -46,13 +44,16 @@ NuPlayer::DecoderPassThrough::DecoderPassThrough(
: DecoderBase(notify),
mSource(source),
mRenderer(renderer),
+ // TODO optimize buffer size for power consumption
+ // The offload read buffer size is 32 KB but 24 KB uses less power.
+ mAggregateBufferSizeBytes(24 * 1024),
mSkipRenderingUntilMediaTimeUs(-1ll),
- mPaused(false),
mReachedEOS(true),
mPendingAudioErr(OK),
mPendingBuffersToDrain(0),
mCachedBytes(0),
- mComponentName("pass through decoder") {
+ mComponentName("pass through decoder"),
+ mPCMFormat(AUDIO_FORMAT_INVALID) {
ALOGW_IF(renderer == NULL, "expect a non-NULL renderer");
}
@@ -74,9 +75,18 @@ void NuPlayer::DecoderPassThrough::onConfigure(const sp<AMessage> &format) {
// The audio sink is already opened before the PassThrough decoder is created.
// Opening again might be relevant if decoder is instantiated after shutdown and
// format is different.
+ sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ if (AVNuUtils::get()->isRAWFormat(audioMeta)) {
+ mPCMFormat = AVNuUtils::get()->getKeyPCMFormat(audioMeta);
+ if (mPCMFormat != AUDIO_FORMAT_INVALID) {
+ AVNuUtils::get()->setPCMFormat(format, mPCMFormat);
+ AVNuUtils::get()->updateAudioBitWidth(mPCMFormat, format);
+ }
+ }
+
status_t err = mRenderer->openAudioSink(
format, true /* offloadOnly */, hasVideo,
- AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */);
+ AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */, mSource->isStreaming());
if (err != OK) {
handleError(err);
}
@@ -173,9 +183,9 @@ sp<ABuffer> NuPlayer::DecoderPassThrough::aggregateBuffer(
size_t smallSize = accessUnit->size();
if ((mAggregateBuffer == NULL)
// Don't bother if only room for a few small buffers.
- && (smallSize < (kAggregateBufferSizeBytes / 3))) {
+ && (smallSize < (mAggregateBufferSizeBytes / 3))) {
// Create a larger buffer for combining smaller buffers from the extractor.
- mAggregateBuffer = new ABuffer(kAggregateBufferSizeBytes);
+ mAggregateBuffer = new ABuffer(mAggregateBufferSizeBytes);
mAggregateBuffer->setRange(0, 0); // start empty
}
@@ -201,6 +211,7 @@ sp<ABuffer> NuPlayer::DecoderPassThrough::aggregateBuffer(
if ((bigSize == 0) && smallTimestampValid) {
mAggregateBuffer->meta()->setInt64("timeUs", timeUs);
}
+ setPcmFormat(mAggregateBuffer->meta());
// Append small buffer to the bigger buffer.
memcpy(mAggregateBuffer->base() + bigSize, accessUnit->data(), smallSize);
bigSize += smallSize;
@@ -212,6 +223,7 @@ sp<ABuffer> NuPlayer::DecoderPassThrough::aggregateBuffer(
} else {
// decided not to aggregate
aggregate = accessUnit;
+ setPcmFormat(aggregate->meta());
}
return aggregate;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index db33e87..91da1e1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -18,6 +18,8 @@
#define NUPLAYER_DECODER_PASS_THROUGH_H_
+#include <media/stagefright/foundation/AMessage.h>
+
#include "NuPlayer.h"
#include "NuPlayerDecoderBase.h"
@@ -43,36 +45,37 @@ protected:
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
virtual bool doRequestBuffers();
+ virtual void setPcmFormat(const sp<AMessage> &format) { format->setInt32("pcm-format", mPCMFormat); }
+ virtual sp<ABuffer> aggregateBuffer(const sp<ABuffer> &accessUnit);
-private:
enum {
kWhatBufferConsumed = 'bufC',
};
sp<Source> mSource;
sp<Renderer> mRenderer;
+ size_t mAggregateBufferSizeBytes;
int64_t mSkipRenderingUntilMediaTimeUs;
- bool mPaused;
-
- bool mReachedEOS;
+ bool mReachedEOS;
// Used by feedDecoderInputData to aggregate small buffers into
// one large buffer.
+ status_t mPendingAudioErr;
sp<ABuffer> mPendingAudioAccessUnit;
- status_t mPendingAudioErr;
sp<ABuffer> mAggregateBuffer;
+private:
// mPendingBuffersToDrain are only for debugging. It can be removed
// when the power investigation is done.
size_t mPendingBuffersToDrain;
size_t mCachedBytes;
AString mComponentName;
+ audio_format_t mPCMFormat;
bool isStaleReply(const sp<AMessage> &msg);
bool isDoneFetching() const;
status_t dequeueAccessUnit(sp<ABuffer> *accessUnit);
- sp<ABuffer> aggregateBuffer(const sp<ABuffer> &accessUnit);
status_t fetchInputData(sp<AMessage> &reply);
void doFlush(bool notifyComplete);
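
Both decoder variants now stamp each output buffer's meta with a "pcm-format" entry (setPcmFormat() in NuPlayerDecoder.h and NuPlayerDecoderPassThrough.h above). The consumer side is not part of this excerpt; a hedged sketch of how a downstream component could read the value back:

    // Illustrative sketch: reading the key written by setPcmFormat() above.
    int32_t pcmFormat = AUDIO_FORMAT_INVALID;
    if (buffer->meta()->findInt32("pcm-format", &pcmFormat)) {
        // pcmFormat carries the audio_format_t selected for the PCM offload path
    }
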
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index f288c36..4383fce 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -31,6 +31,9 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include "mediaplayerservice/AVNuExtensions.h"
+#include "mediaplayerservice/AVMediaServiceExtensions.h"
+
namespace android {
NuPlayerDriver::NuPlayerDriver(pid_t pid)
@@ -55,7 +58,7 @@ NuPlayerDriver::NuPlayerDriver(pid_t pid)
true, /* canCallJava */
PRIORITY_AUDIO);
- mPlayer = new NuPlayer(pid);
+ mPlayer = AVNuFactory::get()->createNuPlayer(pid);
mLooper->registerHandler(mPlayer);
mPlayer->setDriver(this);
@@ -114,6 +117,7 @@ status_t NuPlayerDriver::setDataSource(int fd, int64_t offset, int64_t length) {
mCondition.wait(mLock);
}
+ AVNuUtils::get()->printFileName(fd);
return mAsyncResult;
}
@@ -405,6 +409,9 @@ status_t NuPlayerDriver::seekTo(int msec) {
{
mAtEOS = false;
mSeekInProgress = true;
+ if (mState == STATE_PAUSED) {
+ mStartupSeekTimeUs = seekTimeUs;
+ }
// seeks can take a while, so we essentially paused
notifyListener_l(MEDIA_PAUSED);
mPlayer->seekToAsync(seekTimeUs, true /* needNotify */);
@@ -607,6 +614,8 @@ status_t NuPlayerDriver::getMetadata(
Metadata::kSeekAvailable,
mPlayerFlags & NuPlayer::Source::FLAG_CAN_SEEK);
+ AVMediaServiceUtils::get()->appendMeta(&meta);
+
return OK;
}
@@ -741,12 +750,19 @@ void NuPlayerDriver::notifyListener_l(
}
}
if (mLooping || mAutoLoop) {
- mPlayer->seekToAsync(0);
- if (mAudioSink != NULL) {
- // The renderer has stopped the sink at the end in order to play out
- // the last little bit of audio. If we're looping, we need to restart it.
- mAudioSink->start();
+ if (mState == STATE_RUNNING) {
+ mPlayer->seekToAsync(0);
+ if (mAudioSink != NULL) {
+ // The renderer has stopped the sink at the end in order to play out
+ // the last little bit of audio. If we're looping, we need to restart it.
+ mAudioSink->start();
+ }
+ } else {
+ mPlayer->pause();
+ mState = STATE_PAUSED;
+ mAtEOS = true;
}
+
// don't send completion event when looping
return;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4d25294..8afdefe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -26,12 +26,15 @@
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/AWakeLock.h>
#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler.h>
#include <inttypes.h>
+#include "mediaplayerservice/AVNuExtensions.h"
+#include "stagefright/AVExtensions.h"
namespace android {
@@ -81,6 +84,16 @@ const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER
// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
+static bool sFrameAccurateAVsync = false;
+
+static void readProperties() {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("persist.sys.media.avsync", value, NULL)) {
+ sFrameAccurateAVsync =
+ !strcmp("1", value) || !strcasecmp("true", value);
+ }
+}
+
NuPlayer::Renderer::Renderer(
const sp<MediaPlayerBase::AudioSink> &sink,
const sp<AMessage> &notify,
@@ -102,6 +115,7 @@ NuPlayer::Renderer::Renderer(
mVideoLateByUs(0ll),
mHasAudio(false),
mHasVideo(false),
+ mFoundAudioEOS(false),
mNotifyCompleteAudio(false),
mNotifyCompleteVideo(false),
mSyncQueues(false),
@@ -113,7 +127,7 @@ NuPlayer::Renderer::Renderer(
mAudioRenderingStartGeneration(0),
mRenderingDataDelivered(false),
mAudioOffloadPauseTimeoutGeneration(0),
- mAudioTornDown(false),
+ mAudioTearingDown(false),
mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
mTotalBuffersQueued(0),
@@ -123,6 +137,7 @@ NuPlayer::Renderer::Renderer(
mMediaClock = new MediaClock;
mPlaybackRate = mPlaybackSettings.mSpeed;
mMediaClock->setPlaybackRate(mPlaybackRate);
+ readProperties();
}
NuPlayer::Renderer::~Renderer() {
@@ -313,7 +328,8 @@ void NuPlayer::Renderer::setVideoFrameRate(float fps) {
// Called on any threads.
status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
- return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
+ return mMediaClock->getMediaTime(
+ ALooper::GetNowUs(), mediaUs, (mHasAudio && mFoundAudioEOS));
}
void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
@@ -349,18 +365,20 @@ status_t NuPlayer::Renderer::openAudioSink(
bool offloadOnly,
bool hasVideo,
uint32_t flags,
- bool *isOffloaded) {
+ bool *isOffloaded,
+ bool isStreaming) {
sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
msg->setMessage("format", format);
msg->setInt32("offload-only", offloadOnly);
msg->setInt32("has-video", hasVideo);
msg->setInt32("flags", flags);
+ msg->setInt32("isStreaming", isStreaming);
sp<AMessage> response;
- msg->postAndAwaitResponse(&response);
+ status_t postStatus = msg->postAndAwaitResponse(&response);
int32_t err;
- if (!response->findInt32("err", &err)) {
+ if (postStatus != OK || !response->findInt32("err", &err)) {
err = INVALID_OPERATION;
} else if (err == OK && isOffloaded != NULL) {
int32_t offload;
@@ -393,7 +411,10 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
uint32_t flags;
CHECK(msg->findInt32("flags", (int32_t *)&flags));
- status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+ uint32_t isStreaming;
+ CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
sp<AMessage> response = new AMessage;
response->setInt32("err", err);
@@ -436,9 +457,10 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
if (onDrainAudioQueue()) {
uint32_t numFramesPlayed;
- CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
- (status_t)OK);
-
+ if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
+ ALOGW("mAudioSink->getPosition failed");
+ break;
+ }
uint32_t numFramesPendingPlayout =
mNumFramesWritten - numFramesPlayed;
@@ -604,6 +626,12 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatAudioTearDownComplete:
+ {
+ onAudioTearDownComplete();
+ break;
+ }
+
case kWhatAudioOffloadPauseTimeout:
{
int32_t generation;
@@ -758,6 +786,7 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
// we don't know how much data we are queueing for offloaded tracks.
mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
+ mAnchorTimeMediaUs = nowMediaUs;
}
// for non-offloaded audio, we need to compute the frames written because
@@ -814,7 +843,7 @@ void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
bool NuPlayer::Renderer::onDrainAudioQueue() {
// do not drain audio during teardown as queued buffers may be invalid.
- if (mAudioTornDown) {
+ if (mAudioTearingDown) {
return false;
}
// TODO: This call to getPosition checks if AudioTrack has been created
@@ -940,7 +969,17 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
// (Case 1)
// Must be a multiple of the frame size. If it is not a multiple of a frame size, it
// needs to fail, as we should not carry over fractional frames between calls.
- CHECK_EQ(copy % mAudioSink->frameSize(), 0);
+
+ if (copy % mAudioSink->frameSize()) {
+ // CHECK_EQ(copy % mAudioSink->frameSize(), 0);
+ ALOGE("CHECK_EQ(copy % mAudioSink->frameSize(), 0) failed b/25372978");
+ ALOGE("mAudioSink->frameSize() %zu", mAudioSink->frameSize());
+ ALOGE("bytes to copy %zu", copy);
+ ALOGE("entry size %zu, entry offset %zu", entry->mBuffer->size(),
+ entry->mOffset - written);
+ notifyEOS(true /*audio*/, UNKNOWN_ERROR);
+ return false;
+ }
// (Case 2, 3, 4)
// Return early to the caller.
@@ -1042,6 +1081,9 @@ void NuPlayer::Renderer::postDrainVideoQueue() {
mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
mAnchorTimeMediaUs = mediaTimeUs;
realTimeUs = nowUs;
+ } else if (!mVideoSampleReceived) {
+ // Always render the first video frame.
+ realTimeUs = nowUs;
} else {
realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
}
@@ -1078,6 +1120,11 @@ void NuPlayer::Renderer::postDrainVideoQueue() {
ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
// post 2 display refreshes before rendering is due
+ // FIXME currently this increases power consumption, so unless frame-accurate
+ // AV sync is requested, post closer to required render time (at 0.63 vsyncs)
+ if (!sFrameAccurateAVsync) {
+ twoVsyncsUs >>= 4;
+ }
msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
mDrainVideoQueuePending = true;
@@ -1102,7 +1149,7 @@ void NuPlayer::Renderer::onDrainVideoQueue() {
return;
}
- int64_t nowUs = -1;
+ int64_t nowUs = ALooper::GetNowUs();
int64_t realTimeUs;
if (mFlags & FLAG_REAL_TIME) {
CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
@@ -1110,16 +1157,12 @@ void NuPlayer::Renderer::onDrainVideoQueue() {
int64_t mediaTimeUs;
CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- nowUs = ALooper::GetNowUs();
realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
}
bool tooLate = false;
if (!mPaused) {
- if (nowUs == -1) {
- nowUs = ALooper::GetNowUs();
- }
setVideoLateByUs(nowUs - realTimeUs);
tooLate = (mVideoLateByUs > 40000);
@@ -1143,6 +1186,12 @@ void NuPlayer::Renderer::onDrainVideoQueue() {
}
}
+ // Always render the first video frame while keeping stats on A/V sync.
+ if (!mVideoSampleReceived) {
+ realTimeUs = nowUs;
+ tooLate = false;
+ }
+
entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
entry->mNotifyConsumed->setInt32("render", !tooLate);
entry->mNotifyConsumed->post();
@@ -1168,6 +1217,9 @@ void NuPlayer::Renderer::notifyVideoRenderingStart() {
}
void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
+ if (audio) {
+ mFoundAudioEOS = true;
+ }
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatEOS);
notify->setInt32("audio", static_cast<int32_t>(audio));
@@ -1215,6 +1267,30 @@ void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
if (audio) {
Mutex::Autolock autoLock(mLock);
+#if 1
+ sp<ABuffer> newBuffer;
+ status_t err = AVNuUtils::get()->convertToSinkFormatIfNeeded(
+ buffer, newBuffer,
+ (offloadingAudio() ? mCurrentOffloadInfo.format : mCurrentPcmInfo.mFormat),
+ offloadingAudio());
+ switch (err) {
+ case NO_INIT:
+ // The passthrough decoder pushes some buffers before the audio sink
+ // is opened. The offload format is known only once the sink is opened,
+ // so PCM conversion cannot take place yet; retry shortly.
+ ALOGI("init pending, retrying in 10ms, this shouldn't happen");
+ msg->post(10000LL);
+ return;
+ case OK:
+ break;
+ default:
+ ALOGW("error 0x%x in converting to sink format, drop buffer", err);
+ notifyConsumed->post();
+ return;
+ }
+ CHECK(newBuffer != NULL);
+ entry.mBuffer = newBuffer;
+#endif
mAudioQueue.push_back(entry);
postDrainAudioQueue_l();
} else {
@@ -1483,6 +1559,7 @@ void NuPlayer::Renderer::onPause() {
mDrainAudioQueuePending = false;
mDrainVideoQueuePending = false;
+ mVideoRenderingStarted = false; // force-notify NOTE_INFO MEDIA_INFO_RENDERING_START after resume
if (mHasAudio) {
mAudioSink->pause();
@@ -1494,17 +1571,27 @@ void NuPlayer::Renderer::onPause() {
}
void NuPlayer::Renderer::onResume() {
+ readProperties();
+
if (!mPaused) {
return;
}
if (mHasAudio) {
+ status_t status = NO_ERROR;
cancelAudioOffloadPauseTimeout();
- status_t err = mAudioSink->start();
- if (err != OK) {
- ALOGE("cannot start AudioSink err %d", err);
+ status = mAudioSink->start();
+ if (offloadingAudio() && status != NO_ERROR && status != INVALID_OPERATION) {
+ ALOGD("received error %d on resume for offload track, posting TEAR_DOWN event", status);
notifyAudioTearDown();
}
+ //Update anchor time after resuming playback.
+ if (offloadingAudio() && status == NO_ERROR) {
+ int64_t nowUs = ALooper::GetNowUs();
+ int64_t nowMediaUs =
+ mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
+ mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
+ }
}
{
@@ -1566,6 +1653,7 @@ int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
int64_t numFramesPlayedAt;
AudioTimestamp ts;
static const int64_t kStaleTimestamp100ms = 100000;
+ int64_t durationUs;
status_t res = mAudioSink->getTimestamp(ts);
if (res == OK) { // case 1: mixing audio tracks and offloaded tracks.
@@ -1592,14 +1680,20 @@ int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
// numFramesPlayed, (long long)numFramesPlayedAt);
} else { // case 3: transitory at new track or audio fast tracks.
res = mAudioSink->getPosition(&numFramesPlayed);
- CHECK_EQ(res, (status_t)OK);
- numFramesPlayedAt = nowUs;
- numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
+ if (res != OK) {
+ // getPosition query failed; use the media clock to estimate the render position
+ getCurrentPosition(&durationUs);
+ durationUs = durationUs - mAnchorTimeMediaUs;
+ return durationUs;
+ } else {
+ numFramesPlayedAt = nowUs;
+ numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
+ }
//ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
}
//CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test
- int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
+ durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
+ nowUs - numFramesPlayedAt;
if (durationUs < 0) {
// Occurs when numFramesPlayed position is very small and the following:
@@ -1618,10 +1712,10 @@ int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
}
void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
- if (mAudioTornDown) {
+ if (mAudioTearingDown) {
return;
}
- mAudioTornDown = true;
+ mAudioTearingDown = true;
int64_t currentPositionUs;
sp<AMessage> notify = mNotify->dup();
@@ -1657,9 +1751,15 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
- uint32_t flags) {
+ uint32_t flags,
+ bool isStreaming) {
ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
offloadOnly, offloadingAudio());
+
+ if (mAudioTearingDown) {
+ ALOGW("openAudioSink: not opening now; will reopen after the audio teardown completes");
+ return OK;
+ }
bool audioSinkChanged = false;
int32_t numChannels;
@@ -1671,13 +1771,17 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
}
+ int32_t bitWidth = 16;
+ format->findInt32("bits-per-sample", &bitWidth);
+
int32_t sampleRate;
CHECK(format->findInt32("sample-rate", &sampleRate));
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
if (offloadingAudio()) {
audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
- AString mime;
- CHECK(format->findString("mime", &mime));
status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
if (err != OK) {
@@ -1685,22 +1789,38 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
"audio_format", mime.c_str());
onDisableOffloadAudio();
} else {
+ audioFormat = AVUtils::get()->updateAudioFormat(audioFormat, format);
+ bitWidth = AVUtils::get()->getAudioSampleBits(format);
ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
mime.c_str(), audioFormat);
int avgBitRate = -1;
- format->findInt32("bit-rate", &avgBitRate);
+ format->findInt32("bitrate", &avgBitRate);
int32_t aacProfile = -1;
if (audioFormat == AUDIO_FORMAT_AAC
&& format->findInt32("aac-profile", &aacProfile)) {
// Redefine AAC format as per aac profile
- mapAACProfileToAudioFormat(
- audioFormat,
- aacProfile);
+ int32_t isADTSSupported;
+ isADTSSupported = AVUtils::get()->mapAACProfileToAudioFormat(format,
+ audioFormat,
+ aacProfile);
+ if (!isADTSSupported) {
+ mapAACProfileToAudioFormat(audioFormat,
+ aacProfile);
+ } else {
+ ALOGV("Format is AAC ADTS\n");
+ }
}
+ ALOGV("onOpenAudioSink: %s", format->debugString().c_str());
+
+ int32_t offloadBufferSize =
+ AVUtils::get()->getAudioMaxInputBufferSize(
+ audioFormat,
+ format);
audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+
offloadInfo.duration_us = -1;
format->findInt64(
"durationUs", &offloadInfo.duration_us);
@@ -1710,7 +1830,9 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
offloadInfo.bit_rate = avgBitRate;
offloadInfo.has_video = hasVideo;
- offloadInfo.is_streaming = true;
+ offloadInfo.is_streaming = isStreaming;
+ offloadInfo.bit_width = bitWidth;
+ offloadInfo.offload_buffer_size = offloadBufferSize;
if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
ALOGV("openAudioSink: no change in offload mode");
@@ -1763,6 +1885,7 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
} else {
mUseAudioCallback = true; // offload mode transfers data through callback
++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
+ mFlags |= FLAG_OFFLOAD_AUDIO;
}
}
}
@@ -1774,7 +1897,7 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
const PcmInfo info = {
(audio_channel_mask_t)channelMask,
(audio_output_flags_t)pcmFlags,
- AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
+ AVNuUtils::get()->getPCMFormat(format),
numChannels,
sampleRate
};
@@ -1809,7 +1932,7 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
sampleRate,
numChannels,
(audio_channel_mask_t)channelMask,
- AUDIO_FORMAT_PCM_16_BIT,
+ AVNuUtils::get()->getPCMFormat(format),
0 /* bufferCount - unused */,
mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
mUseAudioCallback ? this : NULL,
@@ -1834,7 +1957,6 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
if (audioSinkChanged) {
onAudioSinkChanged();
}
- mAudioTornDown = false;
return OK;
}
@@ -1844,5 +1966,13 @@ void NuPlayer::Renderer::onCloseAudioSink() {
mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}
+void NuPlayer::Renderer::signalAudioTearDownComplete() {
+ (new AMessage(kWhatAudioTearDownComplete, this))->post();
+}
+
+void NuPlayer::Renderer::onAudioTearDownComplete() {
+ mAudioTearingDown = false;
+}
+
} // namespace android
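
Among the renderer changes above, the case-3 fallback in getPlayedOutAudioDurationUs() is easiest to see in isolation: when getTimestamp() is unavailable, the played-out duration is derived from the frame count reported by getPosition(), adjusted by half the sink latency. The following is a standalone sketch of that arithmetic only; sampleRate and latencyUs are stand-in parameters for the mAudioSink queries, and the latency/2 heuristic is carried over from the code above.

#include <cstdint>
#include <cstdio>

// Convert a frame count to microseconds at the given sample rate.
int64_t framesToDurationUs(int64_t frames, int32_t sampleRate) {
    return (frames * 1000000LL) / sampleRate;
}

// Case-3 style estimate: assume the reported position is ~latency/2 old.
int64_t playedOutAudioDurationUs(int64_t nowUs, int64_t numFramesPlayed,
                                 int32_t sampleRate, int64_t latencyUs) {
    int64_t numFramesPlayedAtUs = nowUs + latencyUs / 2;
    int64_t durationUs =
            framesToDurationUs(numFramesPlayed, sampleRate) + nowUs - numFramesPlayedAtUs;
    return durationUs < 0 ? 0 : durationUs;   // clamp, as the original does
}

int main() {
    // e.g. 96000 frames at 48 kHz with 40 ms sink latency -> ~1.98 s played out
    std::printf("%lld us\n", (long long)playedOutAudioDurationUs(
            /*nowUs=*/1000000, /*numFramesPlayed=*/96000, /*sampleRate=*/48000,
            /*latencyUs=*/40000));
}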
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 9479c31..a84e673 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -73,13 +73,17 @@ struct NuPlayer::Renderer : public AHandler {
status_t getCurrentPosition(int64_t *mediaUs);
int64_t getVideoLateByUs();
+ virtual audio_stream_type_t getAudioStreamType(){return AUDIO_STREAM_DEFAULT;}
+
status_t openAudioSink(
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
uint32_t flags,
- bool *isOffloaded);
+ bool *isOffloaded,
+ bool isStreaming);
void closeAudioSink();
+ void signalAudioTearDownComplete();
enum {
kWhatEOS = 'eos ',
@@ -89,6 +93,7 @@ struct NuPlayer::Renderer : public AHandler {
kWhatMediaRenderingStart = 'mdrd',
kWhatAudioTearDown = 'adTD',
kWhatAudioOffloadPauseTimeout = 'aOPT',
+ kWhatAudioTearDownComplete = 'aTDC',
};
enum AudioTearDownReason {
@@ -101,7 +106,6 @@ protected:
virtual void onMessageReceived(const sp<AMessage> &msg);
-private:
enum {
kWhatDrainAudioQueue = 'draA',
kWhatDrainVideoQueue = 'draV',
@@ -162,6 +166,7 @@ private:
int64_t mVideoLateByUs;
bool mHasAudio;
bool mHasVideo;
+ bool mFoundAudioEOS;
bool mNotifyCompleteAudio;
bool mNotifyCompleteVideo;
@@ -181,7 +186,7 @@ private:
int64_t mLastPositionUpdateUs;
int32_t mAudioOffloadPauseTimeoutGeneration;
- bool mAudioTornDown;
+ bool mAudioTearingDown;
audio_offload_info_t mCurrentOffloadInfo;
struct PcmInfo {
@@ -229,7 +234,7 @@ private:
void prepareForMediaRenderingStart_l();
void notifyIfMediaRenderingStarted_l();
- void onQueueBuffer(const sp<AMessage> &msg);
+ virtual void onQueueBuffer(const sp<AMessage> &msg);
void onQueueEOS(const sp<AMessage> &msg);
void onFlush(const sp<AMessage> &msg);
void onAudioSinkChanged();
@@ -251,8 +256,10 @@ private:
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
- uint32_t flags);
+ uint32_t flags,
+ bool isStreaming);
void onCloseAudioSink();
+ void onAudioTearDownComplete();
void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
void notifyFlushComplete(bool audio);
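
The header hunk above mostly opens the renderer up for subclassing: members drop from private to protected, and onQueueBuffer(), getAudioStreamType() and friends become virtual so the libavextensions factories can substitute an extended renderer. A generic sketch of that hook pattern, with made-up class names rather than the real AV extension types:

#include <cstdio>

// Base class exposes a protected member and a virtual hook, as the header now does.
struct BaseRenderer {
    virtual ~BaseRenderer() {}
    virtual void onQueueBuffer(int bufferId) {
        std::printf("base queues buffer %d\n", bufferId);
    }
protected:
    int mNotifyTarget = 0;   // visible to subclasses, like the now-protected mNotify
};

// Hypothetical vendor subclass overrides the hook and falls back to the base path.
struct ExtendedRenderer : BaseRenderer {
    void onQueueBuffer(int bufferId) override {
        std::printf("extended pre-processing for buffer %d\n", bufferId);
        BaseRenderer::onQueueBuffer(bufferId);
    }
};

int main() {
    ExtendedRenderer r;
    r.onQueueBuffer(7);
}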
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 11a6a9f..b248316 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -39,6 +39,7 @@ struct NuPlayer::Source : public AHandler {
FLAG_DYNAMIC_DURATION = 16,
FLAG_SECURE = 32,
FLAG_PROTECTED = 64,
+ FLAG_USE_SET_BUFFERS = 128,
};
enum {
@@ -57,6 +58,7 @@ struct NuPlayer::Source : public AHandler {
kWhatQueueDecoderShutdown,
kWhatDrmNoLicense,
kWhatInstantiateSecureDecoders,
+ kWhatRTCPByeReceived,
};
// The provides message is used to notify the player about various
@@ -132,10 +134,10 @@ protected:
void notifyFlagsChanged(uint32_t flags);
void notifyVideoSizeChanged(const sp<AMessage> &format = NULL);
void notifyInstantiateSecureDecoders(const sp<AMessage> &reply);
- void notifyPrepared(status_t err = OK);
+ virtual void notifyPrepared(status_t err = OK);
-private:
sp<AMessage> mNotify;
+private:
DISALLOW_EVIL_CONSTRUCTORS(Source);
};
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index af0351e..4962520 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -24,13 +24,16 @@
#include "MyHandler.h"
#include "SDPLoader.h"
+#include <cutils/properties.h>
#include <media/IMediaHTTPService.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
+#include <mediaplayerservice/AVMediaServiceExtensions.h>
namespace android {
const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+const uint32_t kMaxNumKeepDamagedAccessUnits = 30;
NuPlayer::RTSPSource::RTSPSource(
const sp<AMessage> &notify,
@@ -53,7 +56,10 @@ NuPlayer::RTSPSource::RTSPSource(
mBuffering(false),
mSeekGeneration(0),
mEOSTimeoutAudio(0),
- mEOSTimeoutVideo(0) {
+ mEOSTimeoutVideo(0),
+ mVideoTrackIndex(-1),
+ mKeepDamagedAccessUnits(false),
+ mNumKeepDamagedAccessUnits(0) {
if (headers) {
mExtraHeaders = *headers;
@@ -131,6 +137,10 @@ void NuPlayer::RTSPSource::pause() {
// Check if EOS or ERROR is received
if (source != NULL && source->isFinished(mediaDurationUs)) {
+ if (mHandler != NULL) {
+ ALOGI("Nearing EOS; not issuing pause");
+ mHandler->cancelTimeoutCheck();
+ }
return;
}
}
@@ -428,11 +438,22 @@ void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
+ bool isVideo = trackIndex == (size_t)mVideoTrackIndex;
int32_t damaged;
if (accessUnit->meta()->findInt32("damaged", &damaged)
&& damaged) {
- ALOGI("dropping damaged access unit.");
- break;
+ if (isVideo && mKeepDamagedAccessUnits
+ && mNumKeepDamagedAccessUnits < kMaxNumKeepDamagedAccessUnits) {
+ ALOGI("keep a damaged access unit.");
+ ++mNumKeepDamagedAccessUnits;
+ } else {
+ ALOGI("dropping damaged access unit.");
+ break;
+ }
+ } else {
+ if (isVideo) {
+ mNumKeepDamagedAccessUnits = 0;
+ }
}
if (mTSParser != NULL) {
@@ -476,8 +497,11 @@ void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
if (!info->mNPTMappingValid) {
// This is a live stream, we didn't receive any normal
// playtime mapping. We won't map to npt time.
- source->queueAccessUnit(accessUnit);
- break;
+ if (!AVMediaServiceUtils::get()->checkNPTMapping(&info->mRTPTime,
+ &info->mNormalPlaytimeUs, &info->mNPTMappingValid, rtpTime)) {
+ source->queueAccessUnit(accessUnit);
+ break;
+ }
}
int64_t nptUs =
@@ -563,6 +587,14 @@ void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case MyHandler::kWhatByeReceived:
+ {
+ sp<AMessage> msg = dupNotify();
+ msg->setInt32("what", kWhatRTCPByeReceived);
+ msg->post();
+ break;
+ }
+
case SDPLoader::kWhatSDPLoaded:
{
onSDPLoaded(msg);
@@ -597,6 +629,16 @@ void NuPlayer::RTSPSource::onConnected() {
bool isAudio = !strncasecmp(mime, "audio/", 6);
bool isVideo = !strncasecmp(mime, "video/", 6);
+ if (isVideo) {
+ mVideoTrackIndex = i;
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("rtsp.video.keep-damaged-au", value, NULL)
+ && !strcasecmp(mime, value)) {
+ ALOGV("keeping damaged access units enabled for %s", mime);
+ mKeepDamagedAccessUnits = true;
+ }
+ }
+
TrackInfo info;
info.mTimeScale = timeScale;
info.mRTPTime = 0;
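
The RTSPSource changes above keep a bounded run of damaged video access units instead of always dropping them, resetting the count whenever a clean unit arrives. A standalone sketch of that policy; kMaxKeep mirrors kMaxNumKeepDamagedAccessUnits, and the function name is ours, not part of the source.

#include <cstdint>
#include <cstdio>

const uint32_t kMaxKeep = 30;

// Decide whether to queue an access unit, tracking consecutive damaged video units.
bool shouldKeepAccessUnit(bool isVideo, bool damaged, bool keepDamagedEnabled,
                          uint32_t &numKeptDamaged) {
    if (!damaged) {
        if (isVideo) numKeptDamaged = 0;   // a clean unit resets the budget
        return true;
    }
    if (isVideo && keepDamagedEnabled && numKeptDamaged < kMaxKeep) {
        ++numKeptDamaged;                  // tolerate a limited run of damage
        return true;
    }
    return false;                          // drop the damaged unit
}

int main() {
    uint32_t kept = 0;
    std::printf("%d\n", shouldKeepAccessUnit(true, true, true, kept));
}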
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 6438a1e..c431174 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -118,6 +118,10 @@ private:
sp<AReplyToken> mSeekReplyID;
+ int32_t mVideoTrackIndex;
+ bool mKeepDamagedAccessUnits;
+ uint32_t mNumKeepDamagedAccessUnits;
+
sp<AnotherPacketSource> getSource(bool audio);
void onConnected();
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index 0246b59..b9c915e 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "StreamingSource"
#include <utils/Log.h>
+#include <inttypes.h>
+
#include "StreamingSource.h"
#include "ATSParser.h"
@@ -29,9 +31,12 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <inttypes.h>
namespace android {
+const int32_t kNumListenerQueuePackets = 80;
+
NuPlayer::StreamingSource::StreamingSource(
const sp<AMessage> &notify,
const sp<IStreamSource> &source)
@@ -84,7 +89,7 @@ status_t NuPlayer::StreamingSource::feedMoreTSData() {
}
void NuPlayer::StreamingSource::onReadBuffer() {
- for (int32_t i = 0; i < 50; ++i) {
+ for (int32_t i = 0; i < kNumListenerQueuePackets; ++i) {
char buffer[188];
sp<AMessage> extra;
ssize_t n = mStreamListener->read(buffer, sizeof(buffer), &extra);
@@ -248,7 +253,7 @@ status_t NuPlayer::StreamingSource::dequeueAccessUnit(
if (err == OK) {
int64_t timeUs;
CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
- ALOGV("dequeueAccessUnit timeUs=%lld us", timeUs);
+ ALOGV("dequeueAccessUnit timeUs=%" PRId64 " us", timeUs);
}
#endif
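
Besides raising the per-iteration packet budget to kNumListenerQueuePackets, the StreamingSource hunk replaces "%lld" with the PRId64 macro when logging an int64_t. A tiny illustration of why: %lld is only correct where long long and int64_t coincide, while PRId64 from <cinttypes> expands to the right conversion specifier on every platform.

#include <cinttypes>
#include <cstdio>

int main() {
    int64_t timeUs = 1234567890123LL;
    std::printf("dequeueAccessUnit timeUs=%" PRId64 " us\n", timeUs);
}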
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index 45e8a30..2115eb4 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -136,7 +136,8 @@ AACExtractor::AACExtractor(
const sp<DataSource> &source, const sp<AMessage> &_meta)
: mDataSource(source),
mInitCheck(NO_INIT),
- mFrameDurationUs(0) {
+ mFrameDurationUs(0),
+ mApeMeta(new MetaData) {
sp<AMessage> meta = _meta;
if (meta == NULL) {
@@ -166,15 +167,30 @@ AACExtractor::AACExtractor(
channel = (header[0] & 0x1) << 2 | (header[1] >> 6);
mMeta = MakeAACCodecSpecificData(profile, sf_index, channel);
+ mMeta->setInt32(kKeyAACAOT, profile + 1);
off64_t streamSize, numFrames = 0;
size_t frameSize = 0;
int64_t duration = 0;
+ uint8_t apeTag[8];
if (mDataSource->getSize(&streamSize) == OK) {
while (offset < streamSize) {
+ mDataSource->readAt(offset, &apeTag, 8);
+ if (ape.isAPE(apeTag)) {
+ size_t apeSize = 0;
+ mDataSource->readAt(offset + 8 + 4, &apeSize, 1);
+
+ if (ape.parseAPE(source, offset, mApeMeta) == false) {
+ break;
+ }
+
+ mOffsetVector.push(offset);
+ offset += apeSize;
+ continue;
+ }
if ((frameSize = getAdtsFrameLength(source, offset, NULL)) == 0) {
- return;
+ break;
}
mOffsetVector.push(offset);
@@ -196,15 +212,13 @@ AACExtractor::~AACExtractor() {
}
sp<MetaData> AACExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
if (mInitCheck != OK) {
- return meta;
+ return mApeMeta;
}
+ mApeMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC_ADTS);
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC_ADTS);
-
- return meta;
+ return mApeMeta;
}
size_t AACExtractor::countTracks() {
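
The AACExtractor change interleaves an APE tag probe into the ADTS frame walk: an APE tag block starts with the 8-byte preamble "APETAGEX", and when it is found the block is handed to the APE parser instead of being treated as another ADTS frame. A minimal sketch of the preamble check only; the helper name is ours.

#include <cstdint>
#include <cstring>
#include <cstdio>

bool looksLikeApeTag(const uint8_t header[8]) {
    static const uint8_t kPreamble[8] = {'A','P','E','T','A','G','E','X'};
    return std::memcmp(header, kPreamble, sizeof(kPreamble)) == 0;
}

int main() {
    const uint8_t probe[8] = {'A','P','E','T','A','G','E','X'};
    std::printf("ape tag: %d\n", looksLikeApeTag(probe));
}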
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index cd2408b..581bfba 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -44,6 +44,9 @@
#include <media/stagefright/OMXCodec.h>
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/SurfaceUtils.h>
+#include <media/stagefright/FFMPEGSoftCodec.h>
+#include <media/stagefright/Utils.h>
+
#include <media/hardware/HardwareAPI.h>
#include <OMX_AudioExt.h>
@@ -52,8 +55,14 @@
#include <OMX_IndexExt.h>
#include <OMX_AsString.h>
+#ifdef USE_SAMSUNG_COLORFORMAT
+#include <sec_format.h>
+#endif
+
#include "include/avc_utils.h"
+#include <stagefright/AVExtensions.h>
+
namespace android {
// OMX errors are directly mapped into status_t range if
@@ -497,6 +506,8 @@ ACodec::ACodec()
mSentFormat(false),
mIsVideo(false),
mIsEncoder(false),
+ mEncoderComponent(false),
+ mComponentAllocByName(false),
mFatalError(false),
mShutdownInProgress(false),
mExplicitShutdown(false),
@@ -539,6 +550,10 @@ ACodec::ACodec()
ACodec::~ACodec() {
}
+status_t ACodec::setupCustomCodec(status_t err, const char * /*mime*/, const sp<AMessage> &/*msg*/) {
+ return err;
+}
+
void ACodec::setNotificationMessage(const sp<AMessage> &msg) {
mNotify = msg;
}
@@ -613,8 +628,8 @@ void ACodec::initiateShutdown(bool keepComponentAllocated) {
msg->setInt32("keepComponentAllocated", keepComponentAllocated);
msg->post();
if (!keepComponentAllocated) {
- // ensure shutdown completes in 3 seconds
- (new AMessage(kWhatReleaseCodecInstance, this))->post(3000000);
+ // ensure shutdown completes in 30 seconds
+ (new AMessage(kWhatReleaseCodecInstance, this))->post(30000000);
}
}
@@ -716,7 +731,7 @@ status_t ACodec::handleSetSurface(const sp<Surface> &surface) {
if (storingMetadataInDecodedBuffers()
&& !mLegacyAdaptiveExperiment
&& info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
- ALOGV("skipping buffer %p", info.mGraphicBuffer->getNativeBuffer());
+ ALOGV("skipping buffer %p", info.mGraphicBuffer.get() ? info.mGraphicBuffer->getNativeBuffer() : 0x0);
continue;
}
ALOGV("attaching buffer %p", info.mGraphicBuffer->getNativeBuffer());
@@ -752,7 +767,8 @@ status_t ACodec::handleSetSurface(const sp<Surface> &surface) {
}
// push blank buffers to previous window if requested
- if (mFlags & kFlagPushBlankBuffersToNativeWindowOnShutdown) {
+ if (mFlags & kFlagPushBlankBuffersToNativeWindowOnShutdown ||
+ mFlags & kFlagPushBlankBuffersToNativeWindowOnSwitch) {
pushBlankBuffersToNativeWindow(mNativeWindow.get());
}
@@ -828,15 +844,11 @@ status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
: OMXCodec::kRequiresAllocateBufferOnOutputPorts;
if ((portIndex == kPortIndexInput && (mFlags & kFlagIsSecure))
- || (portIndex == kPortIndexOutput && usingMetadataOnEncoderOutput())) {
+ || (portIndex == kPortIndexOutput && usingMetadataOnEncoderOutput())
+ || canAllocateBuffer(portIndex)) {
mem.clear();
- void *ptr;
- err = mOMX->allocateBuffer(
- mNode, portIndex, bufSize, &info.mBufferID,
- &ptr);
-
- info.mData = new ABuffer(ptr, bufSize);
+ err = allocateBuffer(portIndex, bufSize, info);
} else if (mQuirks & requiresAllocateBufferBit) {
err = mOMX->allocateBufferWithBackup(
mNode, portIndex, mem, &info.mBufferID, allottedSize);
@@ -908,14 +920,57 @@ status_t ACodec::setupNativeWindowSizeFormatAndUsage(
usage |= kVideoGrallocUsage;
*finalUsage = usage;
+#ifdef USE_SAMSUNG_COLORFORMAT
+ OMX_COLOR_FORMATTYPE eNativeColorFormat = def.format.video.eColorFormat;
+ setNativeWindowColorFormat(eNativeColorFormat);
+#endif
+
ALOGV("gralloc usage: %#x(OMX) => %#x(ACodec)", omxUsage, usage);
- return setNativeWindowSizeFormatAndUsage(
+ int32_t width = 0, height = 0;
+ int32_t isAdaptivePlayback = 0;
+
+ if (mInputFormat->findInt32("adaptive-playback", &isAdaptivePlayback)
+ && isAdaptivePlayback
+ && mInputFormat->findInt32("max-width", &width)
+ && mInputFormat->findInt32("max-height", &height)) {
+ width = max(width, (int32_t)def.format.video.nFrameWidth);
+ height = max(height, (int32_t)def.format.video.nFrameHeight);
+ ALOGV("Adaptive playback width = %d, height = %d", width, height);
+ } else {
+ width = def.format.video.nFrameWidth;
+ height = def.format.video.nFrameHeight;
+ }
+ err = setNativeWindowSizeFormatAndUsage(
nativeWindow,
- def.format.video.nFrameWidth,
- def.format.video.nFrameHeight,
+ width,
+ height,
+#ifdef USE_SAMSUNG_COLORFORMAT
+ eNativeColorFormat,
+#else
def.format.video.eColorFormat,
+#endif
mRotationDegrees,
usage);
+#ifdef QCOM_HARDWARE
+ if (err == OK) {
+ OMX_CONFIG_RECTTYPE rect;
+ InitOMXParams(&rect);
+ rect.nPortIndex = kPortIndexOutput;
+ err = mOMX->getConfig(
+ mNode, OMX_IndexConfigCommonOutputCrop, &rect, sizeof(rect));
+ if (err == OK) {
+ ALOGV("rect size = %d, %d, %d, %d", rect.nLeft, rect.nTop, rect.nWidth, rect.nHeight);
+ android_native_rect_t crop;
+ crop.left = rect.nLeft;
+ crop.top = rect.nTop;
+ crop.right = rect.nLeft + rect.nWidth - 1;
+ crop.bottom = rect.nTop + rect.nHeight - 1;
+ ALOGV("crop update (%d, %d), (%d, %d)", crop.left, crop.top, crop.right, crop.bottom);
+ err = native_window_set_crop(nativeWindow, &crop);
+ }
+ }
+#endif
+ return err;
}
status_t ACodec::configureOutputBuffersFromNativeWindow(
@@ -974,6 +1029,12 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
// 2. try to allocate two (2) additional buffers to reduce starvation from
// the consumer
// plus an extra buffer to account for incorrect minUndequeuedBufs
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ // Some devices don't like to set OMX_IndexParamPortDefinition at this
+ // point (even with an unmodified def), so skip it if possible.
+ // This check was present in KitKat.
+ if (def.nBufferCountActual < def.nBufferCountMin + *minUndequeuedBuffers) {
+#endif
for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
OMX_U32 newBufferCount =
def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
@@ -993,6 +1054,9 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
return err;
}
}
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ }
+#endif
err = native_window_set_buffer_count(
mNativeWindow.get(), def.nBufferCountActual);
@@ -1244,6 +1308,27 @@ void ACodec::dumpBuffers(OMX_U32 portIndex) {
}
}
+#ifdef USE_SAMSUNG_COLORFORMAT
+void ACodec::setNativeWindowColorFormat(OMX_COLOR_FORMATTYPE &eNativeColorFormat)
+{
+ // In case of Samsung decoders, we set proper native color format for the Native Window
+ if (!strcasecmp(mComponentName.c_str(), "OMX.SEC.AVC.Decoder")
+ || !strcasecmp(mComponentName.c_str(), "OMX.SEC.FP.AVC.Decoder")
+ || !strcasecmp(mComponentName.c_str(), "OMX.SEC.MPEG4.Decoder")
+ || !strcasecmp(mComponentName.c_str(), "OMX.Exynos.AVC.Decoder")) {
+ switch (eNativeColorFormat) {
+ case OMX_COLOR_FormatYUV420SemiPlanar:
+ eNativeColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YCbCr_420_SP;
+ break;
+ case OMX_COLOR_FormatYUV420Planar:
+ default:
+ eNativeColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YCbCr_420_P;
+ break;
+ }
+ }
+}
+#endif
+
status_t ACodec::cancelBufferToNativeWindow(BufferInfo *info) {
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
@@ -1327,7 +1412,8 @@ ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
}
bool stale = false;
- for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+ for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+ i--;
BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
if (info->mGraphicBuffer != NULL &&
@@ -1370,7 +1456,8 @@ ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
// get oldest undequeued buffer
BufferInfo *oldest = NULL;
- for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+ for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+ i--;
BufferInfo *info =
&mBuffers[kPortIndexOutput].editItemAt(i);
if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW &&
@@ -1574,6 +1661,8 @@ status_t ACodec::setComponentRole(
"audio_decoder.ac3", "audio_encoder.ac3" },
{ MEDIA_MIMETYPE_AUDIO_EAC3,
"audio_decoder.eac3", "audio_encoder.eac3" },
+ { MEDIA_MIMETYPE_VIDEO_MPEG4_DP,
+ "video_decoder.mpeg4", NULL },
};
static const size_t kNumMimeToRole =
@@ -1587,7 +1676,7 @@ status_t ACodec::setComponentRole(
}
if (i == kNumMimeToRole) {
- return ERROR_UNSUPPORTED;
+ return FFMPEGSoftCodec::setSupportedRole(mOMX, mNode, isEncoder, mime);
}
const char *role =
@@ -1639,6 +1728,8 @@ status_t ACodec::configureCodec(
return err;
}
+ enableCustomAllocationMode(msg);
+
int32_t bitRate = 0;
// FLAC encoder doesn't need a bitrate, other encoders do
if (encoder && strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)
@@ -1896,6 +1987,12 @@ status_t ACodec::configureCodec(
&& push != 0) {
mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
}
+
+ int32_t val;
+ if (msg->findInt32("push-blank-buffers-on-switch", &val)
+ && val != 0) {
+ mFlags |= kFlagPushBlankBuffersToNativeWindowOnSwitch;
+ }
}
int32_t rotationDegrees;
@@ -1909,7 +2006,8 @@ status_t ACodec::configureCodec(
if (video) {
// determine need for software renderer
bool usingSwRenderer = false;
- if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
+ if (haveNativeWindow && (mComponentName.startsWith("OMX.google.") ||
+ mComponentName.startsWith("OMX.ffmpeg."))) {
usingSwRenderer = true;
haveNativeWindow = false;
}
@@ -1994,10 +2092,12 @@ status_t ACodec::configureCodec(
// and have the decoder figure it all out.
err = OK;
} else {
- err = setupRawAudioFormat(
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bits-per-sample", &bitsPerSample);
+ err = setupRawAudioFormatInternal(
encoder ? kPortIndexInput : kPortIndexOutput,
sampleRate,
- numChannels);
+ numChannels, bitsPerSample);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
int32_t numChannels, sampleRate;
@@ -2009,6 +2109,7 @@ status_t ACodec::configureCodec(
int32_t sbrMode;
int32_t maxOutputChannelCount;
int32_t pcmLimiterEnable;
+ int32_t bitsPerSample = 16;
drcParams_t drc;
if (!msg->findInt32("is-adts", &isADTS)) {
isADTS = 0;
@@ -2047,11 +2148,12 @@ status_t ACodec::configureCodec(
// value is unknown
drc.targetRefLevel = -1;
}
+ msg->findInt32("bits-per-sample", &bitsPerSample);
err = setupAACCodec(
encoder, numChannels, sampleRate, bitRate, aacProfile,
isADTS != 0, sbrMode, maxOutputChannelCount, drc,
- pcmLimiterEnable);
+ pcmLimiterEnable, bitsPerSample);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
@@ -2072,7 +2174,7 @@ status_t ACodec::configureCodec(
}
err = setupG711Codec(encoder, sampleRate, numChannels);
}
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC) && encoder) {
int32_t numChannels = 0, sampleRate = 0, compressionLevel = -1;
if (encoder &&
(!msg->findInt32("channel-count", &numChannels)
@@ -2108,16 +2210,21 @@ status_t ACodec::configureCodec(
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bits-per-sample", &bitsPerSample);
+ err = setupRawAudioFormatInternal(kPortIndexInput, sampleRate, numChannels, bitsPerSample);
}
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
+ } else if (!strncmp(mComponentName.c_str(), "OMX.google.", 11)
+ && !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
int32_t numChannels;
int32_t sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupAC3Codec(encoder, numChannels, sampleRate);
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bits-per-sample", &bitsPerSample);
+ err = setupAC3Codec(encoder, numChannels, sampleRate, bitsPerSample);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EAC3)) {
int32_t numChannels;
@@ -2126,7 +2233,16 @@ status_t ACodec::configureCodec(
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupEAC3Codec(encoder, numChannels, sampleRate);
+ int32_t bitsPerSample = 16;
+ msg->findInt32("bits-per-sample", &bitsPerSample);
+ err = setupEAC3Codec(encoder, numChannels, sampleRate, bitsPerSample);
+ }
+ } else {
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11) && !mIsEncoder) {
+ err = FFMPEGSoftCodec::setAudioFormat(
+ msg, mime, mOMX, mNode);
+ } else {
+ err = setupCustomCodec(err, mime, msg);
}
}
@@ -2297,15 +2413,16 @@ status_t ACodec::setupAACCodec(
bool encoder, int32_t numChannels, int32_t sampleRate,
int32_t bitRate, int32_t aacProfile, bool isADTS, int32_t sbrMode,
int32_t maxOutputChannelCount, const drcParams_t& drc,
- int32_t pcmLimiterEnable) {
+ int32_t pcmLimiterEnable, int32_t bitsPerSample) {
if (encoder && isADTS) {
return -EINVAL;
}
- status_t err = setupRawAudioFormat(
+ status_t err = setupRawAudioFormatInternal(
encoder ? kPortIndexInput : kPortIndexOutput,
sampleRate,
- numChannels);
+ numChannels,
+ bitsPerSample);
if (err != OK) {
return err;
@@ -2442,9 +2559,9 @@ status_t ACodec::setupAACCodec(
}
status_t ACodec::setupAC3Codec(
- bool encoder, int32_t numChannels, int32_t sampleRate) {
- status_t err = setupRawAudioFormat(
- encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+ bool encoder, int32_t numChannels, int32_t sampleRate, int32_t bitsPerSample) {
+ status_t err = setupRawAudioFormatInternal(
+ encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels, bitsPerSample);
if (err != OK) {
return err;
@@ -2480,9 +2597,9 @@ status_t ACodec::setupAC3Codec(
}
status_t ACodec::setupEAC3Codec(
- bool encoder, int32_t numChannels, int32_t sampleRate) {
- status_t err = setupRawAudioFormat(
- encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels);
+ bool encoder, int32_t numChannels, int32_t sampleRate, int32_t bitsPerSample) {
+ status_t err = setupRawAudioFormatInternal(
+ encoder ? kPortIndexInput : kPortIndexOutput, sampleRate, numChannels, bitsPerSample);
if (err != OK) {
return err;
@@ -2629,6 +2746,11 @@ status_t ACodec::setupFlacCodec(
status_t ACodec::setupRawAudioFormat(
OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
+ return setupRawAudioFormatInternal(portIndex, sampleRate, numChannels, 16);
+}
+
+status_t ACodec::setupRawAudioFormatInternal(
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels, int32_t bitsPerSample) {
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
@@ -2663,7 +2785,7 @@ status_t ACodec::setupRawAudioFormat(
pcmParams.nChannels = numChannels;
pcmParams.eNumData = OMX_NumericalDataSigned;
pcmParams.bInterleaved = OMX_TRUE;
- pcmParams.nBitPerSample = 16;
+ pcmParams.nBitPerSample = bitsPerSample;
pcmParams.nSamplingRate = sampleRate;
pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
@@ -2839,13 +2961,14 @@ static const struct VideoCodingMapEntry {
{ MEDIA_MIMETYPE_VIDEO_AVC, OMX_VIDEO_CodingAVC },
{ MEDIA_MIMETYPE_VIDEO_HEVC, OMX_VIDEO_CodingHEVC },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, OMX_VIDEO_CodingMPEG4 },
+ { MEDIA_MIMETYPE_VIDEO_MPEG4_DP, OMX_VIDEO_CodingMPEG4 },
{ MEDIA_MIMETYPE_VIDEO_H263, OMX_VIDEO_CodingH263 },
{ MEDIA_MIMETYPE_VIDEO_MPEG2, OMX_VIDEO_CodingMPEG2 },
{ MEDIA_MIMETYPE_VIDEO_VP8, OMX_VIDEO_CodingVP8 },
{ MEDIA_MIMETYPE_VIDEO_VP9, OMX_VIDEO_CodingVP9 },
};
-static status_t GetVideoCodingTypeFromMime(
+status_t ACodec::GetVideoCodingTypeFromMime(
const char *mime, OMX_VIDEO_CODINGTYPE *codingType) {
for (size_t i = 0;
i < sizeof(kVideoCodingMapEntry) / sizeof(kVideoCodingMapEntry[0]);
@@ -2888,6 +3011,9 @@ status_t ACodec::setupVideoDecoder(
OMX_VIDEO_CODINGTYPE compressionFormat;
status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
+ err = FFMPEGSoftCodec::setVideoFormat(err,
+ msg, mime, mOMX, mNode, mIsEncoder, &compressionFormat,
+ mComponentName.c_str());
if (err != OK) {
return err;
}
@@ -3038,7 +3164,11 @@ status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
OMX_VIDEO_CODINGTYPE compressionFormat;
err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
+ err = FFMPEGSoftCodec::setVideoFormat(err,
+ msg, mime, mOMX, mNode, mIsEncoder, &compressionFormat,
+ mComponentName.c_str());
if (err != OK) {
+ ALOGE("Not a supported video mime type: %s", mime);
return err;
}
@@ -3232,6 +3362,7 @@ status_t ACodec::setupMPEG4EncoderParameters(const sp<AMessage> &msg) {
mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(level);
}
+ setBFrames(&mpeg4type);
err = mOMX->setParameter(
mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
@@ -3429,6 +3560,8 @@ status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
err = verifySupportForProfileAndLevel(profile, level);
if (err != OK) {
+ ALOGE("%s does not support profile %x @ level %x",
+ mComponentName.c_str(), profile, level);
return err;
}
@@ -3437,11 +3570,14 @@ status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
}
// XXX
+ // Allow higher profiles to be set, since the encoder seems to support them
+#ifdef USE_AVC_BASELINE_PROFILE
if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
ALOGW("Use baseline profile instead of %d for AVC recording",
h264type.eProfile);
h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
}
+#endif
if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
h264type.nSliceHeaderSpacing = 0;
@@ -3462,6 +3598,7 @@ status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
h264type.nCabacInitIdc = 0;
}
+ setBFrames(&h264type, iFrameInterval, frameRate);
if (h264type.nBFrames != 0) {
h264type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB;
}
@@ -3502,6 +3639,8 @@ status_t ACodec::setupHEVCEncoderParameters(const sp<AMessage> &msg) {
frameRate = (float)tmp;
}
+ AVUtils::get()->setIntraPeriod(setPFramesSpacing(iFrameInterval, frameRate), 0, mOMX, mNode);
+
OMX_VIDEO_PARAM_HEVCTYPE hevcType;
InitOMXParams(&hevcType);
hevcType.nPortIndex = kPortIndexOutput;
@@ -3683,7 +3822,7 @@ status_t ACodec::setupErrorCorrectionParameters() {
errorCorrectionType.bEnableHEC = OMX_FALSE;
errorCorrectionType.bEnableResync = OMX_TRUE;
- errorCorrectionType.nResynchMarkerSpacing = 256;
+ errorCorrectionType.nResynchMarkerSpacing = 0;
errorCorrectionType.bEnableDataPartitioning = OMX_FALSE;
errorCorrectionType.bEnableRVLC = OMX_FALSE;
@@ -3842,6 +3981,7 @@ bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
+ fmt != OMX_TI_COLOR_FormatYUV420PackedSemiPlanar &&
fmt != HAL_PIXEL_FORMAT_YV12) {
ALOGW("do not know color format 0x%x = %d", fmt, fmt);
return false;
@@ -3914,6 +4054,7 @@ bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
case OMX_COLOR_FormatYUV420SemiPlanar:
// FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+ case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
// NV12
image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
image.mPlane[image.U].mColInc = 2;
@@ -4071,6 +4212,16 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
rect.nWidth = videoDef->nFrameWidth;
rect.nHeight = videoDef->nFrameHeight;
}
+#ifdef MTK_HARDWARE
+ if (!strncmp(mComponentName.c_str(), "OMX.MTK.", 8) && mOMX->getConfig(
+ mNode, (OMX_INDEXTYPE) 0x7f00001c /* OMX_IndexVendorMtkOmxVdecGetCropInfo */,
+ &rect, sizeof(rect)) != OK) {
+ rect.nLeft = 0;
+ rect.nTop = 0;
+ rect.nWidth = videoDef->nFrameWidth;
+ rect.nHeight = videoDef->nFrameHeight;
+ }
+#endif
if (rect.nLeft < 0 ||
rect.nTop < 0 ||
@@ -4138,6 +4289,14 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
default:
{
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::getVideoPortFormat(portIndex,
+ (int)videoDef->eCompressionFormat, notify, mOMX, mNode);
+ if (err == OK) {
+ break;
+ }
+ }
+
if (mIsEncoder ^ (portIndex == kPortIndexOutput)) {
// should be CodingUnused
ALOGE("Raw port video compression format is %s(%d)",
@@ -4183,7 +4342,10 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
if (params.nChannels <= 0
|| (params.nChannels != 1 && !params.bInterleaved)
- || params.nBitPerSample != 16u
+ || (params.nBitPerSample != 16u
+ && params.nBitPerSample != 24u
+ && params.nBitPerSample != 32u
+ && params.nBitPerSample != 8u)// we support 8/16/24/32 bit s/w decoding
|| params.eNumData != OMX_NumericalDataSigned
|| params.ePCMMode != OMX_AUDIO_PCMModeLinear) {
ALOGE("unsupported PCM port: %u channels%s, %u-bit, %s(%d), %s(%d) mode ",
@@ -4198,6 +4360,8 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
+ notify->setInt32("bits-per-sample", params.nBitPerSample);
+ notify->setInt32("pcm-format", getPCMFormat(notify));
if (mChannelMaskPresent) {
notify->setInt32("channel-mask", mChannelMask);
@@ -4248,6 +4412,13 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
case OMX_AUDIO_CodingFLAC:
{
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::getAudioPortFormat(portIndex,
+ (int)audioDef->eEncoding, notify, mOMX, mNode);
+ if (err != OK) {
+ return err;
+ }
+ } else {
OMX_AUDIO_PARAM_FLACTYPE params;
InitOMXParams(&params);
params.nPortIndex = portIndex;
@@ -4261,6 +4432,7 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FLAC);
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSampleRate);
+ }
break;
}
@@ -4402,6 +4574,14 @@ status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
}
default:
+ if (!strncmp(mComponentName.c_str(), "OMX.ffmpeg.", 11)) {
+ err = FFMPEGSoftCodec::getAudioPortFormat(portIndex,
+ (int)audioDef->eEncoding, notify, mOMX, mNode);
+ }
+ if (err == OK) {
+ break;
+ }
+
ALOGE("Unsupported audio coding: %s(%d)\n",
asString(audioDef->eEncoding), audioDef->eEncoding);
return BAD_TYPE;
@@ -4440,18 +4620,17 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
(mEncoderDelay || mEncoderPadding)) {
int32_t channelCount;
CHECK(notify->findInt32("channel-count", &channelCount));
- size_t frameSize = channelCount * sizeof(int16_t);
if (mSkipCutBuffer != NULL) {
size_t prevbufsize = mSkipCutBuffer->size();
if (prevbufsize != 0) {
ALOGW("Replacing SkipCutBuffer holding %zu bytes", prevbufsize);
}
}
- mSkipCutBuffer = new SkipCutBuffer(
- mEncoderDelay * frameSize,
- mEncoderPadding * frameSize);
+ mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay, mEncoderPadding, channelCount);
}
+ getVQZIPInfo(notify);
+
notify->post();
mSentFormat = true;
@@ -4600,6 +4779,7 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
ALOGI("[%s] forcing the release of codec",
mCodec->mComponentName.c_str());
status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+ mCodec->changeState(mCodec->mUninitializedState);
ALOGE_IF("[%s] failed to release codec instance: err=%d",
mCodec->mComponentName.c_str(), err);
sp<AMessage> notify = mCodec->mNotify->dup();
@@ -5184,6 +5364,7 @@ bool ACodec::BaseState::onOMXFillBufferDone(
mCodec->mSkipCutBuffer->submit(info->mData);
}
info->mData->meta()->setInt64("timeUs", timeUs);
+ info->mData->meta()->setObject("graphic-buffer", info->mGraphicBuffer);
sp<AMessage> notify = mCodec->mNotify->dup();
notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
@@ -5193,6 +5374,8 @@ bool ACodec::BaseState::onOMXFillBufferDone(
reply->setInt32("buffer-id", info->mBufferID);
+ (void)mCodec->setDSModeHint(reply, flags, timeUs);
+
notify->setMessage("reply", reply);
notify->post();
@@ -5247,8 +5430,9 @@ void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
}
+ bool skip = mCodec->getDSModeHint(msg);
int32_t render;
- if (mCodec->mNativeWindow != NULL
+ if (!skip && mCodec->mNativeWindow != NULL
&& msg->findInt32("render", &render) && render != 0
&& info->mData != NULL && info->mData->size() != 0) {
ATRACE_NAME("render");
@@ -5376,6 +5560,8 @@ void ACodec::UninitializedState::stateEntered() {
mCodec->mOMX.clear();
mCodec->mQuirks = 0;
mCodec->mFlags = 0;
+ mCodec->mEncoderComponent = 0;
+ mCodec->mComponentAllocByName = 0;
mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
mCodec->mComponentName.clear();
@@ -5481,6 +5667,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
ssize_t index = matchingCodecs.add();
OMXCodec::CodecNameAndQuirks *entry = &matchingCodecs.editItemAt(index);
entry->mName = String8(componentName.c_str());
+ mCodec->mComponentAllocByName = true;
if (!OMXCodec::findCodecQuirks(
componentName.c_str(), &entry->mQuirks)) {
@@ -5493,6 +5680,10 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
encoder = false;
}
+ if (encoder == true) {
+ mCodec->mEncoderComponent = true;
+ }
+
OMXCodec::findMatchingCodecs(
mime.c_str(),
encoder, // createEncoder
@@ -5685,6 +5876,7 @@ bool ACodec::LoadedState::onConfigureComponent(
status_t err = OK;
AString mime;
+
if (!msg->findString("mime", &mime)) {
err = BAD_VALUE;
} else {
@@ -5694,13 +5886,80 @@ bool ACodec::LoadedState::onConfigureComponent(
ALOGE("[%s] configureCodec returning error %d",
mCodec->mComponentName.c_str(), err);
- mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
- return false;
+ if (!mCodec->mEncoderComponent && !mCodec->mComponentAllocByName &&
+ !strncmp(mime.c_str(), "video/", strlen("video/"))) {
+ Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
+
+ OMXCodec::findMatchingCodecs(
+ mime.c_str(),
+ false, // createEncoder
+ NULL, // matchComponentName
+ 0, // flags
+ &matchingCodecs);
+
+ err = mCodec->mOMX->freeNode(mCodec->mNode);
+
+ if (err != OK) {
+ ALOGE("Failed to freeNode");
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+ return false;
+ }
+
+ mCodec->mNode = 0;
+ AString componentName;
+ sp<CodecObserver> observer = new CodecObserver;
+
+ err = NAME_NOT_FOUND;
+ for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
+ ++matchIndex) {
+ componentName = matchingCodecs.itemAt(matchIndex).mName.string();
+ if (!strcmp(mCodec->mComponentName.c_str(), componentName.c_str())) {
+ continue;
+ }
+
+ pid_t tid = gettid();
+ int prevPriority = androidGetThreadPriority(tid);
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
+ err = mCodec->mOMX->allocateNode(componentName.c_str(), observer, &mCodec->mNode);
+ androidSetThreadPriority(tid, prevPriority);
+
+ if (err == OK) {
+ break;
+ } else {
+ ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
+ }
+
+ mCodec->mNode = 0;
+ }
+
+ if (mCodec->mNode == 0) {
+ if (!mime.empty()) {
+ ALOGE("Unable to instantiate a decoder for type '%s'", mime.c_str());
+ } else {
+ ALOGE("Unable to instantiate codec '%s' with err %#x.", componentName.c_str(), err);
+ }
+
+ mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
+ return false;
+ }
+
+ sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
+ observer->setNotificationMessage(notify);
+ mCodec->mComponentName = componentName;
+
+ err = mCodec->configureCodec(mime.c_str(), msg);
+ }
+
+ if (err != OK) {
+ mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
+ return false;
+ }
}
{
sp<AMessage> notify = mCodec->mNotify->dup();
notify->setInt32("what", CodecBase::kWhatComponentConfigured);
+ notify->setString("componentName", mCodec->mComponentName.c_str());
notify->setMessage("input-format", mCodec->mInputFormat);
notify->setMessage("output-format", mCodec->mOutputFormat);
notify->post();
@@ -6363,6 +6622,23 @@ void ACodec::onSignalEndOfInputStream() {
notify->post();
}
+sp<IOMXObserver> ACodec::createObserver() {
+ sp<CodecObserver> observer = new CodecObserver;
+ sp<AMessage> notify = new AMessage(kWhatOMXMessageList, this);
+ observer->setNotificationMessage(notify);
+ return observer;
+}
+
+status_t ACodec::allocateBuffer(
+ OMX_U32 portIndex, size_t bufSize, BufferInfo &info) {
+ void *ptr;
+ status_t err = mOMX->allocateBuffer(
+ mNode, portIndex, bufSize, &info.mBufferID, &ptr);
+
+ info.mData = new ABuffer(ptr, bufSize);
+ return err;
+}
+
bool ACodec::ExecutingState::onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
mCodec->onFrameRendered(mediaTimeUs, systemNano);
return true;
@@ -6433,8 +6709,34 @@ bool ACodec::OutputPortSettingsChangedState::onMessageReceived(
bool handled = false;
switch (msg->what()) {
- case kWhatFlush:
case kWhatShutdown:
+ {
+ int32_t keepComponentAllocated;
+ CHECK(msg->findInt32(
+ "keepComponentAllocated", &keepComponentAllocated));
+
+ mCodec->mShutdownInProgress = true;
+ mCodec->mExplicitShutdown = true;
+ mCodec->mKeepComponentAllocated = keepComponentAllocated;
+
+ status_t err = mCodec->mOMX->sendCommand(
+ mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+ if (err != OK) {
+ if (keepComponentAllocated) {
+ mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+ }
+ // TODO: do some recovery here.
+ } else {
+ // This is technically not correct, but appears to be
+ // the only way to free the component instance using
+ // ExecutingToIdleState.
+ mCodec->changeState(mCodec->mExecutingToIdleState);
+ }
+
+ handled = true;
+ break;
+ }
+ case kWhatFlush:
case kWhatResume:
case kWhatSetParameters:
{
@@ -6493,6 +6795,11 @@ bool ACodec::OutputPortSettingsChangedState::onOMXEvent(
mCodec->mNode, OMX_CommandPortEnable, kPortIndexOutput);
}
+ /* Clear the RenderQueue in which queued GraphicBuffers hold the
+ * actual buffer references in order to free them early.
+ */
+ mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
+
if (err == OK) {
err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
@@ -6501,15 +6808,6 @@ bool ACodec::OutputPortSettingsChangedState::onOMXEvent(
if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
-
- // This is technically not correct, but appears to be
- // the only way to free the component instance.
- // Controlled transitioning from excecuting->idle
- // and idle->loaded seem impossible probably because
- // the output port never finishes re-enabling.
- mCodec->mShutdownInProgress = true;
- mCodec->mKeepComponentAllocated = false;
- mCodec->changeState(mCodec->mLoadedState);
}
return true;
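
One of the simpler ideas in the ACodec hunks above is the adaptive-playback window sizing: when the input format advertises adaptive playback with max-width/max-height, the native window is sized to the larger of the current frame size and those maxima so later resolution switches need not reallocate buffers. A standalone sketch of just that computation; the struct and function names are ours.

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct WindowSize { int32_t width; int32_t height; };

WindowSize pickWindowSize(bool adaptivePlayback,
                          int32_t frameWidth, int32_t frameHeight,
                          int32_t maxWidth, int32_t maxHeight) {
    if (adaptivePlayback && maxWidth > 0 && maxHeight > 0) {
        return { std::max(maxWidth, frameWidth), std::max(maxHeight, frameHeight) };
    }
    return { frameWidth, frameHeight };
}

int main() {
    WindowSize s = pickWindowSize(true, 1280, 720, 1920, 1080);
    std::printf("%dx%d\n", s.width, s.height);
}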
diff --git a/media/libstagefright/APE.cpp b/media/libstagefright/APE.cpp
new file mode 100644
index 0000000..74ca7dc
--- /dev/null
+++ b/media/libstagefright/APE.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) Texas Instruments - http://www.ti.com/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APE_TAG"
+#include <utils/Log.h>
+
+#include "include/APE.h"
+
+namespace android {
+
+APE::APE(){
+
+}
+
+APE::~APE(){
+
+}
+
+bool APE::isAPE(uint8_t *apeTag) const {
+ if(apeTag[0] == 'A' && apeTag[1] == 'P' && apeTag[2] == 'E' &&
+ apeTag[3] == 'T' && apeTag[4] == 'A' && apeTag[5] == 'G' &&
+ apeTag[6] == 'E' && apeTag[7] == 'X'){
+ return true;
+ }
+ return false;
+}
+
+size_t sizeItemKey(const sp<DataSource> &source, off64_t offset){
+ off64_t ItemKeyOffset = offset;
+ uint8_t keyTerminator = 0;
+ size_t keySize = 0;
+ do {
+ source->readAt(ItemKeyOffset, &keyTerminator, 1);
+ ItemKeyOffset++;
+ keySize++;
+ } while (keyTerminator != 0);
+ return keySize - 1;
+}
+
+bool APE::parseAPE(const sp<DataSource> &source, off64_t offset,
+ sp<MetaData> &meta){
+
+ struct Map {
+ int key;
+ const char *tag;
+ } const kMap[] = {
+ { kKeyAlbum, "Album" },
+ { kKeyArtist, "Artist" },
+ { kKeyAlbumArtist, "Album" },
+ { kKeyComposer, "Composer" },
+ { kKeyGenre, "Genre" },
+ { kKeyTitle, "Title" },
+ { kKeyYear, "Year" },
+ { kKeyCDTrackNumber, "Track" },
+ { kKeyDate, "Record Date"},
+ };
+
+ static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
+
+ off64_t headerOffset = offset;
+ headerOffset += 16;
+ itemNumber = 0;
+ if (source->readAt(headerOffset, &itemNumber, 1) == 0)
+ return false;
+
+ headerOffset += 16;
+
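+ // Each tag item is laid out as: a 4-byte value length, 4 bytes of
+ // item flags, a NUL-terminated key string, then the value bytes.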
+ for(uint32_t it = 0; it < itemNumber; it++){
+ lenValue = 0;
+ if (source->readAt(headerOffset, &lenValue, 1) == 0)
+ return false;
+
+ headerOffset += 4;
+
+ itemFlags = 0;
+ if (source->readAt(headerOffset, &itemFlags, 1) == 0)
+ return false;
+
+ headerOffset += 4;
+
+ size_t sizeKey = sizeItemKey(source, headerOffset);
+
+ char *key = new char[sizeKey + 1];
+
+ if (source->readAt(headerOffset, key, sizeKey) == 0)
+ return false;
+
+ key[sizeKey] = '\0';
+ headerOffset += sizeKey + 1;
+
+ char *val = new char[lenValue + 1];
+
+ if (source->readAt(headerOffset, val, lenValue) == 0)
+ return false;
+
+ val[lenValue] = '\0';
+
+ for (size_t i = 0; i < kNumMapEntries; i++){
+ if (!strcmp(key, kMap[i].tag)){
+ if (itemFlags == 0)
+ meta->setCString(kMap[i].key, (const char *)val);
+ break;
+ }
+ }
+ headerOffset += lenValue;
+ delete[] key;
+ delete[] val;
+ }
+
+ return true;
+}
+} //namespace android
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2529aa7..792c139 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -1,7 +1,6 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
-include frameworks/av/media/libstagefright/codecs/common/Config.mk
LOCAL_SRC_FILES:= \
ACodec.cpp \
@@ -66,12 +65,16 @@ LOCAL_SRC_FILES:= \
VBRISeeker.cpp \
VideoFrameScheduler.cpp \
WAVExtractor.cpp \
+ WAVEWriter.cpp \
WVMExtractor.cpp \
XINGSeeker.cpp \
avc_utils.cpp \
+ APE.cpp \
+ FFMPEGSoftCodec.cpp \
LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/include/media/ \
+ $(TOP)/frameworks/av/media/libavextensions \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
$(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/native/include/media/openmax \
@@ -104,7 +107,7 @@ LOCAL_SHARED_LIBRARIES := \
libutils \
libvorbisidec \
libz \
- libpowermanager
+ libpowermanager \
LOCAL_STATIC_LIBRARIES := \
libstagefright_color_conversion \
@@ -120,6 +123,31 @@ LOCAL_STATIC_LIBRARIES := \
libFLAC \
libmedia_helper \
+LOCAL_WHOLE_STATIC_LIBRARIES := libavextensions
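+# libavextensions is linked as a whole archive so the linker keeps all of its objects.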
+
+ifeq ($(BOARD_USE_S3D_SUPPORT), true)
+ifeq ($(BOARD_USES_HWC_SERVICES), true)
+LOCAL_CFLAGS += -DUSE_S3D_SUPPORT -DHWC_SERVICES
+LOCAL_C_INCLUDES += \
+ $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/openmax/include/exynos \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/$(TARGET_BOARD_PLATFORM)/libhwcService \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/$(TARGET_BOARD_PLATFORM)/libhwc \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/$(TARGET_BOARD_PLATFORM)/include \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/$(TARGET_SOC)/libhwcmodule \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/$(TARGET_SOC)/include \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/exynos/libexynosutils \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/exynos/include \
+ $(TOP)/hardware/samsung_slsi-$(TARGET_SLSI_VARIANT)/exynos/libhwc
+
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+ $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
+
+LOCAL_SHARED_LIBRARIES += \
+ libExynosHWCService
+endif
+endif
+
LOCAL_SHARED_LIBRARIES += \
libstagefright_enc_common \
libstagefright_avc_common \
@@ -127,15 +155,63 @@ LOCAL_SHARED_LIBRARIES += \
libdl \
libRScpp \
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CFLAGS += -Werror -Wno-multichar -Wno-error=deprecated-declarations
+
+ifeq ($(TARGET_USES_QCOM_BSP), true)
+ LOCAL_C_INCLUDES += $(call project-path-for,qcom-display)/libgralloc
+ LOCAL_CFLAGS += -DQTI_BSP
+endif
+
+LOCAL_C_INCLUDES += $(call project-path-for,qcom-media)/mm-core/inc
# enable experiments only in userdebug and eng builds
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
endif
+ifeq ($(TARGET_BOARD_PLATFORM),omap4)
+LOCAL_CFLAGS += -DBOARD_CANT_REALLOCATE_OMX_BUFFERS
+endif
+
+ifeq ($(TARGET_USE_AVC_BASELINE_PROFILE), true)
+LOCAL_CFLAGS += -DUSE_AVC_BASELINE_PROFILE
+endif
+
+ifeq ($(call is-vendor-board-platform,QCOM),true)
+LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_EXTN_FLAC_DECODER)),true)
+ LOCAL_CFLAGS += -DQTI_FLAC_DECODER
+endif
+endif
+
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_FLAC_OFFLOAD)),true)
+ LOCAL_CFLAGS += -DFLAC_OFFLOAD_ENABLED
+endif
+
LOCAL_CLANG := true
+ifeq ($(BOARD_USE_SAMSUNG_CAMERAFORMAT_NV21), true)
+# This flag requires the following string constant in
+# CameraParametersExtra.h:
+#
+# const char CameraParameters::PIXEL_FORMAT_YUV420SP_NV21[] = "nv21";
+LOCAL_CFLAGS += -DUSE_SAMSUNG_CAMERAFORMAT_NV21
+endif
+
+# FFMPEG plugin
+LOCAL_C_INCLUDES += $(TOP)/external/stagefright-plugins/include
+
+#LOCAL_CFLAGS += -DLOG_NDEBUG=0
+
+ifeq ($(BOARD_USE_SAMSUNG_COLORFORMAT), true)
+LOCAL_CFLAGS += -DUSE_SAMSUNG_COLORFORMAT
+
+# Include native color format header path
+LOCAL_C_INCLUDES += \
+ $(TOP)/hardware/samsung/exynos4/hal/include \
+ $(TOP)/hardware/samsung/exynos4/include
+endif
+
LOCAL_MODULE:= libstagefright
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index dd9d393..1f9383b 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -54,6 +54,7 @@ AudioPlayer::AudioPlayer(
mFinalStatus(OK),
mSeekTimeUs(0),
mStarted(false),
+ mSourcePaused(false),
mIsFirstBuffer(false),
mFirstBufferResult(OK),
mFirstBuffer(NULL),
@@ -62,7 +63,8 @@ AudioPlayer::AudioPlayer(
mPinnedTimeUs(-1ll),
mPlaying(false),
mStartPosUs(0),
- mCreateFlags(flags) {
+ mCreateFlags(flags),
+ mPauseRequired(false) {
}
AudioPlayer::~AudioPlayer() {
@@ -82,6 +84,7 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
status_t err;
if (!sourceAlreadyStarted) {
+ mSourcePaused = false;
err = mSource->start();
if (err != OK) {
@@ -99,7 +102,6 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
MediaSource::ReadOptions options;
if (mSeeking) {
options.setSeekTo(mSeekTimeUs);
- mSeeking = false;
}
mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
@@ -109,8 +111,25 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
CHECK(mFirstBuffer == NULL);
mFirstBufferResult = OK;
mIsFirstBuffer = false;
+
+ if (mSeeking) {
+ mPositionTimeRealUs = 0;
+ mPositionTimeMediaUs = mSeekTimeUs;
+ mSeeking = false;
+ }
+
} else {
mIsFirstBuffer = true;
+
+ if (mSeeking) {
+ mPositionTimeRealUs = 0;
+ if (mFirstBuffer == NULL || !mFirstBuffer->meta_data()->findInt64(
+ kKeyTime, &mPositionTimeMediaUs)) {
+ return UNKNOWN_ERROR;
+ }
+ mSeeking = false;
+ }
+
}
sp<MetaData> format = mSource->getFormat();
@@ -257,13 +276,16 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
mStarted = true;
mPlaying = true;
mPinnedTimeUs = -1ll;
-
+ const char *componentName;
+ if (!(format->findCString(kKeyDecoderComponent, &componentName))) {
+ componentName = "none";
+ }
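+ // Only pause/resume the source explicitly when a QCOM decoder component is in use.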
+ mPauseRequired = !strncmp(componentName, "OMX.qcom.", 9);
return OK;
}
void AudioPlayer::pause(bool playPendingSamples) {
CHECK(mStarted);
-
if (playPendingSamples) {
if (mAudioSink.get() != NULL) {
mAudioSink->stop();
@@ -284,10 +306,21 @@ void AudioPlayer::pause(bool playPendingSamples) {
}
mPlaying = false;
+ CHECK(mSource != NULL);
+ if (mPauseRequired) {
+ if (mSource->pause() == OK) {
+ mSourcePaused = true;
+ }
+ }
}
status_t AudioPlayer::resume() {
CHECK(mStarted);
+ CHECK(mSource != NULL);
+ if (mSourcePaused == true) {
+ mSourcePaused = false;
+ mSource->start();
+ }
status_t err;
if (mAudioSink.get() != NULL) {
@@ -349,7 +382,7 @@ void AudioPlayer::reset() {
mInputBuffer->release();
mInputBuffer = NULL;
}
-
+ mSourcePaused = false;
mSource->stop();
// The following hack is necessary to ensure that the OMX
@@ -379,6 +412,7 @@ void AudioPlayer::reset() {
mStarted = false;
mPlaying = false;
mStartPosUs = 0;
+ mPauseRequired = false;
}
// static
@@ -549,6 +583,10 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) {
mIsFirstBuffer = false;
} else {
err = mSource->read(&mInputBuffer, &options);
+ if (err == OK && mInputBuffer == NULL && mSourcePaused) {
+ ALOGV("mSourcePaused, return 0 from fillBuffer");
+ return 0;
+ }
}
CHECK((err == OK && mInputBuffer != NULL)
@@ -795,8 +833,8 @@ int64_t AudioPlayer::getMediaTimeUs() {
}
int64_t realTimeOffset = getRealTimeUsLocked() - mPositionTimeRealUs;
- if (realTimeOffset < 0) {
- realTimeOffset = 0;
+ if (mPositionTimeMediaUs + realTimeOffset < 0) {
+ return 0;
}
return mPositionTimeMediaUs + realTimeOffset;
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 55f4361..77a7d1c 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -62,10 +62,11 @@ AudioSource::AudioSource(
mFirstSampleTimeUs(-1ll),
mInitialReadTimeUs(0),
mNumFramesReceived(0),
- mNumClientOwnedBuffers(0) {
+ mNumClientOwnedBuffers(0),
+ mRecPaused(false) {
ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
sampleRate, outSampleRate, channelCount);
- CHECK(channelCount == 1 || channelCount == 2);
+ CHECK(channelCount == 1 || channelCount == 2 || channelCount == 6);
CHECK(sampleRate > 0);
size_t minFrameCount;
@@ -113,6 +114,11 @@ status_t AudioSource::initCheck() const {
status_t AudioSource::start(MetaData *params) {
Mutex::Autolock autoLock(mLock);
+ if (mRecPaused) {
+ mRecPaused = false;
+ return OK;
+ }
+
if (mStarted) {
return UNKNOWN_ERROR;
}
@@ -128,6 +134,8 @@ status_t AudioSource::start(MetaData *params) {
int64_t startTimeUs;
if (params && params->findInt64(kKeyTime, &startTimeUs)) {
mStartTimeUs = startTimeUs;
+ } else {
+ mStartTimeUs = systemTime() / 1000ll;
}
status_t err = mRecord->start();
if (err == OK) {
@@ -140,6 +148,12 @@ status_t AudioSource::start(MetaData *params) {
return err;
}
+status_t AudioSource::pause() {
+ ALOGV("AudioSource::Pause");
+ mRecPaused = true;
+ return OK;
+}
+
void AudioSource::releaseQueuedFrames_l() {
ALOGV("releaseQueuedFrames_l");
List<MediaBuffer *>::iterator it;
@@ -294,10 +308,6 @@ void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
int64_t timeUs = systemTime() / 1000ll;
- // Estimate the real sampling time of the 1st sample in this buffer
- // from AudioRecord's latency. (Apply this adjustment first so that
- // the start time logic is not affected.)
- timeUs -= mRecord->latency() * 1000LL;
ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
Mutex::Autolock autoLock(mLock);
@@ -370,6 +380,14 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
}
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
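+ // While recording is paused, flush any queued buffers and drop this one.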
+ if (mRecPaused) {
+ if (!mBuffersReceived.empty()) {
+ releaseQueuedFrames_l();
+ }
+ buffer->release();
+ return;
+ }
+
const size_t bufferSize = buffer->range_length();
const size_t frameSize = mRecord->frameSize();
const int64_t timestampUs =
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 3cd0b0e..933f241 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -216,7 +216,8 @@ AwesomePlayer::AwesomePlayer()
mLastVideoTimeUs(-1),
mTextDriver(NULL),
mOffloadAudio(false),
- mAudioTearDown(false) {
+ mAudioTearDown(false),
+ mIsFirstFrameAfterResume(false) {
CHECK_EQ(mClient.connect(), (status_t)OK);
DataSource::RegisterDefaultSniffers();
@@ -343,6 +344,7 @@ status_t AwesomePlayer::setDataSource(
reset_l();
+ fd = dup(fd);
sp<DataSource> dataSource = new FileSource(fd, offset, length);
status_t err = dataSource->initCheck();
@@ -1244,6 +1246,7 @@ void AwesomePlayer::initRenderer_l() {
setVideoScalingMode_l(mVideoScalingMode);
if (USE_SURFACE_ALLOC
&& !strncmp(component, "OMX.", 4)
+ && strncmp(component, "OMX.ffmpeg.", 11)
&& strncmp(component, "OMX.google.", 11)) {
// Hardware decoders avoid the CPU color conversion by decoding
// directly to ANativeBuffers, so we must use a renderer that
@@ -1804,11 +1807,18 @@ void AwesomePlayer::onVideoEvent() {
if (mSeeking != NO_SEEK) {
ALOGV("seeking to %" PRId64 " us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
+ MediaSource::ReadOptions::SeekMode seekmode = (mSeeking == SEEK_VIDEO_ONLY)
+ ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
+ : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC;
+ // Seek to the next key-frame after resume for http streaming
+ if (mCachedSource != NULL && mIsFirstFrameAfterResume) {
+ seekmode = MediaSource::ReadOptions::SEEK_NEXT_SYNC;
+ mIsFirstFrameAfterResume = false;
+ }
+
options.setSeekTo(
mSeekTimeUs,
- mSeeking == SEEK_VIDEO_ONLY
- ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
- : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ seekmode);
}
for (;;) {
status_t err = mVideoSource->read(&mVideoBuffer, &options);
@@ -3044,4 +3054,86 @@ void AwesomePlayer::onAudioTearDownEvent() {
beginPrepareAsync_l();
}
+// suspend() will release the decoders, the renderers and the buffers allocated for decoders
+// Releasing decoders eliminates draining power in suspended state.
+status_t AwesomePlayer::suspend() {
+ ALOGV("suspend()");
+ Mutex::Autolock autoLock(mLock);
+
+ // Set PAUSE to DrmManagerClient which will be set START in play_l()
+ if (mDecryptHandle != NULL) {
+ mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
+ Playback::PAUSE, 0);
+ }
+
+ cancelPlayerEvents();
+ if (mQueueStarted) {
+ mQueue.stop();
+ mQueueStarted = false;
+ }
+
+ // Shutdown audio decoder first
+ if ((mAudioPlayer == NULL || !(mFlags & AUDIOPLAYER_STARTED))
+ && mAudioSource != NULL) {
+ mAudioSource->stop();
+ }
+ mAudioSource.clear();
+ mOmxSource.clear();
+ delete mAudioPlayer;
+ mAudioPlayer = NULL;
+ modifyFlags(AUDIO_RUNNING | AUDIOPLAYER_STARTED, CLEAR);
+
+ // Shutdown the video decoder
+ mVideoRenderer.clear();
+ if (mVideoSource != NULL) {
+ shutdownVideoDecoder_l();
+ }
+ modifyFlags(PLAYING, CLEAR);
+ mVideoRenderingStarted = false;
+
+ // Disconnect the source
+ if (mCachedSource != NULL) {
+ status_t err = mCachedSource->disconnectWhileSuspend();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ return OK;
+}
+
+status_t AwesomePlayer::resume() {
+ ALOGV("resume()");
+ Mutex::Autolock autoLock(mLock);
+
+ // Reconnect the source
+ status_t err = mCachedSource->connectWhileResume();
+ if (err != OK) {
+ return err;
+ }
+
+ if (mVideoTrack != NULL && mVideoSource == NULL) {
+ status_t err = initVideoDecoder();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (mAudioTrack != NULL && mAudioSource == NULL) {
+ status_t err = initAudioDecoder();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ mIsFirstFrameAfterResume = true;
+
+ if (!mQueueStarted) {
+ mQueue.start();
+ mQueueStarted = true;
+ }
+
+ return OK;
+}
+
} // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index fa30644..f6b4741 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -35,6 +35,8 @@
#include <utils/String8.h>
#include <cutils/properties.h>
+#include <stagefright/AVExtensions.h>
+
#if LOG_NDEBUG
#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
#else
@@ -100,6 +102,11 @@ void CameraSourceListener::postDataTimestamp(
}
static int32_t getColorFormat(const char* colorFormat) {
+ if (!colorFormat) {
+ ALOGE("Invalid color format");
+ return -1;
+ }
+
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
return OMX_COLOR_FormatYUV420Planar;
}
@@ -109,8 +116,20 @@ static int32_t getColorFormat(const char* colorFormat) {
}
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420SP)) {
+#ifdef USE_SAMSUNG_COLORFORMAT
+ static const int OMX_SEC_COLOR_FormatNV12LPhysicalAddress = 0x7F000002;
+ return OMX_SEC_COLOR_FormatNV12LPhysicalAddress;
+#else
return OMX_COLOR_FormatYUV420SemiPlanar;
+#endif
+ }
+
+#ifdef USE_SAMSUNG_CAMERAFORMAT_NV21
+ if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420SP_NV21)) {
+ static const int OMX_SEC_COLOR_FormatNV21Linear = 0x7F000011;
+ return OMX_SEC_COLOR_FormatNV21Linear;
}
+#endif /* USE_SAMSUNG_CAMERAFORMAT_NV21 */
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422I)) {
return OMX_COLOR_FormatYCbYCr;
@@ -128,6 +147,10 @@ static int32_t getColorFormat(const char* colorFormat) {
return OMX_COLOR_FormatAndroidOpaque;
}
+ if (!strcmp(colorFormat, "YVU420SemiPlanar")) {
+ return OMX_QCOM_COLOR_FormatYVU420SemiPlanar;
+ }
+
ALOGE("Uknown color format (%s), please add it to "
"CameraSource::getColorFormat", colorFormat);
@@ -187,7 +210,11 @@ CameraSource::CameraSource(
mNumFramesDropped(0),
mNumGlitches(0),
mGlitchDurationThresholdUs(200000),
- mCollectStats(false) {
+ mCollectStats(false),
+ mPauseAdjTimeUs(0),
+ mPauseStartTimeUs(0),
+ mPauseEndTimeUs(0),
+ mRecPause(false) {
mVideoSize.width = -1;
mVideoSize.height = -1;
@@ -577,6 +604,8 @@ status_t CameraSource::initWithCameraAccess(
mMeta->setInt32(kKeyStride, mVideoSize.width);
mMeta->setInt32(kKeySliceHeight, mVideoSize.height);
mMeta->setInt32(kKeyFrameRate, mVideoFrameRate);
+ AVUtils::get()->extractCustomCameraKeys(params, mMeta);
+
return OK;
}
@@ -643,6 +672,14 @@ status_t CameraSource::startCameraRecording() {
status_t CameraSource::start(MetaData *meta) {
ALOGV("start");
+ if(mRecPause) {
+ mRecPause = false;
+ mPauseAdjTimeUs = mPauseEndTimeUs - mPauseStartTimeUs;
+ ALOGV("resume : mPause Adj / End / Start : %" PRId64 " / %" PRId64 " / %" PRId64" us",
+ mPauseAdjTimeUs, mPauseEndTimeUs, mPauseStartTimeUs);
+ return OK;
+ }
+
CHECK(!mStarted);
if (mInitCheck != OK) {
ALOGE("CameraSource is not initialized yet");
@@ -656,13 +693,23 @@ status_t CameraSource::start(MetaData *meta) {
}
mStartTimeUs = 0;
+ mRecPause = false;
+ mPauseAdjTimeUs = 0;
+ mPauseStartTimeUs = 0;
+ mPauseEndTimeUs = 0;
mNumInputBuffers = 0;
mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
mEncoderDataSpace = HAL_DATASPACE_BT709;
if (meta) {
int64_t startTimeUs;
- if (meta->findInt64(kKeyTime, &startTimeUs)) {
+
+ auto key = kKeyTime;
+ if (!property_get_bool("media.camera.ts.monotonic", true)) {
+ key = kKeyTimeBoot;
+ }
+
+ if (meta->findInt64(key, &startTimeUs)) {
mStartTimeUs = startTimeUs;
}
@@ -689,6 +736,16 @@ status_t CameraSource::start(MetaData *meta) {
return err;
}
+status_t CameraSource::pause() {
+ mRecPause = true;
+ mPauseStartTimeUs = mLastFrameTimestampUs;
+ // Record the end time too, or there is a risk the end time stays 0.
+ mPauseEndTimeUs = mLastFrameTimestampUs;
+ ALOGV("pause : mPauseStart %" PRId64 " us, #Queued Frames : %zd",
+ mPauseStartTimeUs, mFramesReceived.size());
+ return OK;
+}
+
void CameraSource::stopCameraRecording() {
ALOGV("stopCameraRecording");
if (mCameraFlags & FLAGS_HOT_CAMERA) {
@@ -895,10 +952,23 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
return;
}
+ if (mRecPause == true) {
+ if(!mFramesReceived.empty()) {
+ ALOGV("releaseQueuedFrames - #Queued Frames : %zd", mFramesReceived.size());
+ releaseQueuedFrames();
+ }
+ ALOGV("release One Video Frame for Pause : %" PRId64 "us", timestampUs);
+ releaseOneRecordingFrame(data);
+ mPauseEndTimeUs = timestampUs;
+ return;
+ }
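+ // Exclude the total paused duration from this frame's timestamp.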
+ timestampUs -= mPauseAdjTimeUs;
+ ALOGV("dataCallbackTimestamp: AdjTimestamp %" PRId64 "us", timestampUs);
+
if (mNumFramesReceived > 0) {
if (timestampUs <= mLastFrameTimestampUs) {
- ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
- (long long)timestampUs, (long long)mLastFrameTimestampUs);
+ ALOGW("Dropping frame with backward timestamp %" PRId64 " (last %" PRId64 ")",
+ timestampUs, mLastFrameTimestampUs);
releaseOneRecordingFrame(data);
return;
}
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 0acd9d0..53815bd 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -78,6 +78,7 @@ CameraSourceTimeLapse::CameraSourceTimeLapse(
storeMetaDataInVideoBuffers),
mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
mLastTimeLapseFrameRealTimestampUs(0),
+ mLastTimeLapseFrameTimeStampUs(0),
mSkipCurrentFrame(false) {
mTimeBetweenFrameCaptureUs = timeBetweenFrameCaptureUs;
@@ -252,6 +253,7 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
ALOGV("dataCallbackTimestamp timelapse: initial frame");
mLastTimeLapseFrameRealTimestampUs = *timestampUs;
+ mLastTimeLapseFrameTimeStampUs = *timestampUs;
return false;
}
@@ -263,8 +265,10 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
if (mForceRead) {
ALOGV("dataCallbackTimestamp timelapse: forced read");
mForceRead = false;
+ mLastTimeLapseFrameRealTimestampUs = *timestampUs;
*timestampUs =
- mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ mLastTimeLapseFrameTimeStampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ mLastTimeLapseFrameTimeStampUs = *timestampUs;
// Really make sure that this video recording frame will not be dropped.
if (*timestampUs < mStartTimeUs) {
@@ -279,7 +283,8 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
// The first 2 output frames from the encoder are: decoder specific info and
// the compressed video frame data for the first input video frame.
if (mNumFramesEncoded >= 1 && *timestampUs <
- (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenFrameCaptureUs)) {
+ (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenFrameCaptureUs) &&
+ (mTimeBetweenFrameCaptureUs > mTimeBetweenTimeLapseVideoFramesUs + 1)) {
// Skip all frames from last encoded frame until
// sufficient time (mTimeBetweenFrameCaptureUs) has passed.
// Tell the camera to release its recording frame and return.
@@ -293,7 +298,14 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
ALOGV("dataCallbackTimestamp timelapse: got timelapse frame");
mLastTimeLapseFrameRealTimestampUs = *timestampUs;
- *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ *timestampUs = mLastTimeLapseFrameTimeStampUs + mTimeBetweenTimeLapseVideoFramesUs;
+ mLastTimeLapseFrameTimeStampUs = *timestampUs;
+ // Update start-time once the captured-time reaches the expected start-time.
+ // Not doing so will result in CameraSource always dropping frames since
+ // updated-timestamp will never intersect start-timestamp
+ if ((mNumFramesReceived == 0 && mLastTimeLapseFrameRealTimestampUs >= mStartTimeUs)) {
+ mStartTimeUs = *timestampUs;
+ }
return false;
}
return false;
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 5020c6c..34f0649 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -38,7 +38,6 @@
#include <media/IMediaHTTPConnection.h>
#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/DataURISource.h>
#include <media/stagefright/FileSource.h>
@@ -47,9 +46,28 @@
#include <utils/String8.h>
#include <cutils/properties.h>
+#include <cutils/log.h>
+
+#include <dlfcn.h>
+
+#include <stagefright/AVExtensions.h>
namespace android {
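+// Optionally dlopen an extractor/sniffer plugin named by the
+// "media.sf.extractor-plugin" property and resolve its getExtractorPlugin hook.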
+static void *loadExtractorPlugin() {
+ void *ret = NULL;
+ char lib[PROPERTY_VALUE_MAX];
+ if (property_get("media.sf.extractor-plugin", lib, NULL)) {
+ if (void *extractorLib = ::dlopen(lib, RTLD_LAZY)) {
+ ret = ::dlsym(extractorLib, "getExtractorPlugin");
+ ALOGW_IF(!ret, "Failed to find symbol, dlerror: %s", ::dlerror());
+ } else {
+ ALOGV("Failed to load %s, dlerror: %s", lib, ::dlerror());
+ }
+ }
+ return ret;
+}
+
bool DataSource::getUInt16(off64_t offset, uint16_t *x) {
*x = 0;
@@ -116,6 +134,8 @@ bool DataSource::gSniffersRegistered = false;
bool DataSource::sniff(
String8 *mimeType, float *confidence, sp<AMessage> *meta) {
+
+
*mimeType = "";
*confidence = 0.0f;
meta->clear();
@@ -127,11 +147,18 @@ bool DataSource::sniff(
}
}
+ String8 newMimeType;
+ if (mimeType != NULL) {
+ newMimeType.setTo(*mimeType);
+ }
+ float newConfidence = *confidence;
+
for (List<SnifferFunc>::iterator it = gSniffers.begin();
it != gSniffers.end(); ++it) {
- String8 newMimeType;
- float newConfidence;
- sp<AMessage> newMeta;
+ int64_t sniffStart = ALooper::GetNowUs();
+ String8 newMimeType = *mimeType;
+ float newConfidence = *confidence;
+ sp<AMessage> newMeta = *meta;
if ((*it)(this, &newMimeType, &newConfidence, &newMeta)) {
if (newConfidence > *confidence) {
*mimeType = newMimeType;
@@ -139,6 +166,9 @@ bool DataSource::sniff(
*meta = newMeta;
}
}
+ ALOGV("Sniffer (%p) completed in %.2f ms (mime=%s confidence=%.2f",
+ this, ((float)(ALooper::GetNowUs() - sniffStart) / 1000.0f),
+ mimeType == NULL ? NULL : (*mimeType).string(), *confidence);
}
return *confidence > 0.0;
@@ -156,6 +186,19 @@ void DataSource::RegisterSniffer_l(SnifferFunc func) {
gSniffers.push_back(func);
}
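+// Register the sniffer exported by the extractor plugin, if one was loaded.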
+void DataSource::RegisterSnifferPlugin() {
+ static void (*getExtractorPlugin)(MediaExtractor::Plugin *) =
+ (void (*)(MediaExtractor::Plugin *))loadExtractorPlugin();
+
+ MediaExtractor::Plugin *plugin = MediaExtractor::getPlugin();
+ if (!plugin->sniff && getExtractorPlugin) {
+ getExtractorPlugin(plugin);
+ }
+ if (plugin->sniff) {
+ RegisterSniffer_l(plugin->sniff);
+ }
+}
+
// static
void DataSource::RegisterDefaultSniffers() {
Mutex::Autolock autoLock(gSnifferMutex);
@@ -175,12 +218,15 @@ void DataSource::RegisterDefaultSniffers() {
RegisterSniffer_l(SniffMPEG2PS);
RegisterSniffer_l(SniffWVM);
RegisterSniffer_l(SniffMidi);
+ RegisterSniffer_l(AVUtils::get()->getExtendedSniffer());
+ RegisterSnifferPlugin();
char value[PROPERTY_VALUE_MAX];
if (property_get("drm.service.enabled", value, NULL)
&& (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
RegisterSniffer_l(SniffDRM);
}
+
gSniffersRegistered = true;
}
@@ -190,7 +236,8 @@ sp<DataSource> DataSource::CreateFromURI(
const char *uri,
const KeyedVector<String8, String8> *headers,
String8 *contentType,
- HTTPBase *httpSource) {
+ HTTPBase *httpSource,
+ bool useExtendedCache) {
if (contentType != NULL) {
*contentType = "";
}
@@ -214,7 +261,7 @@ sp<DataSource> DataSource::CreateFromURI(
ALOGE("Failed to make http connection from http service!");
return NULL;
}
- httpSource = new MediaHTTP(conn);
+ httpSource = AVFactory::get()->createMediaHTTP(conn);
}
String8 tmp;
@@ -246,10 +293,17 @@ sp<DataSource> DataSource::CreateFromURI(
*contentType = httpSource->getMIMEType();
}
- source = NuCachedSource2::Create(
- httpSource,
- cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
- disconnectAtHighwatermark);
+ if (useExtendedCache) {
+ source = AVFactory::get()->createCachedSource(
+ httpSource,
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
+ } else {
+ source = NuCachedSource2::Create(
+ httpSource,
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
+ }
} else {
// We do not want that prefetching, caching, datasource wrapper
// in the widevine:// case.
@@ -278,7 +332,7 @@ sp<DataSource> DataSource::CreateMediaHTTP(const sp<IMediaHTTPService> &httpServ
if (conn == NULL) {
return NULL;
} else {
- return new MediaHTTP(conn);
+ return AVFactory::get()->createMediaHTTP(conn);
}
}
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 2c39314..2a61c3a 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -42,7 +42,8 @@ sp<DataURISource> DataURISource::Create(const char *uri) {
AString encoded(commaPos + 1);
// Strip CR and LF...
- for (size_t i = encoded.size(); i-- > 0;) {
+ for (size_t i = encoded.size(); i > 0;) {
+ i--;
if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
encoded.erase(i, 1);
}
diff --git a/media/libstagefright/FFMPEGSoftCodec.cpp b/media/libstagefright/FFMPEGSoftCodec.cpp
new file mode 100644
index 0000000..0d5802b
--- /dev/null
+++ b/media/libstagefright/FFMPEGSoftCodec.cpp
@@ -0,0 +1,1384 @@
+/*
+ * Copyright (C) 2014 The CyanogenMod Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FFMPEGSoftCodec"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ABitReader.h>
+
+#include <media/stagefright/FFMPEGSoftCodec.h>
+
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
+
+#include <cutils/properties.h>
+
+#include <OMX_Component.h>
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+
+#include <OMX_FFMPEG_Extn.h>
+
+#ifdef QCOM_HARDWARE
+#include <OMX_QCOMExtns.h>
+#endif
+
+namespace android {
+
+enum MetaKeyType{
+ INT32, INT64, STRING, DATA, CSD
+};
+
+struct MetaKeyEntry{
+ int MetaKey;
+ const char* MsgKey;
+ MetaKeyType KeyType;
+};
+
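+// Maps MetaData keys to the corresponding AMessage keys and value types,
+// used by the conversion helpers below to translate in either direction.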
+static const MetaKeyEntry MetaKeyTable[] {
+ {kKeyAACAOT , "aac-profile" , INT32},
+ {kKeyArbitraryMode , "use-arbitrary-mode" , INT32},
+ {kKeyBitRate , "bitrate" , INT32},
+ {kKeyBitsPerSample , "bits-per-sample" , INT32},
+ {kKeyBlockAlign , "block-align" , INT32},
+ {kKeyChannelCount , "channel-count" , INT32},
+ {kKeyCodecId , "codec-id" , INT32},
+ {kKeyCodedSampleBits , "coded-sample-bits" , INT32},
+ {kKeyRawCodecSpecificData , "raw-codec-specific-data", CSD},
+ {kKeyRVVersion , "rv-version" , INT32},
+ {kKeySampleFormat , "sample-format" , INT32},
+ {kKeySampleRate , "sample-rate" , INT32},
+ {kKeyWMAVersion , "wma-version" , INT32}, // int32_t
+ {kKeyWMVVersion , "wmv-version" , INT32},
+ {kKeyPCMFormat , "pcm-format" , INT32},
+ {kKeyDivXVersion , "divx-version" , INT32},
+ {kKeyThumbnailTime , "thumbnail-time" , INT64},
+};
+
+const char* FFMPEGSoftCodec::getMsgKey(int key) {
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (key == MetaKeyTable[i].MetaKey) {
+ return MetaKeyTable[i].MsgKey;
+ }
+ }
+ return "unknown";
+}
+
+void FFMPEGSoftCodec::convertMetaDataToMessageFF(
+ const sp<MetaData> &meta, sp<AMessage> *format) {
+ const char * str_val;
+ int32_t int32_val;
+ int64_t int64_val;
+ uint32_t data_type;
+ const void * data;
+ size_t size;
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (MetaKeyTable[i].KeyType == INT32 &&
+ meta->findInt32(MetaKeyTable[i].MetaKey, &int32_val)) {
+ ALOGV("found metakey %s of type int32", MetaKeyTable[i].MsgKey);
+ format->get()->setInt32(MetaKeyTable[i].MsgKey, int32_val);
+ } else if (MetaKeyTable[i].KeyType == INT64 &&
+ meta->findInt64(MetaKeyTable[i].MetaKey, &int64_val)) {
+ ALOGV("found metakey %s of type int64", MetaKeyTable[i].MsgKey);
+ format->get()->setInt64(MetaKeyTable[i].MsgKey, int64_val);
+ } else if (MetaKeyTable[i].KeyType == STRING &&
+ meta->findCString(MetaKeyTable[i].MetaKey, &str_val)) {
+ ALOGV("found metakey %s of type string", MetaKeyTable[i].MsgKey);
+ format->get()->setString(MetaKeyTable[i].MsgKey, str_val);
+ } else if ( (MetaKeyTable[i].KeyType == DATA ||
+ MetaKeyTable[i].KeyType == CSD) &&
+ meta->findData(MetaKeyTable[i].MetaKey, &data_type, &data, &size)) {
+ ALOGV("found metakey %s of type data", MetaKeyTable[i].MsgKey);
+ if (MetaKeyTable[i].KeyType == CSD) {
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ if (strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-0", buffer);
+ } else {
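+ // AVC codec config: split at the last start code found, so the
+ // leading parameter set(s) land in csd-0 and the remainder in csd-1.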
+ const uint8_t *ptr = (const uint8_t *)data;
+ CHECK(size >= 8);
+ int seqLength = 0, picLength = 0;
+ for (size_t i = 4; i < (size - 4); i++)
+ {
+ if ((*(ptr + i) == 0) && (*(ptr + i + 1) == 0) &&
+ (*(ptr + i + 2) == 0) && (*(ptr + i + 3) == 1))
+ seqLength = i;
+ }
+ sp<ABuffer> buffer = new ABuffer(seqLength);
+ memcpy(buffer->data(), data, seqLength);
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-0", buffer);
+ picLength=size-seqLength;
+ sp<ABuffer> buffer1 = new ABuffer(picLength);
+ memcpy(buffer1->data(), (const uint8_t *)data + seqLength, picLength);
+ buffer1->meta()->setInt32("csd", true);
+ buffer1->meta()->setInt64("timeUs", 0);
+ format->get()->setBuffer("csd-1", buffer1);
+ }
+ } else {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+ format->get()->setBuffer(MetaKeyTable[i].MsgKey, buffer);
+ }
+ }
+
+ }
+}
+
+void FFMPEGSoftCodec::convertMessageToMetaDataFF(
+ const sp<AMessage> &msg, sp<MetaData> &meta) {
+ AString str_val;
+ int32_t int32_val;
+ int64_t int64_val;
+ static const size_t numMetaKeys =
+ sizeof(MetaKeyTable) / sizeof(MetaKeyTable[0]);
+ size_t i;
+ for (i = 0; i < numMetaKeys; ++i) {
+ if (MetaKeyTable[i].KeyType == INT32 &&
+ msg->findInt32(MetaKeyTable[i].MsgKey, &int32_val)) {
+ ALOGV("found metakey %s of type int32", MetaKeyTable[i].MsgKey);
+ meta->setInt32(MetaKeyTable[i].MetaKey, int32_val);
+ } else if (MetaKeyTable[i].KeyType == INT64 &&
+ msg->findInt64(MetaKeyTable[i].MsgKey, &int64_val)) {
+ ALOGV("found metakey %s of type int64", MetaKeyTable[i].MsgKey);
+ meta->setInt64(MetaKeyTable[i].MetaKey, int64_val);
+ } else if (MetaKeyTable[i].KeyType == STRING &&
+ msg->findString(MetaKeyTable[i].MsgKey, &str_val)) {
+ ALOGV("found metakey %s of type string", MetaKeyTable[i].MsgKey);
+ meta->setCString(MetaKeyTable[i].MetaKey, str_val.c_str());
+ }
+ }
+}
+
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
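+// Decide whether the component chosen by the regular codec lookup should be
+// replaced with an FFMPEG software decoder, based on the track's metadata.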
+const char* FFMPEGSoftCodec::overrideComponentName(
+ uint32_t /*quirks*/, const sp<MetaData> &meta, const char *mime, bool isEncoder) {
+ const char* componentName = NULL;
+
+ int32_t wmvVersion = 0;
+ if (!strncasecmp(mime, MEDIA_MIMETYPE_VIDEO_WMV, strlen(MEDIA_MIMETYPE_VIDEO_WMV)) &&
+ meta->findInt32(kKeyWMVVersion, &wmvVersion)) {
+ ALOGD("Found WMV version key %d", wmvVersion);
+ if (wmvVersion != 2) {
+ ALOGD("Use FFMPEG for unsupported WMV track");
+ componentName = "OMX.ffmpeg.wmv.decoder";
+ }
+ }
+
+ int32_t encodeOptions = 0;
+ if (!isEncoder && !strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_WMA, strlen(MEDIA_MIMETYPE_AUDIO_WMA)) &&
+ !meta->findInt32(kKeyWMAEncodeOpt, &encodeOptions)) {
+ ALOGD("Use FFMPEG for unsupported WMA track");
+ componentName = "OMX.ffmpeg.wma.decoder";
+ }
+
+ // Google's decoder doesn't support MAIN profile
+ int32_t aacProfile = 0;
+ if (!isEncoder && !strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC, strlen(MEDIA_MIMETYPE_AUDIO_AAC)) &&
+ meta->findInt32(kKeyAACAOT, &aacProfile)) {
+ if ((aacProfile == OMX_AUDIO_AACObjectMain) || (aacProfile == OMX_AUDIO_AACObjectLTP)) {
+ ALOGD("Use FFMPEG for AAC Main/LTP profile");
+ componentName = "OMX.ffmpeg.aac.decoder";
+ }
+ }
+
+ // Use FFMPEG for high-res formats which other decoders can't handle
+ int32_t bits = 16;
+ if (!isEncoder && meta->findInt32(kKeyBitsPerSample, &bits)) {
+ if (bits > 16) {
+ if (!strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC, strlen(MEDIA_MIMETYPE_AUDIO_AAC))) {
+ componentName = "OMX.ffmpeg.aac.decoder";
+ ALOGD("Use FFMPEG for high-res AAC format");
+ } else if (!strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC, strlen(MEDIA_MIMETYPE_AUDIO_FLAC))) {
+ componentName = "OMX.ffmpeg.flac.decoder";
+ ALOGD("Use FFMPEG for high-res FLAC format");
+ }
+ }
+ }
+
+ return componentName;
+}
+
+void FFMPEGSoftCodec::overrideComponentName(
+ uint32_t quirks, const sp<AMessage> &msg, AString* componentName, AString* mime, int32_t isEncoder) {
+
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(msg, meta);
+ const char *updated = overrideComponentName(
+ quirks, meta, mime->c_str(), isEncoder);
+ if (updated != NULL) {
+ componentName->setTo(updated);
+ }
+}
+
+status_t FFMPEGSoftCodec::setVideoFormat(
+ status_t status,
+ const sp<AMessage> &msg, const char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, bool isEncoder,
+ OMX_VIDEO_CODINGTYPE *compressionFormat,
+ const char* componentName) {
+ status_t err = OK;
+
+ //ALOGD("setVideoFormat: %s", msg->debugString(0).c_str());
+
+ /* status passed in is the result of the normal codec lookup */
+ if (status != OK) {
+
+ if (isEncoder) {
+ ALOGE("Encoding not supported");
+ err = BAD_VALUE;
+
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_WMV, mime)) {
+ if (strncmp(componentName, "OMX.ffmpeg.", 11) == 0) {
+ err = setWMVFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setWMVFormat() failed (err = %d)", err);
+ }
+ }
+ *compressionFormat = OMX_VIDEO_CodingWMV;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_RV, mime)) {
+ err = setRVFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setRVFormat() failed (err = %d)", err);
+ } else {
+ *compressionFormat = OMX_VIDEO_CodingRV;
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VC1, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingVC1;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_FLV1, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingFLV1;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingDIVX;
+#ifdef QCOM_HARDWARE
+ // compressionFormat will be override later
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingDIVX;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingDIVX;
+#endif
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)OMX_VIDEO_CodingHEVC;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_FFMPEG, mime)) {
+ ALOGV("Setting the OMX_VIDEO_PARAM_FFMPEGTYPE params");
+ err = setFFmpegVideoFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setFFmpegVideoFormat() failed (err = %d)", err);
+ } else {
+ *compressionFormat = OMX_VIDEO_CodingAutoDetect;
+ }
+ } else {
+ err = BAD_TYPE;
+ }
+ }
+
+#ifdef QCOM_HARDWARE
+ // We need to do a few extra steps if FFMPEGExtractor is in control
+ // and we want to talk to the hardware codecs. This logic is taken
+ // from the CAF L release. It was unfortunately moved to a proprietary
+ // blob and an architecture which is hellish for OEMs who wish to
+ // customize the platform.
+ if (err == OK && (!strncmp(componentName, "OMX.qcom.", 9)
+ || !strncmp(componentName, "OMX.ittiam.", 11))) {
+ status_t xerr = OK;
+
+
+ int32_t mode = 0;
+ OMX_QCOM_PARAM_PORTDEFINITIONTYPE portFmt;
+ InitOMXParams(&portFmt);
+ portFmt.nPortIndex = kPortIndexInput;
+
+ if (msg->findInt32("use-arbitrary-mode", &mode) && mode) {
+ ALOGI("Decoder will be in arbitrary mode");
+ portFmt.nFramePackingFormat = OMX_QCOM_FramePacking_Arbitrary;
+ } else {
+ ALOGI("Decoder will be in frame by frame mode");
+ portFmt.nFramePackingFormat = OMX_QCOM_FramePacking_OnlyOneCompleteFrame;
+ }
+ xerr = OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_QcomIndexPortDefn,
+ (void *)&portFmt, sizeof(portFmt));
+ if (xerr != OK) {
+ ALOGW("Failed to set frame packing format on component");
+ }
+
+ if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ // Override with QCOM specific compressionFormat
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ setQCDIVXFormat(msg, mime, OMXhandle, nodeID, kPortIndexOutput);
+ }
+
+ // Enable timestamp reordering for mpeg4 and vc1 codec types, the AVI file
+ // type, and hevc content in the ts container
+ AString container;
+ const char * containerStr = NULL;
+ if (msg->findString("file-format", &container)) {
+ containerStr = container.c_str();
+ }
+
+ bool tsReorder = false;
+ const char* roleVC1 = "OMX.qcom.video.decoder.vc1";
+ const char* roleMPEG4 = "OMX.qcom.video.decoder.mpeg4";
+ const char* roleHEVC = "OMX.qcom.video.decoder.hevc";
+ if (!strncmp(componentName, roleVC1, strlen(roleVC1)) ||
+ !strncmp(componentName, roleMPEG4, strlen(roleMPEG4))) {
+ // The codec requires timestamp reordering
+ tsReorder = true;
+ } else if (containerStr != NULL) {
+ if (!strncmp(containerStr, MEDIA_MIMETYPE_CONTAINER_AVI,
+ strlen(MEDIA_MIMETYPE_CONTAINER_AVI))) {
+ tsReorder = true;
+ } else if (!strncmp(containerStr, MEDIA_MIMETYPE_CONTAINER_MPEG2TS,
+ strlen(MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) ||
+ !strncmp(componentName, roleHEVC, strlen(roleHEVC))) {
+ tsReorder = true;
+ }
+ }
+
+ if (tsReorder) {
+ ALOGI("Enabling timestamp reordering");
+ QOMX_INDEXTIMESTAMPREORDER reorder;
+ InitOMXParams(&reorder);
+ reorder.nPortIndex = kPortIndexOutput;
+ reorder.bEnable = OMX_TRUE;
+ xerr = OMXhandle->setParameter(nodeID,
+ (OMX_INDEXTYPE)OMX_QcomIndexParamEnableTimeStampReorder,
+ (void *)&reorder, sizeof(reorder));
+
+ if (xerr != OK) {
+ ALOGW("Failed to enable timestamp reordering");
+ }
+ }
+
+ // Enable Sync-frame decode mode for thumbnails
+ char board[PROPERTY_VALUE_MAX];
+ property_get("ro.board.platform", board, NULL);
+ int32_t thumbnailMode = 0;
+ if (msg->findInt32("thumbnail-mode", &thumbnailMode) &&
+ thumbnailMode > 0 &&
+ !(!strcmp(board, "msm8996") || !strcmp(board, "msm8937") ||
+ !strcmp(board, "msm8953") || !strcmp(board, "msm8976"))) {
+ ALOGV("Enabling thumbnail mode.");
+ QOMX_ENABLETYPE enableType;
+ OMX_INDEXTYPE indexType;
+
+ status_t err = OMXhandle->getExtensionIndex(
+ nodeID, OMX_QCOM_INDEX_PARAM_VIDEO_SYNCFRAMEDECODINGMODE,
+ &indexType);
+ if (err != OK) {
+ ALOGW("Failed to get extension for SYNCFRAMEDECODINGMODE");
+ } else {
+
+ enableType.bEnable = OMX_TRUE;
+ err = OMXhandle->setParameter(nodeID,indexType,
+ (void *)&enableType, sizeof(enableType));
+ if (err != OK) {
+ ALOGW("Failed to get extension for SYNCFRAMEDECODINGMODE");
+ } else {
+ ALOGI("Thumbnail mode enabled.");
+ }
+ }
+ }
+
+ // MediaCodec clients can request decoder extradata by setting
+ // "enable-extradata-<type>" in MediaFormat.
+ // Following <type>s are supported:
+ // "user" => user-extradata
+ int extraDataRequested = 0;
+ if (msg->findInt32("enable-extradata-user", &extraDataRequested) &&
+ extraDataRequested == 1) {
+ ALOGI("[%s] User-extradata requested", componentName);
+ QOMX_ENABLETYPE enableType;
+ enableType.bEnable = OMX_TRUE;
+
+ xerr = OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_QcomIndexEnableExtnUserData,
+ &enableType, sizeof(enableType));
+ if (xerr != OK) {
+ ALOGW("[%s] Failed to enable user-extradata", componentName);
+ }
+ }
+ }
+#endif
+ return err;
+}
+
+#ifdef QCOM_HARDWARE
+status_t FFMPEGSoftCodec::setQCDIVXFormat(
+ const sp<AMessage> &msg, const char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, int port_index) {
+ status_t err = OK;
+ ALOGV("Setting the QOMX_VIDEO_PARAM_DIVXTYPE params ");
+ QOMX_VIDEO_PARAM_DIVXTYPE paramDivX;
+ InitOMXParams(&paramDivX);
+ paramDivX.nPortIndex = port_index;
+ int32_t DivxVersion = 0;
+ if (!msg->findInt32(getMsgKey(kKeyDivXVersion), &DivxVersion)) {
+ // The key is missing: the caller is bypassing the container and
+ // using the codec directly, so determine the DivX version from
+ // the mime type.
+ DivxVersion = kTypeDivXVer_4;
+ const char *v;
+ if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime)) {
+ DivxVersion = kTypeDivXVer_4;
+ v = "4";
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ DivxVersion = kTypeDivXVer_3_11;
+ v = "3.11";
+ }
+ ALOGW("Divx version key missing, initializing the version to %s", v);
+ }
+ ALOGV("Divx Version Type %d", DivxVersion);
+
+ if (DivxVersion == kTypeDivXVer_4) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat4;
+ } else if (DivxVersion == kTypeDivXVer_5) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat5;
+ } else if (DivxVersion == kTypeDivXVer_6) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat6;
+ } else if (DivxVersion == kTypeDivXVer_3_11 ) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat311;
+ } else {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormatUnused;
+ }
+ paramDivX.eProfile = (QOMX_VIDEO_DIVXPROFILETYPE)0; //Not used for now.
+
+ err = OMXhandle->setParameter(nodeID,
+ (OMX_INDEXTYPE)OMX_QcomIndexParamVideoDivx,
+ &paramDivX, sizeof(paramDivX));
+ return err;
+}
+#endif
+
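+// Read back the codec-specific OMX port parameters for the given coding type
+// and publish them as MediaCodec format keys on |notify|.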
+status_t FFMPEGSoftCodec::getVideoPortFormat(OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXHandle, IOMX::node_id nodeId) {
+
+ status_t err = BAD_TYPE;
+ switch (coding) {
+ case OMX_VIDEO_CodingWMV:
+ {
+ OMX_VIDEO_PARAM_WMVTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, OMX_IndexParamVideoWmv, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t version;
+ if (params.eFormat == OMX_VIDEO_WMVFormat7) {
+ version = kTypeWMVVer_7;
+ } else if (params.eFormat == OMX_VIDEO_WMVFormat8) {
+ version = kTypeWMVVer_8;
+ } else {
+ version = kTypeWMVVer_9;
+ }
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_WMV);
+ notify->setInt32("wmv-version", version);
+ break;
+ }
+ case OMX_VIDEO_CodingAutoDetect:
+ {
+ OMX_VIDEO_PARAM_FFMPEGTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamVideoFFmpeg, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_FFMPEG);
+ notify->setInt32("codec-id", params.eCodecId);
+ break;
+ }
+ case OMX_VIDEO_CodingRV:
+ {
+ OMX_VIDEO_PARAM_RVTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamVideoRv, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t version;
+ if (params.eFormat == OMX_VIDEO_RVFormatG2) {
+ version = kTypeRVVer_G2;
+ } else if (params.eFormat == OMX_VIDEO_RVFormat8) {
+ version = kTypeRVVer_8;
+ } else {
+ version = kTypeRVVer_9;
+ }
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RV);
+ break;
+ }
+ }
+ return err;
+}
+
+status_t FFMPEGSoftCodec::getAudioPortFormat(OMX_U32 portIndex, int coding,
+ sp<AMessage> &notify, sp<IOMX> OMXHandle, IOMX::node_id nodeId) {
+
+ status_t err = BAD_TYPE;
+ switch (coding) {
+ case OMX_AUDIO_CodingRA:
+ {
+ OMX_AUDIO_PARAM_RATYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, OMX_IndexParamAudioRa, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RA);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+ case OMX_AUDIO_CodingMP2:
+ {
+ OMX_AUDIO_PARAM_MP2TYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioMp2, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+ case OMX_AUDIO_CodingWMA:
+ {
+ OMX_AUDIO_PARAM_WMATYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, OMX_IndexParamAudioWma, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_WMA);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+ case OMX_AUDIO_CodingAPE:
+ {
+ OMX_AUDIO_PARAM_APETYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioApe, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_APE);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ notify->setInt32("bits-per-sample", params.nBitsPerSample);
+ break;
+ }
+ case OMX_AUDIO_CodingFLAC:
+ {
+ OMX_AUDIO_PARAM_FLACTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioFlac, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FLAC);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ notify->setInt32("bits-per-sample", params.nCompressionLevel); // piggyback
+ break;
+ }
+
+ case OMX_AUDIO_CodingDTS:
+ {
+ OMX_AUDIO_PARAM_DTSTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioDts, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_DTS);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+ case OMX_AUDIO_CodingAC3:
+ {
+ OMX_AUDIO_PARAM_ANDROID_AC3TYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC3);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+
+ case OMX_AUDIO_CodingAutoDetect:
+ {
+ OMX_AUDIO_PARAM_FFMPEGTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ err = OMXHandle->getParameter(
+ nodeId, (OMX_INDEXTYPE)OMX_IndexParamAudioFFmpeg, &params, sizeof(params));
+ if (err != OK) {
+ return err;
+ }
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FFMPEG);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+ }
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setAudioFormat(
+ const sp<AMessage> &msg, const char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID) {
+ ALOGV("setAudioFormat called");
+ status_t err = OK;
+
+ ALOGV("setAudioFormat: %s", msg->debugString(0).c_str());
+
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_WMA, mime)) {
+ err = setWMAFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setWMAFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_VORBIS, mime)) {
+ err = setVORBISFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setVORBISFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RA, mime)) {
+ err = setRAFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setRAFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_FLAC, mime)) {
+ err = setFLACFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setFLACFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II, mime)) {
+ err = setMP2Format(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setMP2Format() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mime)) {
+ err = setAC3Format(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setAC3Format() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_APE, mime)) {
+ err = setAPEFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setAPEFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_DTS, mime)) {
+ err = setDTSFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setDTSFormat() failed (err = %d)", err);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_FFMPEG, mime)) {
+ err = setFFmpegAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK) {
+ ALOGE("setFFmpegAudioFormat() failed (err = %d)", err);
+ }
+ }
+
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setSupportedRole(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder, const char *mime) {
+
+ ALOGV("setSupportedRole Called %s", mime);
+
+ struct MimeToRole {
+ const char *mime;
+ const char *decoderRole;
+ const char *encoderRole;
+ };
+
+ static const MimeToRole kFFMPEGMimeToRole[] = {
+ { MEDIA_MIMETYPE_AUDIO_AAC,
+ "audio_decoder.aac", NULL },
+ { MEDIA_MIMETYPE_AUDIO_MPEG,
+ "audio_decoder.mp3", NULL },
+ { MEDIA_MIMETYPE_AUDIO_VORBIS,
+ "audio_decoder.vorbis", NULL },
+ { MEDIA_MIMETYPE_AUDIO_WMA,
+ "audio_decoder.wma", NULL },
+ { MEDIA_MIMETYPE_AUDIO_RA,
+ "audio_decoder.ra" , NULL },
+ { MEDIA_MIMETYPE_AUDIO_FLAC,
+ "audio_decoder.flac", NULL },
+ { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+ "audio_decoder.mp2", NULL },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", NULL },
+ { MEDIA_MIMETYPE_AUDIO_APE,
+ "audio_decoder.ape", NULL },
+ { MEDIA_MIMETYPE_AUDIO_DTS,
+ "audio_decoder.dts", NULL },
+ { MEDIA_MIMETYPE_VIDEO_MPEG2,
+ "video_decoder.mpeg2", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX4,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX311,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_VIDEO_WMV,
+ "video_decoder.vc1", NULL }, // so we can still talk to hardware codec
+ { MEDIA_MIMETYPE_VIDEO_VC1,
+ "video_decoder.vc1", NULL },
+ { MEDIA_MIMETYPE_VIDEO_RV,
+ "video_decoder.rv", NULL },
+ { MEDIA_MIMETYPE_VIDEO_FLV1,
+ "video_decoder.flv1", NULL },
+ { MEDIA_MIMETYPE_VIDEO_HEVC,
+ "video_decoder.hevc", NULL },
+ { MEDIA_MIMETYPE_AUDIO_FFMPEG,
+ "audio_decoder.trial", NULL },
+ { MEDIA_MIMETYPE_VIDEO_FFMPEG,
+ "video_decoder.trial", NULL },
+ };
+ static const size_t kNumMimeToRole =
+ sizeof(kFFMPEGMimeToRole) / sizeof(kFFMPEGMimeToRole[0]);
+
+ size_t i;
+ for (i = 0; i < kNumMimeToRole; ++i) {
+ if (!strcasecmp(mime, kFFMPEGMimeToRole[i].mime)) {
+ break;
+ }
+ }
+
+ if (i == kNumMimeToRole) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ const char *role =
+ isEncoder ? kFFMPEGMimeToRole[i].encoderRole
+ : kFFMPEGMimeToRole[i].decoderRole;
+
+ if (role != NULL) {
+ OMX_PARAM_COMPONENTROLETYPE roleParams;
+ InitOMXParams(&roleParams);
+
+ strncpy((char *)roleParams.cRole,
+ role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+ roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+ status_t err = omx->setParameter(
+ node, OMX_IndexParamStandardComponentRole,
+ &roleParams, sizeof(roleParams));
+
+ if (err != OK) {
+ ALOGW("Failed to set standard component role '%s'.", role);
+ return err;
+ }
+ }
+ return OK;
+}
+
+//video
+status_t FFMPEGSoftCodec::setWMVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t version = -1;
+ OMX_VIDEO_PARAM_WMVTYPE paramWMV;
+
+ if (!msg->findInt32(getMsgKey(kKeyWMVVersion), &version)) {
+ ALOGE("WMV version not detected");
+ }
+
+ InitOMXParams(&paramWMV);
+ paramWMV.nPortIndex = kPortIndexInput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamVideoWmv, &paramWMV, sizeof(paramWMV));
+ if (err != OK) {
+ return err;
+ }
+
+ if (version == kTypeWMVVer_7) {
+ paramWMV.eFormat = OMX_VIDEO_WMVFormat7;
+ } else if (version == kTypeWMVVer_8) {
+ paramWMV.eFormat = OMX_VIDEO_WMVFormat8;
+ } else if (version == kTypeWMVVer_9) {
+ paramWMV.eFormat = OMX_VIDEO_WMVFormat9;
+ }
+
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamVideoWmv, &paramWMV, sizeof(paramWMV));
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setRVFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t version = kTypeRVVer_G2;
+ OMX_VIDEO_PARAM_RVTYPE paramRV;
+
+ if (!msg->findInt32(getMsgKey(kKeyRVVersion), &version)) {
+ ALOGE("RV version not detected");
+ }
+
+ InitOMXParams(&paramRV);
+ paramRV.nPortIndex = kPortIndexInput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamVideoRv, &paramRV, sizeof(paramRV));
+ if (err != OK)
+ return err;
+
+ if (version == kTypeRVVer_G2) {
+ paramRV.eFormat = OMX_VIDEO_RVFormatG2;
+ } else if (version == kTypeRVVer_8) {
+ paramRV.eFormat = OMX_VIDEO_RVFormat8;
+ } else if (version == kTypeRVVer_9) {
+ paramRV.eFormat = OMX_VIDEO_RVFormat9;
+ }
+
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamVideoRv, &paramRV, sizeof(paramRV));
+ return err;
+}
+
+status_t FFMPEGSoftCodec::setFFmpegVideoFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t codec_id = 0;
+ int32_t width = 0;
+ int32_t height = 0;
+ OMX_VIDEO_PARAM_FFMPEGTYPE param;
+
+ ALOGD("setFFmpegVideoFormat");
+
+ if (!msg->findInt32(getMsgKey(kKeyWidth), &width)) {
+ ALOGE("No video width specified");
+ }
+ if (!msg->findInt32(getMsgKey(kKeyHeight), &height)) {
+ ALOGE("No video height specified");
+ }
+ if (!msg->findInt32(getMsgKey(kKeyCodecId), &codec_id)) {
+ ALOGE("No codec id sent for FFMPEG catch-all codec!");
+ }
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamVideoFFmpeg, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.eCodecId = codec_id;
+ param.nWidth = width;
+ param.nHeight = height;
+
+ err = OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamVideoFFmpeg, &param, sizeof(param));
+ return err;
+}
+
+//audio
+status_t FFMPEGSoftCodec::setRawAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ int32_t bitsPerSample = 16;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ if (!msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample)) {
+ ALOGD("No PCM format specified, using 16 bit");
+ }
+
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+
+ status_t err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+ if (err != OK) {
+ return err;
+ }
+
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+ if (err != OK) {
+ return err;
+ }
+
+ OMX_AUDIO_PARAM_PCMMODETYPE pcmParams;
+ InitOMXParams(&pcmParams);
+ pcmParams.nPortIndex = kPortIndexOutput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+
+ if (err != OK) {
+ return err;
+ }
+
+ pcmParams.nChannels = numChannels;
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.bInterleaved = OMX_TRUE;
+ pcmParams.nBitPerSample = bitsPerSample;
+ pcmParams.nSamplingRate = sampleRate;
+ pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
+
+ if (getOMXChannelMapping(numChannels, pcmParams.eChannelMapping) != OK) {
+ return OMX_ErrorNone;
+ }
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+}
+
+status_t FFMPEGSoftCodec::setWMAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t version = 0;
+ int32_t numChannels = 0;
+ int32_t bitRate = 0;
+ int32_t sampleRate = 0;
+ int32_t blockAlign = 0;
+ int32_t bitsPerSample = 0;
+
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ CHECK(msg->findInt32(getMsgKey(kKeyBitRate), &bitRate));
+ if (!msg->findInt32(getMsgKey(kKeyBlockAlign), &blockAlign)) {
+ // we should be last on the codec list, but another sniffer may
+ // have handled it and there is no hardware codec.
+ if (!msg->findInt32(getMsgKey(kKeyWMABlockAlign), &blockAlign)) {
+ return ERROR_UNSUPPORTED;
+ }
+ }
+
+ // mm-parser may want a different bit depth
+ if (msg->findInt32(getMsgKey(kKeyWMABitspersample), &bitsPerSample)) {
+ msg->setInt32("bits-per-sample", bitsPerSample);
+ }
+
+ ALOGV("Channels: %d, SampleRate: %d, BitRate: %d, blockAlign: %d",
+ numChannels, sampleRate, bitRate, blockAlign);
+
+ CHECK(msg->findInt32(getMsgKey(kKeyWMAVersion), &version));
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ if (err != OK)
+ return err;
+
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+
+ // http://msdn.microsoft.com/en-us/library/ff819498(v=vs.85).aspx
+ if (version == kTypeWMA) {
+ paramWMA.eFormat = OMX_AUDIO_WMAFormat7;
+ } else if (version == kTypeWMAPro) {
+ paramWMA.eFormat = OMX_AUDIO_WMAFormat8;
+ } else if (version == kTypeWMALossLess) {
+ paramWMA.eFormat = OMX_AUDIO_WMAFormat9;
+ }
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+}
+
+status_t FFMPEGSoftCodec::setVORBISFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_VORBISTYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioVorbis, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioVorbis, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setRAFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t bitRate = 0;
+ int32_t sampleRate = 0;
+ int32_t blockAlign = 0;
+ OMX_AUDIO_PARAM_RATYPE paramRA;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ msg->findInt32(getMsgKey(kKeyBitRate), &bitRate);
+ CHECK(msg->findInt32(getMsgKey(kKeyBlockAlign), &blockAlign));
+
+ ALOGV("Channels: %d, SampleRate: %d, BitRate: %d, blockAlign: %d",
+ numChannels, sampleRate, bitRate, blockAlign);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&paramRA);
+ paramRA.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioRa, &paramRA, sizeof(paramRA));
+ if (err != OK)
+ return err;
+
+ paramRA.eFormat = OMX_AUDIO_RAFormatUnused; // FIXME, cook only???
+ paramRA.nChannels = numChannels;
+ paramRA.nSamplingRate = sampleRate;
+ // FIXME/HACK: reuse the nNumRegions parameter to pass blockAlign,
+ // since the Cook audio codec needs blockAlign!
+ paramRA.nNumRegions = blockAlign;
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioRa, &paramRA, sizeof(paramRA));
+}
+
+status_t FFMPEGSoftCodec::setFLACFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ int32_t bitsPerSample = 16;
+ OMX_AUDIO_PARAM_FLACTYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample);
+
+ ALOGV("Channels: %d, SampleRate: %d BitsPerSample: %d",
+ numChannels, sampleRate, bitsPerSample);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioFlac, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+ param.nCompressionLevel = bitsPerSample; // hack: piggyback bits-per-sample on nCompressionLevel
+
+ return OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioFlac, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setMP2Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_MP2TYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioMp2, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioMp2, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setAC3Format(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_ANDROID_AC3TYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSampleRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setAPEFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ int32_t bitsPerSample = 0;
+ OMX_AUDIO_PARAM_APETYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+ CHECK(msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample));
+
+ ALOGV("Channels:%d, SampleRate:%d, bitsPerSample:%d",
+ numChannels, sampleRate, bitsPerSample);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioApe, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSamplingRate = sampleRate;
+ param.nBitsPerSample = bitsPerSample;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioApe, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setDTSFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t numChannels = 0;
+ int32_t sampleRate = 0;
+ OMX_AUDIO_PARAM_DTSTYPE param;
+
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate));
+
+ ALOGV("Channels: %d, SampleRate: %d",
+ numChannels, sampleRate);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioDts, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.nChannels = numChannels;
+ param.nSamplingRate = sampleRate;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioDts, &param, sizeof(param));
+}
+
+status_t FFMPEGSoftCodec::setFFmpegAudioFormat(
+ const sp<AMessage> &msg, sp<IOMX> OMXhandle, IOMX::node_id nodeID)
+{
+ int32_t codec_id = 0;
+ int32_t numChannels = 0;
+ int32_t bitRate = 0;
+ int32_t bitsPerSample = 16;
+ int32_t sampleRate = 0;
+ int32_t blockAlign = 0;
+ int32_t sampleFormat = 0;
+ int32_t codedSampleBits = 0;
+ OMX_AUDIO_PARAM_FFMPEGTYPE param;
+
+ ALOGD("setFFmpegAudioFormat");
+
+ CHECK(msg->findInt32(getMsgKey(kKeyCodecId), &codec_id));
+ CHECK(msg->findInt32(getMsgKey(kKeyChannelCount), &numChannels));
+ CHECK(msg->findInt32(getMsgKey(kKeySampleFormat), &sampleFormat));
+ msg->findInt32(getMsgKey(kKeyBitRate), &bitRate);
+ msg->findInt32(getMsgKey(kKeyBitsPerSample), &bitsPerSample);
+ msg->findInt32(getMsgKey(kKeySampleRate), &sampleRate);
+ msg->findInt32(getMsgKey(kKeyBlockAlign), &blockAlign);
+ msg->findInt32(getMsgKey(kKeyCodedSampleBits), &codedSampleBits);
+
+ status_t err = setRawAudioFormat(msg, OMXhandle, nodeID);
+ if (err != OK)
+ return err;
+
+ InitOMXParams(&param);
+ param.nPortIndex = kPortIndexInput;
+
+ err = OMXhandle->getParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioFFmpeg, &param, sizeof(param));
+ if (err != OK)
+ return err;
+
+ param.eCodecId = codec_id;
+ param.nChannels = numChannels;
+ param.nBitRate = bitRate;
+ param.nBitsPerSample = codedSampleBits;
+ param.nSampleRate = sampleRate;
+ param.nBlockAlign = blockAlign;
+ param.eSampleFormat = sampleFormat;
+
+ return OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_IndexParamAudioFFmpeg, &param, sizeof(param));
+}
+
+}
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index 89a91f7..55ce566 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -32,6 +32,8 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
+#include <system/audio.h>
+
namespace android {
class FLACParser;
@@ -72,6 +74,8 @@ private:
class FLACParser : public RefBase {
+friend class FLACSource;
+
public:
FLACParser(
const sp<DataSource> &dataSource,
@@ -84,9 +88,18 @@ public:
}
// stream properties
+ unsigned getMinBlockSize() const {
+ return mStreamInfo.min_blocksize;
+ }
unsigned getMaxBlockSize() const {
return mStreamInfo.max_blocksize;
}
+ unsigned getMinFrameSize() const {
+ return mStreamInfo.min_framesize;
+ }
+ unsigned getMaxFrameSize() const {
+ return mStreamInfo.max_framesize;
+ }
unsigned getSampleRate() const {
return mStreamInfo.sample_rate;
}
@@ -103,6 +116,8 @@ public:
// media buffers
void allocateBuffers();
void releaseBuffers();
+ void copyBuffer(short *dst, const int *const *src, unsigned nSamples);
+
MediaBuffer *readBuffer() {
return readBuffer(false, 0LL);
}
@@ -113,6 +128,7 @@ public:
protected:
virtual ~FLACParser();
+
private:
sp<DataSource> mDataSource;
sp<MetaData> mFileMetadata;
@@ -122,7 +138,6 @@ private:
// media buffers
size_t mMaxBufferSize;
MediaBufferGroup *mGroup;
- void (*mCopy)(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels);
// handle to underlying libFLAC parser
FLAC__StreamDecoder *mDecoder;
@@ -139,7 +154,7 @@ private:
bool mWriteRequested;
bool mWriteCompleted;
FLAC__FrameHeader mWriteHeader;
- const FLAC__int32 * const *mWriteBuffer;
+ const FLAC__int32 * mWriteBuffer[FLAC__MAX_CHANNELS];
// most recent error reported by libFLAC parser
FLAC__StreamDecoderErrorStatus mErrorStatus;
@@ -323,7 +338,9 @@ FLAC__StreamDecoderWriteStatus FLACParser::writeCallback(
mWriteRequested = false;
// FLAC parser doesn't free or realloc buffer until next frame or finish
mWriteHeader = frame->header;
- mWriteBuffer = buffer;
+ for(unsigned channel = 0; channel < frame->header.channels; channel++) {
+ mWriteBuffer[channel] = buffer[channel];
+ }
mWriteCompleted = true;
return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
} else {
@@ -377,109 +394,41 @@ void FLACParser::errorCallback(FLAC__StreamDecoderErrorStatus status)
mErrorStatus = status;
}
-// Copy samples from FLAC native 32-bit non-interleaved to 16-bit interleaved.
-// These are candidates for optimization if needed.
-
-static void copyMono8(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] << 8;
- }
-}
-
-static void copyStereo8(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] << 8;
- *dst++ = src[1][i] << 8;
- }
-}
-
-static void copyMultiCh8(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i] << 8;
- }
- }
-}
-
-static void copyMono16(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i];
- }
-}
-
-static void copyStereo16(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i];
- *dst++ = src[1][i];
- }
-}
-
-static void copyMultiCh16(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i];
- }
- }
-}
-
-// 24-bit versions should do dithering or noise-shaping, here or in AudioFlinger
-
-static void copyMono24(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] >> 8;
- }
-}
-
-static void copyStereo24(
- short *dst,
- const int *const *src,
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] >> 8;
- *dst++ = src[1][i] >> 8;
- }
-}
-
-static void copyMultiCh24(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
+void FLACParser::copyBuffer(short *dst, const int *const *src, unsigned nSamples)
{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i] >> 8;
+ unsigned int nChannels = getChannels();
+ unsigned int nBits = getBitsPerSample();
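+ // Interleave per-channel FLAC samples into the output buffer:
+ // 8-bit input is promoted to 16-bit, 16-bit is copied as-is, and
+ // 24/32-bit input is written as 32-bit values shifted left by 8.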
+ switch (nBits) {
+ case 8:
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i] << 8;
+ }
+ }
+ break;
+ case 16:
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i];
+ }
+ }
+ break;
+ case 24:
+ case 32:
+ {
+ int32_t *out = (int32_t *)dst;
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *out++ = src[c][i] << 8;
+ }
+ }
+ break;
}
+ default:
+ TRESPASS();
}
}
-static void copyTrespass(
- short * /* dst */,
- const int *const * /* src */,
- unsigned /* nSamples */,
- unsigned /* nChannels */) {
- TRESPASS();
-}
-
// FLACParser
FLACParser::FLACParser(
@@ -492,14 +441,12 @@ FLACParser::FLACParser(
mInitCheck(false),
mMaxBufferSize(0),
mGroup(NULL),
- mCopy(copyTrespass),
mDecoder(NULL),
mCurrentPos(0LL),
mEOF(false),
mStreamInfoValid(false),
mWriteRequested(false),
mWriteCompleted(false),
- mWriteBuffer(NULL),
mErrorStatus((FLAC__StreamDecoderErrorStatus) -1)
{
ALOGV("FLACParser::FLACParser");
@@ -571,6 +518,8 @@ status_t FLACParser::init()
}
// check sample rate
switch (getSampleRate()) {
+ case 100:
+ case 1000:
case 8000:
case 11025:
case 12000:
@@ -578,38 +527,18 @@ status_t FLACParser::init()
case 22050:
case 24000:
case 32000:
+ case 42000:
case 44100:
+ case 46000:
case 48000:
case 88200:
case 96000:
+ case 192000:
break;
default:
ALOGE("unsupported sample rate %u", getSampleRate());
return NO_INIT;
}
- // configure the appropriate copy function, defaulting to trespass
- static const struct {
- unsigned mChannels;
- unsigned mBitsPerSample;
- void (*mCopy)(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels);
- } table[] = {
- { 1, 8, copyMono8 },
- { 2, 8, copyStereo8 },
- { 8, 8, copyMultiCh8 },
- { 1, 16, copyMono16 },
- { 2, 16, copyStereo16 },
- { 8, 16, copyMultiCh16 },
- { 1, 24, copyMono24 },
- { 2, 24, copyStereo24 },
- { 8, 24, copyMultiCh24 },
- };
- for (unsigned i = 0; i < sizeof(table)/sizeof(table[0]); ++i) {
- if (table[i].mChannels >= getChannels() &&
- table[i].mBitsPerSample == getBitsPerSample()) {
- mCopy = table[i].mCopy;
- break;
- }
- }
// populate track metadata
if (mTrackMetadata != 0) {
mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
@@ -618,6 +547,7 @@ status_t FLACParser::init()
// sample rate is non-zero, so division by zero not possible
mTrackMetadata->setInt64(kKeyDuration,
(getTotalSamples() * 1000000LL) / getSampleRate());
+ mTrackMetadata->setInt32(kKeyBitsPerSample, getBitsPerSample());
}
} else {
ALOGE("missing STREAMINFO");
@@ -633,7 +563,9 @@ void FLACParser::allocateBuffers()
{
CHECK(mGroup == NULL);
mGroup = new MediaBufferGroup;
- mMaxBufferSize = getMaxBlockSize() * getChannels() * sizeof(short);
+ // allocate enough to hold 24-bit samples (packed into 32 bits)
+ unsigned int bytesPerSample = getBitsPerSample() > 16 ? 4 : 2;
+ mMaxBufferSize = getMaxBlockSize() * getChannels() * bytesPerSample;
mGroup->add_buffer(new MediaBuffer(mMaxBufferSize));
}
@@ -686,12 +618,12 @@ MediaBuffer *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
if (err != OK) {
return NULL;
}
- size_t bufferSize = blocksize * getChannels() * sizeof(short);
+ size_t bufferSize = blocksize * getChannels() * (getBitsPerSample() > 16 ? 4 : 2);
CHECK(bufferSize <= mMaxBufferSize);
short *data = (short *) buffer->data();
buffer->set_range(0, bufferSize);
// copy PCM from FLAC write buffer to our media buffer, with interleaving
- (*mCopy)(data, mWriteBuffer, blocksize, getChannels());
+ copyBuffer(data, (const FLAC__int32 * const *)(&mWriteBuffer), blocksize);
// fill in buffer metadata
CHECK(mWriteHeader.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
FLAC__uint64 sampleNumber = mWriteHeader.number.sample_number;
@@ -726,9 +658,10 @@ FLACSource::~FLACSource()
status_t FLACSource::start(MetaData * /* params */)
{
+ CHECK(!mStarted);
+
ALOGV("FLACSource::start");
- CHECK(!mStarted);
mParser->allocateBuffers();
mStarted = true;
@@ -845,12 +778,14 @@ bool SniffFLAC(
{
// first 4 is the signature word
// second 4 is the sizeof STREAMINFO
+ // the MSB of the second 4 bytes flags whether this is the last metadata block
// 042 is the mandatory STREAMINFO
// no need to read rest of the header, as a premature EOF will be caught later
uint8_t header[4+4];
if (source->readAt(0, header, sizeof(header)) != sizeof(header)
- || memcmp("fLaC\0\0\0\042", header, 4+4))
- {
+ || memcmp("fLaC", header, 4)
+ || !(header[4] == 0x80 || header[4] == 0x00)
+ || memcmp("\0\0\042", header + 5, 3)) {
return false;
}
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index 565f156..eaa41ff 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -30,6 +30,7 @@ namespace android {
FileSource::FileSource(const char *filename)
: mFd(-1),
+ mUri(filename),
mOffset(0),
mLength(-1),
mDecryptHandle(NULL),
@@ -58,6 +59,7 @@ FileSource::FileSource(int fd, int64_t offset, int64_t length)
mDrmBuf(NULL){
CHECK(offset >= 0);
CHECK(length >= 0);
+ fetchUriFromFd(fd);
}
FileSource::~FileSource() {
@@ -166,7 +168,7 @@ ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
}
if (mDrmBuf != NULL && mDrmBufSize > 0 && (offset + mOffset) >= mDrmBufOffset
- && (offset + mOffset + size) <= (mDrmBufOffset + mDrmBufSize)) {
+ && (offset + mOffset + size) <= static_cast<size_t>(mDrmBufOffset + mDrmBufSize)) {
/* Use buffered data */
memcpy(data, (void*)(mDrmBuf+(offset+mOffset-mDrmBufOffset)), size);
return size;
@@ -177,7 +179,7 @@ ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
DRM_CACHE_SIZE, offset + mOffset);
if (mDrmBufSize > 0) {
int64_t dataRead = 0;
- dataRead = size > mDrmBufSize ? mDrmBufSize : size;
+ dataRead = size > static_cast<size_t>(mDrmBufSize) ? mDrmBufSize : size;
memcpy(data, (void*)mDrmBuf, dataRead);
return dataRead;
} else {
@@ -188,4 +190,18 @@ ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
return mDrmManagerClient->pread(mDecryptHandle, data, size, offset + mOffset);
}
}
+
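+// Best-effort: resolve the file behind the descriptor via /proc/<pid>/fd/<fd>;
+// mUri stays empty if the symlink cannot be read.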
+void FileSource::fetchUriFromFd(int fd) {
+ ssize_t len = 0;
+ char path[PATH_MAX] = {0};
+ char link[PATH_MAX] = {0};
+
+ mUri.clear();
+
+ snprintf(path, PATH_MAX, "/proc/%d/fd/%d", getpid(), fd);
+ if ((len = readlink(path, link, sizeof(link)-1)) != -1) {
+ link[len] = '\0';
+ mUri.setTo(link);
+ }
+}
} // namespace android
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 9e7f298..80ef7b7 100755
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -321,6 +321,12 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('m', 'p', '4', 'a'):
return MEDIA_MIMETYPE_AUDIO_AAC;
+ case FOURCC('e', 'n', 'c', 'a'):
+ return MEDIA_MIMETYPE_AUDIO_AAC;
+
+ case FOURCC('.', 'm', 'p', '3'):
+ return MEDIA_MIMETYPE_AUDIO_MPEG;
+
case FOURCC('s', 'a', 'm', 'r'):
return MEDIA_MIMETYPE_AUDIO_AMR_NB;
@@ -330,6 +336,9 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('m', 'p', '4', 'v'):
return MEDIA_MIMETYPE_VIDEO_MPEG4;
+ case FOURCC('e', 'n', 'c', 'v'):
+ return MEDIA_MIMETYPE_VIDEO_MPEG4;
+
case FOURCC('s', '2', '6', '3'):
case FOURCC('h', '2', '6', '3'):
case FOURCC('H', '2', '6', '3'):
@@ -1044,7 +1053,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
int64_t delay = (media_time * samplerate + 500000) / 1000000;
mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
- int64_t paddingus = duration - (segment_duration + media_time);
+ int64_t paddingus = duration - (int64_t)(segment_duration + media_time);
if (paddingus < 0) {
// track duration from media header (which is what kKeyDuration is) might
// be slightly shorter than the segment duration, which would make the
@@ -1373,7 +1382,14 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + sizeof(buffer);
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG, FourCC2MIME(chunk_type)) ||
+ !strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(chunk_type))) {
+ // ESD is not required in mp3
+ // amr wb with damr atom corrupted can cause the clip to not play
+ *offset = stop_offset;
+ } else {
+ *offset = data_offset + sizeof(buffer);
+ }
while (*offset < stop_offset) {
status_t err = parseChunk(offset, depth + 1);
if (err != OK) {
@@ -1612,13 +1628,21 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return ERROR_MALFORMED;
*offset += chunk_size;
+ // Ignore the stss box for audio even if it is present:
+ // every audio sample is a sync sample in itself,
+ // independently decodable and playable.
+ // Parsing this box for audio would restrict seeking to the few
+ // entries it lists, sometimes none, which is undesirable.
+ const char *mime;
+ CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
+ if (strncasecmp("audio/", mime, 6)) {
+ status_t err =
+ mLastTrack->sampleTable->setSyncSampleParams(
+ data_offset, chunk_data_size);
- status_t err =
- mLastTrack->sampleTable->setSyncSampleParams(
- data_offset, chunk_data_size);
-
- if (err != OK) {
- return err;
+ if (err != OK) {
+ return err;
+ }
}
break;
@@ -1715,6 +1739,9 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
}
}
}
+
+ updateVideoTrackInfoFromESDS_MPEG4Video(mLastTrack->meta);
+
break;
}
@@ -1984,6 +2011,9 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return ERROR_IO;
}
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
uint32_t type = ntohl(buffer);
// For the 3GPP file format, the handler-type within the 'hdlr' box
// shall be 'text'. We also want to support 'sbtl' handler type
@@ -3010,12 +3040,12 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
return OK;
}
- if (objectTypeIndication == 0x6b) {
- // The media subtype is MP3 audio
- // Our software MP3 audio decoder may not be able to handle
- // packetized MP3 audio; for now, lets just return ERROR_UNSUPPORTED
- ALOGE("MP3 track in MP4/3GPP file is not supported");
- return ERROR_UNSUPPORTED;
+ if (objectTypeIndication == 0x6b
+ || objectTypeIndication == 0x69) {
+ // This is mpeg1/2 audio content, set mimetype to mpeg
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+ ALOGD("objectTypeIndication:0x%x, set mimetype to mpeg ",objectTypeIndication);
+ return OK;
}
const uint8_t *csd;
@@ -3168,7 +3198,7 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
extensionFlag, objectType);
}
- if (numChannels == 0) {
+ if (numChannels == 0 && (br.numBitsLeft() > 0)) {
int32_t channelsEffectiveNum = 0;
int32_t channelsNum = 0;
if (br.numBitsLeft() < 32) {
@@ -3336,11 +3366,13 @@ MPEG4Source::MPEG4Source(
const uint8_t *ptr = (const uint8_t *)data;
- CHECK(size >= 7);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
-
- // The number of bytes used to encode the length of a NAL unit.
- mNALLengthSize = 1 + (ptr[4] & 3);
+ if (size < 7 || ptr[0] != 1) {
+ ALOGE("Invalid AVCC atom, size %zu, configurationVersion: %d",
+ size, ptr[0]);
+ } else {
+ // The number of bytes used to encode the length of a NAL unit.
+ mNALLengthSize = 1 + (ptr[4] & 3);
+ }
} else if (mIsHEVC) {
uint32_t type;
const void *data;
@@ -4661,7 +4693,9 @@ static bool LegacySniffMPEG4(
return false;
}
- if (!memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
+ if (!memcmp(header, "ftyp3g2a", 8) || !memcmp(header, "ftyp3g2b", 8)
+ || !memcmp(header, "ftyp3g2c", 8)
+ || !memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
|| !memcmp(header, "ftyp3gr6", 8) || !memcmp(header, "ftyp3gs6", 8)
|| !memcmp(header, "ftyp3ge6", 8) || !memcmp(header, "ftyp3gg6", 8)
|| !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 47f114a..16da3eb 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -38,10 +38,11 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/Utils.h>
#include <media/mediarecorder.h>
+#include <stagefright/AVExtensions.h>
#include <cutils/properties.h>
#include "include/ESDS.h"
-
#ifndef __predict_false
#define __predict_false(exp) __builtin_expect((exp) != 0, 0)
@@ -91,6 +92,7 @@ public:
bool isAvc() const { return mIsAvc; }
bool isAudio() const { return mIsAudio; }
bool isMPEG4() const { return mIsMPEG4; }
+ bool isHEVC() const { return mIsHEVC; }
void addChunkOffset(off64_t offset);
int32_t getTrackId() const { return mTrackId; }
status_t dump(int fd, const Vector<String16>& args) const;
@@ -236,6 +238,7 @@ private:
bool mIsAvc;
bool mIsAudio;
bool mIsMPEG4;
+ bool mIsHEVC;
int32_t mTrackId;
int64_t mTrackDurationUs;
int64_t mMaxChunkDurationUs;
@@ -318,6 +321,7 @@ private:
// Simple validation on the codec specific data
status_t checkCodecSpecificData() const;
int32_t mRotation;
+ int32_t mHFRRatio;
void updateTrackSizeEstimate();
void addOneStscTableEntry(size_t chunkId, size_t sampleId);
@@ -385,7 +389,10 @@ MPEG4Writer::MPEG4Writer(int fd)
mLongitudex10000(0),
mAreGeoTagsAvailable(false),
mStartTimeOffsetMs(-1),
- mMetaKeys(new AMessage()) {
+ mMetaKeys(new AMessage()),
+ mIsVideoHEVC(false),
+ mIsAudioAMR(false),
+ mHFRRatio(1) {
addDeviceMeta();
// Verify mFd is seekable
@@ -463,6 +470,8 @@ const char *MPEG4Writer::Track::getFourCCForMime(const char *mime) {
return "s263";
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
return "avc1";
+ } else {
+ return AVUtils::get()->HEVCMuxerUtils().getFourCCForMime(mime);
}
} else {
ALOGE("Track (%s) other than video or audio is not supported", mime);
@@ -488,10 +497,23 @@ status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
const char *mime;
source->getFormat()->findCString(kKeyMIMEType, &mime);
bool isAudio = !strncasecmp(mime, "audio/", 6);
+ bool isVideo = !strncasecmp(mime, "video/", 6);
if (Track::getFourCCForMime(mime) == NULL) {
ALOGE("Unsupported mime '%s'", mime);
return ERROR_UNSUPPORTED;
}
+ mIsAudioAMR = isAudio && (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mime));
+
+ if (isVideo) {
+ mIsVideoHEVC = AVUtils::get()->HEVCMuxerUtils().isVideoHEVC(mime);
+ }
+
+ if (isAudio && !AVUtils::get()->isAudioMuxFormatSupported(mime)) {
+ ALOGE("Muxing is not supported for %s", mime);
+ return ERROR_UNSUPPORTED;
+ }
// At this point, we know the track to be added is either
// video or audio. Thus, we only need to check whether it
@@ -512,6 +534,8 @@ status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
Track *track = new Track(this, source, 1 + mTracks.size());
mTracks.push_back(track);
+ mHFRRatio = AVUtils::get()->HFRUtils().getHFRRatio(source->getFormat());
+
return OK;
}
@@ -580,7 +604,7 @@ int64_t MPEG4Writer::estimateMoovBoxSize(int32_t bitRate) {
// If the estimation is wrong, we will pay the price of wasting
// some reserved space. This should not happen so often statistically.
- static const int32_t factor = mUse32BitOffset? 1: 2;
+ int32_t factor = mUse32BitOffset? 1: 2;
static const int64_t MIN_MOOV_BOX_SIZE = 3 * 1024; // 3 KB
static const int64_t MAX_MOOV_BOX_SIZE = (180 * 3000000 * 6LL / 8000);
int64_t size = MIN_MOOV_BOX_SIZE;
@@ -677,8 +701,6 @@ status_t MPEG4Writer::start(MetaData *param) {
mIsRealTimeRecording = isRealTimeRecording;
}
- mStartTimestampUs = -1;
-
if (mStarted) {
if (mPaused) {
mPaused = false;
@@ -687,6 +709,8 @@ status_t MPEG4Writer::start(MetaData *param) {
return OK;
}
+ mStartTimestampUs = -1;
+
if (!param ||
!param->findInt32(kKeyTimeScale, &mTimeScale)) {
mTimeScale = 1000;
@@ -999,7 +1023,11 @@ uint32_t MPEG4Writer::getMpeg4Time() {
// MP4 file uses time counting seconds since midnight, Jan. 1, 1904
// while time function returns Unix epoch values which starts
// at 1970-01-01. Lets add the number of seconds between them
- uint32_t mpeg4Time = now + (66 * 365 + 17) * (24 * 60 * 60);
+ static const uint32_t delta = (66 * 365 + 17) * (24 * 60 * 60);
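+ // Guard against 32-bit wrap-around: return 0 if the current time cannot
+ // be represented in the MP4 epoch.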
+ if (now < 0 || uint32_t(now) > UINT32_MAX - delta) {
+ return 0;
+ }
+ uint32_t mpeg4Time = uint32_t(now) + delta;
return mpeg4Time;
}
@@ -1009,7 +1037,7 @@ void MPEG4Writer::writeMvhdBox(int64_t durationUs) {
writeInt32(0); // version=0, flags=0
writeInt32(now); // creation time
writeInt32(now); // modification time
- writeInt32(mTimeScale); // mvhd timescale
+ writeInt32(mTimeScale / mHFRRatio); // mvhd timescale
int32_t duration = (durationUs * mTimeScale + 5E5) / 1E6;
writeInt32(duration);
writeInt32(0x10000); // rate: 1.0
@@ -1047,8 +1075,10 @@ void MPEG4Writer::writeFtypBox(MetaData *param) {
beginBox("ftyp");
int32_t fileType;
- if (param && param->findInt32(kKeyFileType, &fileType) &&
- fileType != OUTPUT_FORMAT_MPEG_4) {
+ if (mIsVideoHEVC) {
+ AVUtils::get()->HEVCMuxerUtils().writeHEVCFtypBox(this);
+ } else if (mIsAudioAMR || (param && param->findInt32(kKeyFileType, &fileType) &&
+ fileType != OUTPUT_FORMAT_MPEG_4)) {
writeFourcc("3gp4");
writeInt32(0);
writeFourcc("isom");
@@ -1118,7 +1148,7 @@ off64_t MPEG4Writer::addSample_l(MediaBuffer *buffer) {
return old_offset;
}
-static void StripStartcode(MediaBuffer *buffer) {
+void MPEG4Writer::StripStartcode(MediaBuffer *buffer) {
if (buffer->range_length() < 4) {
return;
}
@@ -1455,7 +1485,8 @@ MPEG4Writer::Track::Track(
mCodecSpecificDataSize(0),
mGotAllCodecSpecificData(false),
mReachedEOS(false),
- mRotation(0) {
+ mRotation(0),
+ mHFRRatio(1) {
getCodecSpecificDataFromInputFormatIfPossible();
const char *mime;
@@ -1464,6 +1495,12 @@ MPEG4Writer::Track::Track(
mIsAudio = !strncasecmp(mime, "audio/", 6);
mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
+ mIsHEVC = AVUtils::get()->HEVCMuxerUtils().isVideoHEVC(mime);
+
+ if (mIsHEVC) {
+ AVUtils::get()->HEVCMuxerUtils().getHEVCCodecSpecificDataFromInputFormatIfPossible(
+ mMeta, &mCodecSpecificData, &mCodecSpecificDataSize, &mGotAllCodecSpecificData);
+ }
setTimeScale();
}
@@ -1562,6 +1599,7 @@ void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() {
size_t size;
if (mMeta->findData(kKeyAVCC, &type, &data, &size)) {
mCodecSpecificData = malloc(size);
+ CHECK(mCodecSpecificData != NULL);
mCodecSpecificDataSize = size;
memcpy(mCodecSpecificData, data, size);
mGotAllCodecSpecificData = true;
@@ -1575,6 +1613,7 @@ void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() {
ESDS esds(data, size);
if (esds.getCodecSpecificInfo(&data, &size) == OK) {
mCodecSpecificData = malloc(size);
+ CHECK(mCodecSpecificData != NULL);
mCodecSpecificDataSize = size;
memcpy(mCodecSpecificData, data, size);
mGotAllCodecSpecificData = true;
@@ -1657,7 +1696,7 @@ void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
while (!chunk->mSamples.empty()) {
List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
- off64_t offset = chunk->mTrack->isAvc()
+ off64_t offset = (chunk->mTrack->isAvc() || chunk->mTrack->isHEVC())
? addLengthPrefixedSample_l(*it)
: addSample_l(*it);
@@ -1798,9 +1837,16 @@ status_t MPEG4Writer::Track::start(MetaData *params) {
}
int64_t startTimeUs;
+
if (params == NULL || !params->findInt64(kKeyTime, &startTimeUs)) {
startTimeUs = 0;
}
+
+ int64_t startTimeBootUs;
+ if (params == NULL || !params->findInt64(kKeyTimeBoot, &startTimeBootUs)) {
+ startTimeBootUs = 0;
+ }
+
mStartTimeRealUs = startTimeUs;
int32_t rotationDegrees;
@@ -1811,6 +1857,7 @@ status_t MPEG4Writer::Track::start(MetaData *params) {
initTrackingProgressStatus(params);
sp<MetaData> meta = new MetaData;
+
if (mOwner->isRealTimeRecording() && mOwner->numTracks() > 1) {
/*
* This extra delay of accepting incoming audio/video signals
@@ -1826,10 +1873,12 @@ status_t MPEG4Writer::Track::start(MetaData *params) {
startTimeOffsetUs = kInitialDelayTimeUs;
}
startTimeUs += startTimeOffsetUs;
+ startTimeBootUs += startTimeOffsetUs;
ALOGI("Start time offset: %" PRId64 " us", startTimeOffsetUs);
}
meta->setInt64(kKeyTime, startTimeUs);
+ meta->setInt64(kKeyTimeBoot, startTimeBootUs);
status_t err = mSource->start(meta.get());
if (err != OK) {
@@ -1852,6 +1901,8 @@ status_t MPEG4Writer::Track::start(MetaData *params) {
pthread_create(&mThread, &attr, ThreadWrapper, this);
pthread_attr_destroy(&attr);
+ mHFRRatio = AVUtils::get()->HFRUtils().getHFRRatio(mMeta);
+
return OK;
}
@@ -1881,6 +1932,10 @@ status_t MPEG4Writer::Track::stop() {
status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
ALOGD("%s track stopped", mIsAudio? "Audio": "Video");
+ if (mOwner->exceedsFileSizeLimit() && mStszTableEntries->count() == 0) {
+ ALOGE(" Filesize limit exceeded and zero samples written ");
+ return ERROR_END_OF_STREAM;
+ }
return err;
}
@@ -1971,6 +2026,7 @@ status_t MPEG4Writer::Track::copyAVCCodecSpecificData(
mCodecSpecificDataSize = size;
mCodecSpecificData = malloc(size);
+ CHECK(mCodecSpecificData != NULL);
memcpy(mCodecSpecificData, data, size);
return OK;
}
@@ -2093,6 +2149,7 @@ status_t MPEG4Writer::Track::makeAVCCodecSpecificData(
// ISO 14496-15: AVC file format
mCodecSpecificDataSize += 7; // 7 more bytes in the header
mCodecSpecificData = malloc(mCodecSpecificDataSize);
+ CHECK(mCodecSpecificData != NULL);
uint8_t *header = (uint8_t *)mCodecSpecificData;
header[0] = 1; // version
header[1] = mProfileIdc; // profile indication
@@ -2195,7 +2252,9 @@ status_t MPEG4Writer::Track::threadEntry() {
MediaBuffer *buffer;
const char *trackName = mIsAudio ? "Audio" : "Video";
while (!mDone && (err = mSource->read(&buffer)) == OK) {
- if (buffer->range_length() == 0) {
+ if (buffer == NULL) {
+ continue;
+ } else if (buffer->range_length() == 0) {
buffer->release();
buffer = NULL;
++nZeroLengthFrames;
@@ -2227,10 +2286,18 @@ status_t MPEG4Writer::Track::threadEntry() {
} else if (mIsMPEG4) {
mCodecSpecificDataSize = buffer->range_length();
mCodecSpecificData = malloc(mCodecSpecificDataSize);
+ CHECK(mCodecSpecificData != NULL);
memcpy(mCodecSpecificData,
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
+ } else if (mIsHEVC) {
+ status_t err = AVUtils::get()->HEVCMuxerUtils().makeHEVCCodecSpecificData(
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length(), &mCodecSpecificData, &mCodecSpecificDataSize);
+ if ((status_t)OK != err) {
+ return err;
+ }
}
buffer->release();
@@ -2240,20 +2307,28 @@ status_t MPEG4Writer::Track::threadEntry() {
continue;
}
- // Make a deep copy of the MediaBuffer and Metadata and release
- // the original as soon as we can
- MediaBuffer *copy = new MediaBuffer(buffer->range_length());
- memcpy(copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
- copy->set_range(0, buffer->range_length());
- meta_data = new MetaData(*buffer->meta_data().get());
- buffer->release();
- buffer = NULL;
+ MediaBuffer *copy = NULL;
+ // Check if the upstream source hints it is OK to hold on to the
+ // buffer without releasing immediately and avoid cloning the buffer
+ if (AVUtils::get()->canDeferRelease(buffer->meta_data())) {
+ copy = buffer;
+ meta_data = new MetaData(*buffer->meta_data().get());
+ } else {
+ // Make a deep copy of the MediaBuffer and Metadata and release
+ // the original as soon as we can
+ copy = new MediaBuffer(buffer->range_length());
+ memcpy(copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+ copy->set_range(0, buffer->range_length());
+ meta_data = new MetaData(*buffer->meta_data().get());
+ buffer->release();
+ buffer = NULL;
+ }
- if (mIsAvc) StripStartcode(copy);
+ if (mIsAvc || mIsHEVC) StripStartcode(copy);
size_t sampleSize = copy->range_length();
- if (mIsAvc) {
+ if (mIsAvc || mIsHEVC) {
if (mOwner->useNalLengthFour()) {
sampleSize += 4;
} else {
@@ -2287,22 +2362,11 @@ status_t MPEG4Writer::Track::threadEntry() {
previousPausedDurationUs = mStartTimestampUs;
}
+#if 0
if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- return ERROR_MALFORMED;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- return ERROR_MALFORMED;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
mResumed = false;
}
+#endif
timestampUs -= previousPausedDurationUs;
if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
@@ -2320,8 +2384,8 @@ status_t MPEG4Writer::Track::threadEntry() {
CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
decodingTimeUs -= previousPausedDurationUs;
cttsOffsetTimeUs =
- timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ timestampUs - decodingTimeUs;
+ if (WARN_UNLESS(kMaxCttsOffsetTimeUs >= decodingTimeUs - timestampUs, "for %s track", trackName)) {
copy->release();
return ERROR_MALFORMED;
}
@@ -2398,7 +2462,9 @@ status_t MPEG4Writer::Track::threadEntry() {
ALOGE("timestampUs %" PRId64 " < lastTimestampUs %" PRId64 " for %s track",
timestampUs, lastTimestampUs, trackName);
copy->release();
- return UNKNOWN_ERROR;
+ err = UNKNOWN_ERROR;
+ mSource->notifyError(err);
+ return err;
}
// if the duration is different for this sample, see if it is close enough to the previous
@@ -2453,7 +2519,7 @@ status_t MPEG4Writer::Track::threadEntry() {
trackProgressStatus(timestampUs);
}
if (!hasMultipleTracks) {
- off64_t offset = mIsAvc? mOwner->addLengthPrefixedSample_l(copy)
+ off64_t offset = (mIsAvc || mIsHEVC)? mOwner->addLengthPrefixedSample_l(copy)
: mOwner->addSample_l(copy);
uint32_t count = (mOwner->use32BitFileOffset()
@@ -2546,8 +2612,13 @@ status_t MPEG4Writer::Track::threadEntry() {
if (mIsAudio) {
ALOGI("Audio track drift time: %" PRId64 " us", mOwner->getDriftTimeUs());
}
-
- if (err == ERROR_END_OF_STREAM) {
+ // if err is ERROR_IO (e.g. during SSR), return OK so the recorded
+ // file is still saved successfully. Session teardown happens as part
+ // of the client callback.
+ if (mIsAudio && (err == ERROR_IO)) {
+ return OK;
+ }
+ else if (err == ERROR_END_OF_STREAM) {
return OK;
}
return err;
@@ -2705,7 +2776,8 @@ status_t MPEG4Writer::Track::checkCodecSpecificData() const {
CHECK(mMeta->findCString(kKeyMIMEType, &mime));
if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
- !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime) ||
+ mIsHEVC) {
if (!mCodecSpecificData ||
mCodecSpecificDataSize <= 0) {
ALOGE("Missing codec specific data");
@@ -2726,6 +2798,11 @@ void MPEG4Writer::Track::writeTrackHeader(bool use32BitOffset) {
ALOGV("%s track time scale: %d",
mIsAudio? "Audio": "Video", mTimeScale);
+ if (mMdatSizeBytes == 0) {
+ ALOGW("Track data is not available.");
+ return;
+ }
+
uint32_t now = getMpeg4Time();
mOwner->beginBox("trak");
writeTkhdBox(now);
@@ -2803,17 +2880,22 @@ void MPEG4Writer::Track::writeVideoFourCCBox() {
mOwner->writeInt16(0x18); // depth
mOwner->writeInt16(-1); // predefined
- CHECK_LT(23 + mCodecSpecificDataSize, 128);
-
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
writeMp4vEsdsBox();
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
writeD263Box();
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
writeAvccBox();
+ } else if (mIsHEVC) {
+ AVUtils::get()->HEVCMuxerUtils().writeHvccBox(mOwner, mCodecSpecificData,
+ mCodecSpecificDataSize,
+ mOwner->useNalLengthFour());
+ }
+
+ if (!mIsHEVC) {
+ writePaspBox();
}
- writePaspBox();
mOwner->endBox(); // mp4v, s263 or avc1
}
@@ -2898,6 +2980,9 @@ void MPEG4Writer::Track::writeMp4vEsdsBox() {
CHECK_GT(mCodecSpecificDataSize, 0);
mOwner->beginBox("esds");
+ // Make sure all sizes encode to a single byte.
+ CHECK_LT(mCodecSpecificDataSize + 23, 128);
+
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt8(0x03); // ES_DescrTag
@@ -3007,7 +3092,7 @@ void MPEG4Writer::Track::writeMdhdBox(uint32_t now) {
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt32(now); // creation time
mOwner->writeInt32(now); // modification time
- mOwner->writeInt32(mTimeScale); // media timescale
+ mOwner->writeInt32(mTimeScale / mHFRRatio); // media timescale
int32_t mdhdDuration = (trakDurationUs * mTimeScale + 5E5) / 1E6;
mOwner->writeInt32(mdhdDuration); // use media timescale
// Language follows the three letter standard ISO-639-2/T
@@ -3086,7 +3171,7 @@ void MPEG4Writer::Track::writePaspBox() {
int32_t MPEG4Writer::Track::getStartTimeOffsetScaledTime() const {
int64_t trackStartTimeOffsetUs = 0;
int64_t moovStartTimeUs = mOwner->getStartTimestampUs();
- if (mStartTimestampUs != moovStartTimeUs) {
+ if (mStartTimestampUs != moovStartTimeUs && mStszTableEntries->count() != 0) {
CHECK_GT(mStartTimestampUs, moovStartTimeUs);
trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs;
}
diff --git a/media/libstagefright/MediaAdapter.cpp b/media/libstagefright/MediaAdapter.cpp
index d680e0c..ec4550f 100644
--- a/media/libstagefright/MediaAdapter.cpp
+++ b/media/libstagefright/MediaAdapter.cpp
@@ -27,7 +27,8 @@ namespace android {
MediaAdapter::MediaAdapter(const sp<MetaData> &meta)
: mCurrentMediaBuffer(NULL),
mStarted(false),
- mOutputFormat(meta) {
+ mOutputFormat(meta),
+ mStatus(OK) {
}
MediaAdapter::~MediaAdapter() {
@@ -51,6 +52,9 @@ status_t MediaAdapter::stop() {
// If stop() happens immediately after a pushBuffer(), we should
// clean up the mCurrentMediaBuffer
if (mCurrentMediaBuffer != NULL) {
+ mCurrentMediaBuffer->setObserver(this);
+ mCurrentMediaBuffer->claim();
+ mCurrentMediaBuffer->setObserver(0);
mCurrentMediaBuffer->release();
mCurrentMediaBuffer = NULL;
}
@@ -113,13 +117,23 @@ status_t MediaAdapter::pushBuffer(MediaBuffer *buffer) {
ALOGE("pushBuffer called before start");
return INVALID_OPERATION;
}
+ if (mStatus != OK) {
+ ALOGE("pushBuffer called when MediaAdapter in error status");
+ return mStatus;
+ }
mCurrentMediaBuffer = buffer;
mBufferReadCond.signal();
ALOGV("wait for the buffer returned @ pushBuffer! %p", buffer);
mBufferReturnedCond.wait(mAdapterLock);
- return OK;
+ return mStatus;
+}
+
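+// Record the failure and wake any pushBuffer() caller blocked on
+// mBufferReturnedCond so it can return the error status.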
+void MediaAdapter::notifyError(status_t err) {
+ Mutex::Autolock autoLock(mAdapterLock);
+ mStatus = err;
+ mBufferReturnedCond.signal();
}
} // namespace android
diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/MediaBuffer.cpp
index 1f80a47..525a156 100644
--- a/media/libstagefright/MediaBuffer.cpp
+++ b/media/libstagefright/MediaBuffer.cpp
@@ -54,6 +54,7 @@ MediaBuffer::MediaBuffer(size_t size)
mOwnsData(true),
mMetaData(new MetaData),
mOriginal(NULL) {
+ CHECK(mData != NULL);
}
MediaBuffer::MediaBuffer(const sp<GraphicBuffer>& graphicBuffer)
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c2ffdf2..5e0ee55 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -51,6 +51,7 @@
#include <private/android_filesystem_config.h>
#include <utils/Log.h>
#include <utils/Singleton.h>
+#include <stagefright/AVExtensions.h>
namespace android {
@@ -313,7 +314,7 @@ status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
// queue.
if (nameIsType || !strncasecmp(name.c_str(), "omx.", 4)) {
- mCodec = new ACodec;
+ mCodec = AVFactory::get()->createACodec();
} else if (!nameIsType
&& !strncasecmp(name.c_str(), "android.filter.", 15)) {
mCodec = new MediaFilter;
@@ -899,6 +900,8 @@ status_t MediaCodec::getBufferAndFormat(
}
*format = info.mFormat;
}
+ } else {
+ return BAD_INDEX;
}
return OK;
}
@@ -1012,6 +1015,12 @@ bool MediaCodec::handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool
if (omxFlags & OMX_BUFFERFLAG_EOS) {
flags |= BUFFER_FLAG_EOS;
}
+ if (omxFlags & OMX_BUFFERFLAG_EXTRADATA) {
+ flags |= BUFFER_FLAG_EXTRADATA;
+ }
+ if (omxFlags & OMX_BUFFERFLAG_DATACORRUPT) {
+ flags |= BUFFER_FLAG_DATACORRUPT;
+ }
response->setInt32("flags", flags);
response->postReply(replyID);
@@ -1168,7 +1177,8 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findString("componentName", &mComponentName));
- if (mComponentName.startsWith("OMX.google.")) {
+ if (mComponentName.startsWith("OMX.google.") ||
+ mComponentName.startsWith("OMX.ffmpeg.")) {
mFlags |= kFlagUsesSoftwareRenderer;
} else {
mFlags &= ~kFlagUsesSoftwareRenderer;
@@ -1205,6 +1215,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
// reset input surface flag
mHaveInputSurface = false;
+ CHECK(msg->findString("componentName", &mComponentName));
CHECK(msg->findMessage("input-format", &mInputFormat));
CHECK(msg->findMessage("output-format", &mOutputFormat));
@@ -2314,6 +2325,7 @@ size_t MediaCodec::updateBuffers(
uint32_t bufferID;
CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+ Mutex::Autolock al(mBufferLock);
Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
@@ -2401,7 +2413,12 @@ status_t MediaCodec::onQueueInputBuffer(const sp<AMessage> &msg) {
}
if (offset + size > info->mData->capacity()) {
- return -EINVAL;
+ if (((int)size == (int)-1) && !(flags & BUFFER_FLAG_EOS)) {
+ size = 0;
+ ALOGD("EOS, reset size to zero");
+ } else {
+ return -EINVAL;
+ }
}
sp<AMessage> reply = info->mNotify;
@@ -2558,7 +2575,7 @@ ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) {
info->mOwnedByClient = true;
// set image-data
- if (info->mFormat != NULL) {
+ if (info->mFormat != NULL && mIsVideo) {
sp<ABuffer> imageData;
if (info->mFormat->findBuffer("image-data", &imageData)) {
info->mData->meta()->setBuffer("image-data", imageData);
@@ -2673,6 +2690,9 @@ void MediaCodec::onOutputBufferAvailable() {
if (omxFlags & OMX_BUFFERFLAG_EOS) {
flags |= BUFFER_FLAG_EOS;
}
+ if (omxFlags & OMX_BUFFERFLAG_DATACORRUPT) {
+ flags |= BUFFER_FLAG_DATACORRUPT;
+ }
msg->setInt32("flags", flags);
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 5edc04c..ff94314 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -40,6 +40,7 @@
#include <cutils/properties.h>
#include <libexpat/expat.h>
+#include <stagefright/AVExtensions.h>
namespace android {
@@ -174,7 +175,7 @@ MediaCodecList::MediaCodecList()
: mInitCheck(NO_INIT),
mUpdate(false),
mGlobalSettings(new AMessage()) {
- parseTopLevelXMLFile("/etc/media_codecs.xml");
+ parseTopLevelXMLFile(AVUtils::get()->getCustomCodecsLocation());
parseTopLevelXMLFile("/etc/media_codecs_performance.xml", true/* ignore_errors */);
parseTopLevelXMLFile(kProfilingResults, true/* ignore_errors */);
}
@@ -939,7 +940,13 @@ status_t MediaCodecList::addLimit(const char **attrs) {
// complexity: range + default
bool found;
- if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+ // VT specific limits
+ if (name.find("vt-") == 0) {
+ AString value;
+ if (msg->findString("value", &value) && value.size()) {
+ mCurrentInfo->addDetail(name, value);
+ }
+ } else if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
|| name == "blocks-per-second" || name == "complexity"
|| name == "frame-rate" || name == "quality" || name == "size"
|| name == "measured-blocks-per-second" || name.startsWith("measured-frame-rate-")) {
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 7f9f824..14a3c0d 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -36,6 +36,9 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/Utils.h>
+#include <OMX_Core.h>
+#include <stagefright/AVExtensions.h>
+#include <cutils/properties.h>
namespace android {
@@ -214,7 +217,7 @@ void MediaCodecSource::Puller::onMessageReceived(const sp<AMessage> &msg) {
status_t err = mSource->read(&mbuf);
if (mPaused) {
- if (err == OK) {
+ if (err == OK && (NULL != mbuf)) {
mbuf->release();
mbuf = NULL;
}
@@ -399,10 +402,14 @@ status_t MediaCodecSource::initEncoder() {
}
AString outputMIME;
+ AString role;
CHECK(mOutputFormat->findString("mime", &outputMIME));
-
- mEncoder = MediaCodec::CreateByType(
+ if (AVUtils::get()->useQCHWEncoder(mOutputFormat, role)) {
+ mEncoder = MediaCodec::CreateByComponentName(mCodecLooper, role.c_str());
+ } else {
+ mEncoder = MediaCodec::CreateByType(
mCodecLooper, outputMIME.c_str(), true /* encoder */);
+ }
if (mEncoder == NULL) {
return NO_INIT;
@@ -423,9 +430,14 @@ status_t MediaCodecSource::initEncoder() {
return err;
}
+ int32_t hfrRatio = 0;
+ mOutputFormat->findInt32("hfr-ratio", &hfrRatio);
+
mEncoder->getOutputFormat(&mOutputFormat);
convertMessageToMetaData(mOutputFormat, mMeta);
+ AVUtils::get()->HFRUtils().setHFRRatio(mMeta, hfrRatio);
+
if (mFlags & FLAG_USE_SURFACE_INPUT) {
CHECK(mIsVideo);
@@ -514,6 +526,9 @@ void MediaCodecSource::signalEOS(status_t err) {
mOutputBufferQueue.clear();
mEncoderReachedEOS = true;
mErrorCode = err;
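+ // Map OMX hardware errors to ERROR_IO so the recorded output can still
+ // be finalized downstream (see the ERROR_IO handling in MPEG4Writer).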
+ if (err == OMX_ErrorHardware) {
+ mErrorCode = ERROR_IO;
+ }
mOutputBufferCond.signal();
}
@@ -572,6 +587,9 @@ status_t MediaCodecSource::feedEncoderInputBuffers() {
// push decoding time for video, or drift time for audio
if (mIsVideo) {
mDecodingTimeQueue.push_back(timeUs);
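+ // In metadata (batch) input mode a single buffer may carry several frames;
+ // the AV extension is assumed to append their decode times here as well.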
+ if (mFlags & FLAG_USE_METADATA_INPUT) {
+ AVUtils::get()->addDecodingTimesFromBatch(mbuf, mDecodingTimeQueue);
+ }
} else {
#if DEBUG_DRIFT_TIME
if (mFirstSampleTimeUs < 0ll) {
@@ -591,7 +609,7 @@ status_t MediaCodecSource::feedEncoderInputBuffers() {
status_t err = mEncoder->getInputBuffer(bufferIndex, &inbuf);
if (err != OK || inbuf == NULL) {
mbuf->release();
- signalEOS();
+ signalEOS(err);
break;
}
@@ -633,6 +651,9 @@ status_t MediaCodecSource::onStart(MetaData *params) {
resume();
} else {
CHECK(mPuller != NULL);
+ if (mIsVideo) {
+ mEncoder->requestIDRFrame();
+ }
mPuller->resume();
}
return OK;
@@ -643,8 +664,13 @@ status_t MediaCodecSource::onStart(MetaData *params) {
status_t err = OK;
if (mFlags & FLAG_USE_SURFACE_INPUT) {
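+ // When camera timestamps are not guaranteed monotonic, use the
+ // boottime-based start time key instead of kKeyTime.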
+ auto key = kKeyTime;
+ if (!property_get_bool("media.camera.ts.monotonic", true)) {
+ key = kKeyTimeBoot;
+ }
+
int64_t startTimeUs;
- if (!params || !params->findInt64(kKeyTime, &startTimeUs)) {
+ if (!params || !params->findInt64(key, &startTimeUs)) {
startTimeUs = -1ll;
}
resume(startTimeUs);
@@ -738,12 +764,14 @@ void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) {
sp<ABuffer> outbuf;
status_t err = mEncoder->getOutputBuffer(index, &outbuf);
if (err != OK || outbuf == NULL) {
- signalEOS();
+ signalEOS(err);
break;
}
MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
memcpy(mbuf->data(), outbuf->data(), outbuf->size());
+ sp<MetaData> meta = mbuf->meta_data();
+ AVUtils::get()->setDeferRelease(meta);
if (!(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
if (mIsVideo) {
@@ -799,7 +827,7 @@ void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findInt32("err", &err));
ALOGE("Encoder (%s) reported error : 0x%x",
mIsVideo ? "video" : "audio", err);
- signalEOS();
+ signalEOS(err);
}
break;
}
@@ -852,7 +880,7 @@ void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) {
}
case kWhatPause:
{
- if (mFlags && FLAG_USE_SURFACE_INPUT) {
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
suspend();
} else {
CHECK(mPuller != NULL);
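Aside on the kWhatPause hunk above: the old code tested the flag with a logical AND, so any non-zero mFlags value took the surface-input branch; the patch switches to a bitwise AND that isolates FLAG_USE_SURFACE_INPUT. A minimal, self-contained sketch of the difference, using made-up flag bits rather than the real MediaCodecSource constants:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical flag bits, for illustration only.
    enum {
        FLAG_USE_SURFACE_INPUT  = 1 << 0,
        FLAG_USE_METADATA_INPUT = 1 << 1,
    };

    int main() {
        uint32_t flags = FLAG_USE_METADATA_INPUT;   // surface input NOT set

        // Buggy test: '&&' treats both operands as booleans, so any non-zero
        // flags value makes the condition true regardless of which bit is set.
        bool buggy = flags && FLAG_USE_SURFACE_INPUT;

        // Fixed test: '&' isolates the one bit we actually care about.
        bool fixed = (flags & FLAG_USE_SURFACE_INPUT) != 0;

        printf("buggy=%d fixed=%d\n", buggy, fixed);   // prints buggy=1 fixed=0
        return 0;
    }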
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 2a50692..089c150 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -64,4 +64,32 @@ const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
const char *MEDIA_MIMETYPE_DATA_TIMED_ID3 = "application/x-id3v4";
+const char *MEDIA_MIMETYPE_VIDEO_FLV1 = "video/x-flv";
+const char *MEDIA_MIMETYPE_VIDEO_MJPEG = "video/x-jpeg";
+const char *MEDIA_MIMETYPE_VIDEO_RV = "video/vnd.rn-realvideo";
+const char *MEDIA_MIMETYPE_VIDEO_VC1 = "video/vc1";
+const char *MEDIA_MIMETYPE_VIDEO_FFMPEG = "video/ffmpeg";
+
+const char *MEDIA_MIMETYPE_AUDIO_PCM = "audio/x-pcm";
+const char *MEDIA_MIMETYPE_AUDIO_RA = "audio/vnd.rn-realaudio";
+const char *MEDIA_MIMETYPE_AUDIO_FFMPEG = "audio/ffmpeg";
+
+const char *MEDIA_MIMETYPE_CONTAINER_APE = "audio/x-ape";
+const char *MEDIA_MIMETYPE_CONTAINER_DIVX = "video/divx";
+const char *MEDIA_MIMETYPE_CONTAINER_DTS = "audio/vnd.dts";
+const char *MEDIA_MIMETYPE_CONTAINER_FLAC = "audio/flac";
+const char *MEDIA_MIMETYPE_CONTAINER_FLV = "video/x-flv";
+const char *MEDIA_MIMETYPE_CONTAINER_MOV = "video/quicktime";
+const char *MEDIA_MIMETYPE_CONTAINER_MP2 = "audio/mpeg2";
+const char *MEDIA_MIMETYPE_CONTAINER_MPG = "video/mpeg";
+const char *MEDIA_MIMETYPE_CONTAINER_RA = "audio/vnd.rn-realaudio";
+const char *MEDIA_MIMETYPE_CONTAINER_RM = "video/vnd.rn-realvideo";
+const char *MEDIA_MIMETYPE_CONTAINER_TS = "video/mp2t";
+const char *MEDIA_MIMETYPE_CONTAINER_WEBM = "video/webm";
+const char *MEDIA_MIMETYPE_CONTAINER_WMA = "audio/x-ms-wma";
+const char *MEDIA_MIMETYPE_CONTAINER_WMV = "video/x-ms-wmv";
+const char *MEDIA_MIMETYPE_CONTAINER_VC1 = "video/vc1";
+const char *MEDIA_MIMETYPE_CONTAINER_HEVC = "video/hevc";
+const char *MEDIA_MIMETYPE_CONTAINER_FFMPEG = "video/ffmpeg";
+
} // namespace android
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index e21fe6e..8c63de2 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -40,8 +40,12 @@
#include <media/stagefright/MetaData.h>
#include <utils/String8.h>
+#include <stagefright/AVExtensions.h>
+
namespace android {
+MediaExtractor::Plugin MediaExtractor::sPlugin;
+
sp<MetaData> MediaExtractor::getMetaData() {
return new MetaData;
}
@@ -52,11 +56,14 @@ uint32_t MediaExtractor::flags() const {
// static
sp<MediaExtractor> MediaExtractor::Create(
- const sp<DataSource> &source, const char *mime) {
+ const sp<DataSource> &source, const char *mime,
+ const uint32_t flags, const sp<AMessage> *prevMeta) {
+
sp<AMessage> meta;
String8 tmp;
if (mime == NULL) {
+ int64_t sniffStart = ALooper::GetNowUs();
float confidence;
if (!source->sniff(&tmp, &confidence, &meta)) {
ALOGV("FAILED to autodetect media content.");
@@ -65,8 +72,11 @@ sp<MediaExtractor> MediaExtractor::Create(
}
mime = tmp.string();
- ALOGV("Autodetected media content as '%s' with confidence %.2f",
- mime, confidence);
+ ALOGV("Autodetected media content as '%s' with confidence %.2f (%.2f ms)",
+ mime, confidence,
+ ((float)(ALooper::GetNowUs() - sniffStart) / 1000.0f));
+ } else if (prevMeta != NULL) {
+ meta = *prevMeta;
}
bool isDrm = false;
@@ -91,8 +101,15 @@ sp<MediaExtractor> MediaExtractor::Create(
}
}
- MediaExtractor *ret = NULL;
- if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
+ sp<MediaExtractor> ret;
+ AString extractorName;
+ if ((ret = AVFactory::get()->createExtendedExtractor(source, mime, meta, flags)) != NULL) {
+ ALOGI("Using extended extractor");
+ } else if (meta.get() && meta->findString("extended-extractor-use", &extractorName)
+ && sPlugin.create) {
+ ALOGI("Use extended extractor for the special mime(%s) or codec", mime);
+ ret = sPlugin.create(source, mime, meta);
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
|| !strcasecmp(mime, "audio/mp4")) {
ret = new MPEG4Extractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
@@ -119,8 +136,11 @@ sp<MediaExtractor> MediaExtractor::Create(
ret = new MPEG2PSExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MIDI)) {
ret = new MidiExtractor(source);
+ } else if (!isDrm && sPlugin.create) {
+ ret = sPlugin.create(source, mime, meta);
}
+ ret = AVFactory::get()->updateExtractor(ret, source, mime, meta, flags);
if (ret != NULL) {
if (isDrm) {
ret->setDrmFlag(true);
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index b13877d..798a855 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -35,6 +35,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/Utils.h>
+#include <stagefright/AVExtensions.h>
namespace android {
@@ -42,7 +43,7 @@ MediaMuxer::MediaMuxer(int fd, OutputFormat format)
: mFormat(format),
mState(UNINITIALIZED) {
if (format == OUTPUT_FORMAT_MPEG_4) {
- mWriter = new MPEG4Writer(fd);
+ mWriter = AVFactory::get()->CreateMPEG4Writer(fd);
} else if (format == OUTPUT_FORMAT_WEBM) {
mWriter = new WebmWriter(fd);
}
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index d6255d6..f72acf7 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -197,7 +197,8 @@ NuCachedSource2::NuCachedSource2(
mHighwaterThresholdBytes(kDefaultHighWaterThreshold),
mLowwaterThresholdBytes(kDefaultLowWaterThreshold),
mKeepAliveIntervalUs(kDefaultKeepAliveIntervalUs),
- mDisconnectAtHighwatermark(disconnectAtHighwatermark) {
+ mDisconnectAtHighwatermark(disconnectAtHighwatermark),
+ mSuspended(false) {
// We are NOT going to support disconnect-at-highwatermark indefinitely
// and we are not guaranteeing support for client-specified cache
// parameters. Both of these are temporary measures to solve a specific
@@ -332,7 +333,7 @@ void NuCachedSource2::fetchInternal() {
}
}
- if (reconnect) {
+ if (reconnect && !mSuspended) {
status_t err =
mSource->reconnectAtOffset(mCacheOffset + mCache->totalSize());
@@ -442,6 +443,13 @@ void NuCachedSource2::onFetch() {
delayUs = 100000ll;
}
+ if (mSuspended) {
+ static_cast<HTTPBase *>(mSource.get())->disconnect();
+ mFinalStatus = -EAGAIN;
+ return;
+ }
+
+
(new AMessage(kWhatFetchMore, mReflector))->post(delayUs);
}
@@ -771,4 +779,25 @@ void NuCachedSource2::RemoveCacheSpecificHeaders(
}
}
+status_t NuCachedSource2::disconnectWhileSuspend() {
+ if (mSource != NULL) {
+ static_cast<HTTPBase *>(mSource.get())->disconnect();
+ mFinalStatus = -EAGAIN;
+ mSuspended = true;
+ } else {
+ return ERROR_UNSUPPORTED;
+ }
+
+ return OK;
+}
+
+status_t NuCachedSource2::connectWhileResume() {
+ mSuspended = false;
+
+ // Begin to connect again and fetch more data
+ (new AMessage(kWhatFetchMore, mReflector))->post();
+
+ return OK;
+}
+
} // namespace android
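The NuCachedSource2 additions above follow one pattern: disconnectWhileSuspend() drops the HTTP connection, marks the cache status -EAGAIN and raises mSuspended so the fetch path stops reconnecting, while connectWhileResume() clears the flag and reposts the fetch message. A stripped-down sketch of that guard pattern, with placeholder callbacks standing in for the real HTTPBase/AMessage machinery (names here are illustrative, not the actual members):

    #include <atomic>
    #include <functional>

    // Illustrative only: a fetch loop that skips network work while suspended.
    class SuspendableFetcher {
    public:
        void suspend(const std::function<void()> &disconnect) {
            mSuspended = true;
            disconnect();              // e.g. drop the HTTP connection
        }
        void resume(const std::function<void()> &scheduleFetch) {
            mSuspended = false;
            scheduleFetch();           // e.g. post the "fetch more" message
        }
        void onFetch(const std::function<void()> &reconnect,
                     const std::function<void()> &fetchMore) {
            if (mSuspended) {
                return;                // never touch the network while suspended
            }
            reconnect();
            fetchMore();
        }
    private:
        std::atomic<bool> mSuspended{false};
    };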
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index d252cb6..4f3e636 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -181,6 +181,7 @@ bool MuxOMX::isLocalNode_l(node_id node) const {
}
// static
+
bool MuxOMX::CanLiveLocally(const char *name) {
#ifdef __LP64__
(void)name; // disable unused parameter warning
@@ -188,7 +189,7 @@ bool MuxOMX::CanLiveLocally(const char *name) {
return false;
#else
// 32 bit processes run only OMX.google.* components locally
- return !strncasecmp(name, "OMX.google.", 11);
+ return !strncasecmp(name, "OMX.google.", 11) || !strncasecmp(name, "OMX.ffmpeg.", 11);
#endif
}
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7e15e18..de00ff2 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -56,6 +56,11 @@
#include "include/avc_utils.h"
+#ifdef USE_S3D_SUPPORT
+#include "Exynos_OMX_Def.h"
+#include "ExynosHWCService.h"
+#endif
+
namespace android {
// Treat time out as an error if we have not received any output
@@ -150,7 +155,8 @@ static void InitOMXParams(T *params) {
}
static bool IsSoftwareCodec(const char *componentName) {
- if (!strncmp("OMX.google.", componentName, 11)) {
+ if (!strncmp("OMX.google.", componentName, 11)
+ || !strncmp("OMX.ffmpeg.", componentName, 11)) {
return true;
}
@@ -378,7 +384,7 @@ status_t OMXCodec::parseHEVCCodecSpecificData(
const uint8_t *ptr = (const uint8_t *)data;
// verify minimum size and configurationVersion == 1.
- if (size < 7 || ptr[0] != 1) {
+ if (size < 23 || ptr[0] != 1) {
return ERROR_MALFORMED;
}
@@ -393,6 +399,9 @@ status_t OMXCodec::parseHEVCCodecSpecificData(
size -= 1;
size_t j = 0, i = 0;
for (i = 0; i < numofArrays; i++) {
+ if (size < 3) {
+ return ERROR_MALFORMED;
+ }
ptr += 1;
size -= 1;
@@ -856,7 +865,8 @@ void OMXCodec::setVideoInputFormat(
compressionFormat = OMX_VIDEO_CodingAVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
compressionFormat = OMX_VIDEO_CodingHEVC;
- } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4_DP, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG4;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
compressionFormat = OMX_VIDEO_CodingH263;
@@ -1248,7 +1258,8 @@ status_t OMXCodec::setVideoOutputFormat(
OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
compressionFormat = OMX_VIDEO_CodingAVC;
- } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4_DP, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG4;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
compressionFormat = OMX_VIDEO_CodingHEVC;
@@ -1450,6 +1461,8 @@ void OMXCodec::setComponentRole(
"video_decoder.hevc", "video_encoder.hevc" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4,
"video_decoder.mpeg4", "video_encoder.mpeg4" },
+ { MEDIA_MIMETYPE_VIDEO_MPEG4_DP,
+ "video_decoder.mpeg4", NULL },
{ MEDIA_MIMETYPE_VIDEO_H263,
"video_decoder.h263", "video_encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_VP8,
@@ -1565,6 +1578,8 @@ bool OMXCodec::isIntermediateState(State state) {
return state == LOADED_TO_IDLE
|| state == IDLE_TO_EXECUTING
|| state == EXECUTING_TO_IDLE
+ || state == PAUSING
+ || state == FLUSHING
|| state == IDLE_TO_LOADED
|| state == RECONFIGURING;
}
@@ -1706,14 +1721,13 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
int32_t numchannels = 0;
if (delay + padding) {
if (mOutputFormat->findInt32(kKeyChannelCount, &numchannels)) {
- size_t frameSize = numchannels * sizeof(int16_t);
if (mSkipCutBuffer != NULL) {
size_t prevbuffersize = mSkipCutBuffer->size();
if (prevbuffersize != 0) {
ALOGW("Replacing SkipCutBuffer holding %zu bytes", prevbuffersize);
}
}
- mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
+ mSkipCutBuffer = new SkipCutBuffer(delay, padding, numchannels);
}
}
}
@@ -1779,6 +1793,10 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
if (mFlags & kEnableGrallocUsageProtected) {
usage |= GRALLOC_USAGE_PROTECTED;
+#ifdef GRALLOC_USAGE_PRIVATE_NONSECURE
+ if (!(mFlags & kUseSecureInputBuffers))
+ usage |= GRALLOC_USAGE_PRIVATE_NONSECURE;
+#endif
}
err = setNativeWindowSizeFormatAndUsage(
@@ -1814,7 +1832,12 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
// plus an extra buffer to account for incorrect minUndequeuedBufs
CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
-
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ // Some devices don't like to set OMX_IndexParamPortDefinition at this
+ // point (even with an unmodified def), so skip it if possible.
+ // This check was present in KitKat.
+ if (def.nBufferCountActual < def.nBufferCountMin + minUndequeuedBufs) {
+#endif
for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
OMX_U32 newBufferCount =
def.nBufferCountMin + minUndequeuedBufs + extraBuffers;
@@ -1836,6 +1859,9 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
}
CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
+#ifdef BOARD_CANT_REALLOCATE_OMX_BUFFERS
+ }
+#endif
err = native_window_set_buffer_count(
mNativeWindow.get(), def.nBufferCountActual);
@@ -2331,7 +2357,38 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
break;
}
#endif
+#ifdef USE_S3D_SUPPORT
+ case (OMX_EVENTTYPE)OMX_EventS3DInformation:
+ {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<android::IExynosHWCService> hwc = interface_cast<android::IExynosHWCService>(
+ sm->getService(String16("Exynos.HWCService")));
+ if (hwc != NULL) {
+ if (data1 == OMX_TRUE) {
+ int eS3DMode;
+ switch (data2) {
+ case OMX_SEC_FPARGMT_SIDE_BY_SIDE:
+ eS3DMode = S3D_SBS;
+ break;
+ case OMX_SEC_FPARGMT_TOP_BOTTOM:
+ eS3DMode = S3D_TB;
+ break;
+ case OMX_SEC_FPARGMT_CHECKERBRD_INTERL: // unsupported format on HDMI
+ case OMX_SEC_FPARGMT_COLUMN_INTERL:
+ case OMX_SEC_FPARGMT_ROW_INTERL:
+ case OMX_SEC_FPARGMT_TEMPORAL_INTERL:
+ default:
+ eS3DMode = S3D_NONE;
+ }
+ hwc->setHdmiResolution(0, eS3DMode);
+ }
+ } else {
+ ALOGE("Exynos.HWCService is unavailable");
+ }
+ break;
+ }
+#endif
default:
{
CODEC_LOGV("EVENT(%d, %u, %u)", event, data1, data2);
@@ -2456,6 +2513,7 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
// We implicitly resume pulling on our upstream source.
mPaused = false;
+ mNoMoreOutputData = false;
drainInputBuffers();
fillOutputBuffers();
@@ -2572,6 +2630,14 @@ void OMXCodec::onStateChange(OMX_STATETYPE newState) {
break;
}
+ case OMX_StatePause:
+ {
+ CODEC_LOGV("Now paused.");
+ CHECK_EQ((int)mState, (int)PAUSING);
+ setState(PAUSED);
+ break;
+ }
+
case OMX_StateInvalid:
{
setState(ERROR);
@@ -2604,7 +2670,8 @@ status_t OMXCodec::freeBuffersOnPort(
status_t stickyErr = OK;
- for (size_t i = buffers->size(); i-- > 0;) {
+ for (size_t i = buffers->size(); i > 0;) {
+ i--;
BufferInfo *info = &buffers->editItemAt(i);
if (onlyThoseWeOwn && info->mStatus == OWNED_BY_COMPONENT) {
@@ -2686,7 +2753,7 @@ void OMXCodec::onPortSettingsChanged(OMX_U32 portIndex) {
bool OMXCodec::flushPortAsync(OMX_U32 portIndex) {
CHECK(mState == EXECUTING || mState == RECONFIGURING
- || mState == EXECUTING_TO_IDLE);
+ || mState == EXECUTING_TO_IDLE || mState == FLUSHING);
CODEC_LOGV("flushPortAsync(%u): we own %zu out of %zu buffers already.",
portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
@@ -2736,7 +2803,7 @@ status_t OMXCodec::enablePortAsync(OMX_U32 portIndex) {
}
void OMXCodec::fillOutputBuffers() {
- CHECK_EQ((int)mState, (int)EXECUTING);
+ CHECK(mState == EXECUTING || mState == FLUSHING);
// This is a workaround for some decoders not properly reporting
// end-of-output-stream. If we own all input buffers and also own
@@ -2763,7 +2830,7 @@ void OMXCodec::fillOutputBuffers() {
}
void OMXCodec::drainInputBuffers() {
- CHECK(mState == EXECUTING || mState == RECONFIGURING);
+ CHECK(mState == EXECUTING || mState == RECONFIGURING || mState == FLUSHING);
if (mFlags & kUseSecureInputBuffers) {
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
@@ -3510,6 +3577,11 @@ void OMXCodec::clearCodecSpecificData() {
status_t OMXCodec::start(MetaData *meta) {
Mutex::Autolock autoLock(mLock);
+ if (mPaused) {
+ status_t err = resumeLocked(true);
+ return err;
+ }
+
if (mState != LOADED) {
CODEC_LOGE("called start in the unexpected state: %d", mState);
return UNKNOWN_ERROR;
@@ -3620,6 +3692,7 @@ status_t OMXCodec::stopOmxComponent_l() {
isError = true;
}
+ case PAUSED:
case EXECUTING:
{
setState(EXECUTING_TO_IDLE);
@@ -3691,6 +3764,14 @@ status_t OMXCodec::read(
Mutex::Autolock autoLock(mLock);
+ if (mPaused) {
+ err = resumeLocked(false);
+ if(err != OK) {
+ CODEC_LOGE("Failed to restart codec err= %d", err);
+ return err;
+ }
+ }
+
if (mState != EXECUTING && mState != RECONFIGURING) {
return UNKNOWN_ERROR;
}
@@ -3747,6 +3828,8 @@ status_t OMXCodec::read(
mFilledBuffers.clear();
CHECK_EQ((int)mState, (int)EXECUTING);
+ // The DSP only supports flushing both ports simultaneously; flushing an individual port is not supported.
+ setState(FLUSHING);
bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
@@ -3776,6 +3859,11 @@ status_t OMXCodec::read(
return UNKNOWN_ERROR;
}
+ if (seeking) {
+ CHECK_EQ((int)mState, (int)FLUSHING);
+ setState(EXECUTING);
+ }
+
if (mFilledBuffers.empty()) {
return mSignalledEOS ? mFinalStatus : ERROR_END_OF_STREAM;
}
@@ -4209,11 +4297,60 @@ void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
}
status_t OMXCodec::pause() {
- Mutex::Autolock autoLock(mLock);
+ CODEC_LOGV("pause mState=%d", mState);
+
+ Mutex::Autolock autoLock(mLock);
+
+ if (mState != EXECUTING) {
+ return UNKNOWN_ERROR;
+ }
+
+ while (isIntermediateState(mState)) {
+ mAsyncCompletion.wait(mLock);
+ }
+ if (!strncmp(mComponentName, "OMX.qcom.", 9)) {
+ status_t err = mOMX->sendCommand(mNode,
+ OMX_CommandStateSet, OMX_StatePause);
+ CHECK_EQ(err, (status_t)OK);
+ setState(PAUSING);
+
+ mPaused = true;
+ while (mState != PAUSED && mState != ERROR) {
+ mAsyncCompletion.wait(mLock);
+ }
+ return mState == ERROR ? UNKNOWN_ERROR : OK;
+ } else {
+ mPaused = true;
+ return OK;
+ }
- mPaused = true;
+}
- return OK;
+status_t OMXCodec::resumeLocked(bool drainInputBuf) {
+ CODEC_LOGV("resume mState=%d", mState);
+
+ if (!strncmp(mComponentName, "OMX.qcom.", 9)) {
+ while (isIntermediateState(mState)) {
+ mAsyncCompletion.wait(mLock);
+ }
+ CHECK_EQ(mState, (status_t)PAUSED);
+ status_t err = mOMX->sendCommand(mNode,
+ OMX_CommandStateSet, OMX_StateExecuting);
+ CHECK_EQ(err, (status_t)OK);
+ setState(IDLE_TO_EXECUTING);
+ mPaused = false;
+ while (mState != EXECUTING && mState != ERROR) {
+ mAsyncCompletion.wait(mLock);
+ }
+ if(drainInputBuf)
+ drainInputBuffers();
+ return mState == ERROR ? UNKNOWN_ERROR : OK;
+ } else { // SW Codec
+ mPaused = false;
+ if(drainInputBuf)
+ drainInputBuffers();
+ return OK;
+ }
}
////////////////////////////////////////////////////////////////////////////////
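The new pause()/resumeLocked() paths above only send OMX_CommandStateSet for the QCOM components and then block on mAsyncCompletion until onStateChange() moves the codec out of the intermediate PAUSING/IDLE_TO_EXECUTING state. A stripped-down sketch of that "send command, then wait for the target state" pattern using a plain mutex and condition variable (state names and the command stub are illustrative, not the OMXCodec API):

    #include <condition_variable>
    #include <mutex>

    enum State { EXECUTING, PAUSING, PAUSED, ERROR_STATE };

    class PausableCodec {
    public:
        // Returns true once the component reports PAUSED, false on error.
        bool pause() {
            std::unique_lock<std::mutex> lock(mLock);
            if (mState != EXECUTING) return false;
            sendPauseCommandToComponent();   // placeholder for OMX_CommandStateSet
            mState = PAUSING;
            mCond.wait(lock, [this] {
                return mState == PAUSED || mState == ERROR_STATE;
            });
            return mState == PAUSED;
        }

        // Called from the component's event callback (e.g. OMX_StatePause reached).
        void onStateChange(State newState) {
            std::lock_guard<std::mutex> lock(mLock);
            mState = newState;
            mCond.notify_all();
        }

    private:
        void sendPauseCommandToComponent() {}   // stub for illustration
        std::mutex mLock;
        std::condition_variable mCond;
        State mState = EXECUTING;
    };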
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 578171f..d63ac96 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -179,6 +179,9 @@ struct MyVorbisExtractor : public MyOggExtractor {
protected:
virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
+ if (granulePos > INT64_MAX / 1000000ll) {
+ return INT64_MAX;
+ }
return granulePos * 1000000ll / mVi.rate;
}
@@ -771,8 +774,13 @@ status_t MyOggExtractor::_readNextPacket(MediaBuffer **out, bool calcVorbisTimes
return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
}
- mCurrentPageSamples =
- mCurrentPage.mGranulePosition - mPrevGranulePosition;
+ // Prevent a harmless unsigned integer overflow by clamping to 0
+ if (mCurrentPage.mGranulePosition >= mPrevGranulePosition) {
+ mCurrentPageSamples =
+ mCurrentPage.mGranulePosition - mPrevGranulePosition;
+ } else {
+ mCurrentPageSamples = 0;
+ }
mFirstPacketInPage = true;
mPrevGranulePosition = mCurrentPage.mGranulePosition;
@@ -917,6 +925,9 @@ int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const {
if (granulePos > mCodecDelay) {
pcmSamplePosition = granulePos - mCodecDelay;
}
+ if (pcmSamplePosition > INT64_MAX / 1000000ll) {
+ return INT64_MAX;
+ }
return pcmSamplePosition * 1000000ll / kOpusSampleRate;
}
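Both getTimeUsOfGranule() changes above guard the conversion "granulePos * 1000000 / rate" against 64-bit overflow by saturating once the position exceeds INT64_MAX / 1000000. A tiny standalone version of the same check (function name and the zero-rate handling are mine, not the extractor's):

    #include <cstdint>
    #include <cstdio>

    // Saturate instead of overflowing when converting a granule/sample position
    // to microseconds.
    int64_t granuleToTimeUs(uint64_t granulePos, uint64_t rate) {
        if (rate == 0) {
            return 0;                          // caller is expected to validate the rate
        }
        if (granulePos > INT64_MAX / 1000000ll) {
            return INT64_MAX;                  // multiplication would overflow: clamp
        }
        return (int64_t)(granulePos * 1000000ll / rate);
    }

    int main() {
        printf("%lld\n", (long long)granuleToTimeUs(44100ull * 3600, 44100)); // one hour
        printf("%lld\n", (long long)granuleToTimeUs(UINT64_MAX, 44100));      // clamped
        return 0;
    }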
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
index 335bd5d..f5558ce 100644
--- a/media/libstagefright/SampleIterator.cpp
+++ b/media/libstagefright/SampleIterator.cpp
@@ -30,6 +30,8 @@
namespace android {
+const uint32_t kMaxSampleCacheSize = 4096;
+
SampleIterator::SampleIterator(SampleTable *table)
: mTable(table),
mInitialized(false),
@@ -37,7 +39,12 @@ SampleIterator::SampleIterator(SampleTable *table)
mTTSSampleIndex(0),
mTTSSampleTime(0),
mTTSCount(0),
- mTTSDuration(0) {
+ mTTSDuration(0),
+ mSampleCache(NULL) {
+ reset();
+}
+
+SampleIterator::~SampleIterator() {
reset();
}
@@ -49,6 +56,10 @@ void SampleIterator::reset() {
mStopChunkSampleIndex = 0;
mSamplesPerChunk = 0;
mChunkDesc = 0;
+ delete[] mSampleCache;
+ mSampleCache = NULL;
+ mSampleCacheSize = 0;
+ mCurrentSampleCacheStartIndex = 0;
}
status_t SampleIterator::seekTo(uint32_t sampleIndex) {
@@ -172,6 +183,13 @@ status_t SampleIterator::findChunkRange(uint32_t sampleIndex) {
if (mSampleToChunkIndex + 1 < mTable->mNumSampleToChunkOffsets) {
mStopChunk = entry[1].startChunk;
+ if (mStopChunk < mFirstChunk ||
+ (mStopChunk - mFirstChunk) > UINT32_MAX / mSamplesPerChunk ||
+ ((mStopChunk - mFirstChunk) * mSamplesPerChunk >
+ UINT32_MAX - mFirstChunkSampleIndex)) {
+
+ return ERROR_OUT_OF_RANGE;
+ }
mStopChunkSampleIndex =
mFirstChunkSampleIndex
+ (mStopChunk - mFirstChunk) * mSamplesPerChunk;
@@ -234,58 +252,73 @@ status_t SampleIterator::getSampleSizeDirect(
return OK;
}
+ bool readNewSampleCache = false;
+
+ // Check if current sample is inside cache, otherwise read new cache
+ if (sampleIndex < mCurrentSampleCacheStartIndex ||
+ ((sampleIndex - mCurrentSampleCacheStartIndex) *
+ mTable->mSampleSizeFieldSize + 4) / 8 >= mSampleCacheSize) {
+ uint32_t prevCacheSize = mSampleCacheSize;
+ mSampleCacheSize = ((mTable->mNumSampleSizes - sampleIndex) *
+ mTable->mSampleSizeFieldSize + 4) / 8;
+ mSampleCacheSize = mSampleCacheSize > kMaxSampleCacheSize ?
+ kMaxSampleCacheSize : mSampleCacheSize;
+ mCurrentSampleCacheStartIndex = sampleIndex;
+ readNewSampleCache = true;
+ if (mSampleCacheSize != prevCacheSize) {
+ delete[] mSampleCache;
+ mSampleCache = new uint8_t[mSampleCacheSize];
+ }
+ }
+
+ if (mSampleCache == NULL) {
+ return ERROR_IO;
+ }
+
+ if (mTable->mSampleSizeFieldSize != 32 &&
+ mTable->mSampleSizeFieldSize != 16 &&
+ mTable->mSampleSizeFieldSize != 8 &&
+ mTable->mSampleSizeFieldSize != 4) {
+ return ERROR_IO;
+ }
+
+ if (readNewSampleCache) {
+ if (mTable->mDataSource->readAt(
+ mTable->mSampleSizeOffset + 12 +
+ mTable->mSampleSizeFieldSize * sampleIndex / 8,
+ mSampleCache,
+ mSampleCacheSize) < (int32_t) mSampleCacheSize) {
+ return ERROR_IO;
+ }
+ }
+
+ uint32_t cacheReadOffset = (sampleIndex - mCurrentSampleCacheStartIndex) *
+ mTable->mSampleSizeFieldSize / 8;
+
switch (mTable->mSampleSizeFieldSize) {
case 32:
{
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + 4 * sampleIndex,
- size, sizeof(*size)) < (ssize_t)sizeof(*size)) {
- return ERROR_IO;
- }
-
- *size = ntohl(*size);
+ *size = ntohl(*((size_t *) &(mSampleCache[cacheReadOffset])));
break;
}
case 16:
{
- uint16_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + 2 * sampleIndex,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = ntohs(x);
+ *size = ntohs(*((uint16_t *) &(mSampleCache[cacheReadOffset])));
break;
}
case 8:
{
- uint8_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + sampleIndex,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = x;
+ *size = mSampleCache[cacheReadOffset];
break;
}
default:
{
- CHECK_EQ(mTable->mSampleSizeFieldSize, 4);
-
- uint8_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + sampleIndex / 2,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = (sampleIndex & 1) ? x & 0x0f : x >> 4;
- break;
+ *size = (sampleIndex - mCurrentSampleCacheStartIndex) & 0x01 ?
+ (mSampleCache[cacheReadOffset] & 0x0f) :
+ (mSampleCache[cacheReadOffset] & 0xf0) >> 4;
}
}
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 72e30f1..8a38c24 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -195,11 +195,11 @@ status_t SampleTable::setChunkOffsetParams(
mNumChunkOffsets = U32_AT(&header[4]);
if (mChunkOffsetType == kChunkOffsetType32) {
- if (data_size < 8 + mNumChunkOffsets * 4) {
+ if ((data_size - 8) / 4 < mNumChunkOffsets) {
return ERROR_MALFORMED;
}
} else {
- if (data_size < 8 + mNumChunkOffsets * 8) {
+ if ((data_size - 8) / 8 < mNumChunkOffsets) {
return ERROR_MALFORMED;
}
}
@@ -210,6 +210,11 @@ status_t SampleTable::setChunkOffsetParams(
status_t SampleTable::setSampleToChunkParams(
off64_t data_offset, size_t data_size) {
if (mSampleToChunkOffset >= 0) {
+ // already set
+ return ERROR_MALFORMED;
+ }
+
+ if (data_offset < 0) {
return ERROR_MALFORMED;
}
@@ -277,6 +282,10 @@ status_t SampleTable::setSampleToChunkParams(
for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
uint8_t buffer[sizeof(SampleToChunkEntry)];
+ if ((SIZE_MAX - 8 - (i * 12)) < (size_t)mSampleToChunkOffset) {
+ return ERROR_MALFORMED;
+ }
+
if (mDataSource->readAt(
mSampleToChunkOffset + 8 + i * sizeof(SampleToChunkEntry),
buffer,
@@ -450,7 +459,7 @@ status_t SampleTable::setCompositionTimeToSampleParams(
size_t numEntries = U32_AT(&header[4]);
- if (data_size != (numEntries + 1) * 8) {
+ if (((SIZE_MAX / 8) - 1 < numEntries) || (data_size != (numEntries + 1) * 8)) {
return ERROR_MALFORMED;
}
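The stco/co64 and ctts checks above are rewritten from the form "data_size < 8 + n * entrySize", whose multiplication can wrap for a hostile entry count, into a division that cannot overflow. A minimal sketch of both forms, assuming the caller has already verified data_size >= 8 and passes a fixed entry size of 4 or 8 as in the hunks above:

    #include <cstddef>
    #include <cstdint>

    // Overflow-prone: 8 + n * entrySize can wrap (notably with a 32-bit size_t),
    // letting an oversized 'n' slip past the bounds check.
    bool fitsUnsafe(size_t dataSize, uint32_t n, size_t entrySize) {
        return dataSize >= 8 + (size_t)n * entrySize;
    }

    // Overflow-safe rewrite used in the patch: divide instead of multiplying.
    // Requires dataSize >= 8 and entrySize != 0.
    bool fitsSafe(size_t dataSize, uint32_t n, size_t entrySize) {
        return (dataSize - 8) / entrySize >= n;
    }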
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 1da1e5e..d30be88 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -24,21 +24,32 @@
namespace android {
-SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
+SkipCutBuffer::SkipCutBuffer(size_t skip, size_t cut, size_t num16BitChannels) {
- if (skip < 0 || cut < 0 || cut > 64 * 1024) {
- ALOGW("out of range skip/cut: %d/%d, using passthrough instead", skip, cut);
- skip = 0;
- cut = 0;
+ mWriteHead = 0;
+ mReadHead = 0;
+ mCapacity = 0;
+ mCutBuffer = NULL;
+
+ if (num16BitChannels == 0 || num16BitChannels > INT32_MAX / 2) {
+ ALOGW("# channels out of range: %zu, using passthrough instead", num16BitChannels);
+ return;
}
+ size_t frameSize = num16BitChannels * 2;
+ if (skip > INT32_MAX / frameSize || cut > INT32_MAX / frameSize
+ || cut * frameSize > INT32_MAX - 4096) {
+ ALOGW("out of range skip/cut: %zu/%zu, using passthrough instead",
+ skip, cut);
+ return;
+ }
+ skip *= frameSize;
+ cut *= frameSize;
mFrontPadding = mSkip = skip;
mBackPadding = cut;
- mWriteHead = 0;
- mReadHead = 0;
mCapacity = cut + 4096;
- mCutBuffer = new char[mCapacity];
- ALOGV("skipcutbuffer %d %d %d", skip, cut, mCapacity);
+ mCutBuffer = new (std::nothrow) char[mCapacity];
+ ALOGV("skipcutbuffer %zu %zu %d", skip, cut, mCapacity);
}
SkipCutBuffer::~SkipCutBuffer() {
@@ -46,6 +57,11 @@ SkipCutBuffer::~SkipCutBuffer() {
}
void SkipCutBuffer::submit(MediaBuffer *buffer) {
+ if (mCutBuffer == NULL) {
+ // passthrough mode
+ return;
+ }
+
int32_t offset = buffer->range_offset();
int32_t buflen = buffer->range_length();
@@ -73,6 +89,11 @@ void SkipCutBuffer::submit(MediaBuffer *buffer) {
}
void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+ if (mCutBuffer == NULL) {
+ // passthrough mode
+ return;
+ }
+
int32_t offset = buffer->offset();
int32_t buflen = buffer->size();
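The reworked SkipCutBuffer constructor above takes skip/cut in frames plus the channel count, converts them to bytes for 16-bit PCM with overflow checks, and leaves mCutBuffer NULL (passthrough) when anything is out of range; submit() then becomes a no-op. A compact sketch of the same frame-to-byte validation, with a helper name and example values of my own choosing:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Returns false (meaning "fall back to passthrough") if the requested skip/cut
    // cannot be expressed safely in bytes for 16-bit PCM with this channel count.
    bool framesToBytes(size_t skipFrames, size_t cutFrames, size_t numChannels,
                       size_t *skipBytes, size_t *cutBytes) {
        if (numChannels == 0 || numChannels > INT32_MAX / 2) {
            return false;
        }
        size_t frameSize = numChannels * 2;               // 16-bit samples
        if (skipFrames > INT32_MAX / frameSize ||
            cutFrames  > INT32_MAX / frameSize ||
            cutFrames * frameSize > (size_t)INT32_MAX - 4096) {
            return false;                                  // short-circuit keeps the multiply safe
        }
        *skipBytes = skipFrames * frameSize;
        *cutBytes  = cutFrames * frameSize;
        return true;
    }

    int main() {
        size_t skip = 0, cut = 0;
        if (framesToBytes(529, 1058, 2, &skip, &cut)) {   // example delay/padding values
            printf("skip=%zu bytes cut=%zu bytes\n", skip, cut);
        }
        return 0;
    }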
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index db33e83..d5ef1a6 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -28,6 +28,8 @@
#include <media/mediametadataretriever.h>
#include <private/media/VideoFrame.h>
+#include <stagefright/AVExtensions.h>
+
namespace android {
StagefrightMediaScanner::StagefrightMediaScanner() {}
@@ -40,7 +42,11 @@ static bool FileHasAcceptableExtension(const char *extension) {
".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
- ".avi", ".mpeg", ".mpg", ".awb", ".mpga"
+ ".adts", ".dm", ".m2ts", ".mp3d", ".wmv", ".asf", ".flv",
+ ".mov", ".ra", ".rm", ".rmvb", ".ac3", ".ape", ".dts",
+ ".mp1", ".mp2", ".f4v", "hlv", "nrg", "m2v", ".swf",
+ ".avi", ".mpg", ".mpeg", ".awb", ".vc1", ".vob", ".divx",
+ ".mpga", ".mov", ".qcp", ".ec3", ".opus"
};
static const size_t kNumValidExtensions =
sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
@@ -75,7 +81,8 @@ MediaScanResult StagefrightMediaScanner::processFileInternal(
return MEDIA_SCAN_RESULT_SKIPPED;
}
- if (!FileHasAcceptableExtension(extension)) {
+ if (!FileHasAcceptableExtension(extension)
+ && !AVUtils::get()->isEnhancedExtension(extension)) {
return MEDIA_SCAN_RESULT_SKIPPED;
}
@@ -118,6 +125,7 @@ MediaScanResult StagefrightMediaScanner::processFileInternal(
{ "genre", METADATA_KEY_GENRE },
{ "title", METADATA_KEY_TITLE },
{ "year", METADATA_KEY_YEAR },
+ { "date", METADATA_KEY_DATE },
{ "duration", METADATA_KEY_DURATION },
{ "writer", METADATA_KEY_WRITER },
{ "compilation", METADATA_KEY_COMPILATION },
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index e37e909..d39f34b 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -23,10 +23,13 @@
#include <gui/Surface.h>
#include "include/StagefrightMetadataRetriever.h"
+#include "include/HTTPBase.h"
#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
+#include <media/stagefright/FFMPEGSoftCodec.h>
+
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -44,6 +47,8 @@
#include <CharacterEncodingDetector.h>
+#include <stagefright/AVExtensions.h>
+
namespace android {
static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
@@ -62,6 +67,12 @@ StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
ALOGV("~StagefrightMetadataRetriever()");
clearMetadata();
mClient.disconnect();
+
+ if (mSource != NULL &&
+ (mSource->flags() & DataSource::kIsHTTPBasedSource)) {
+ mExtractor.clear();
+ static_cast<HTTPBase *>(mSource.get())->disconnect();
+ }
}
status_t StagefrightMetadataRetriever::setDataSource(
@@ -97,6 +108,7 @@ status_t StagefrightMetadataRetriever::setDataSource(
fd = dup(fd);
ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
+ AVUtils::get()->printFileName(fd);
clearMetadata();
mSource = new FileSource(fd, offset, length);
@@ -154,6 +166,8 @@ static VideoFrame *extractVideoFrame(
// TODO: Use Flexible color instead
videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+ videoFormat->setInt32("thumbnail-mode", 1);
+
status_t err;
sp<ALooper> looper = new ALooper;
looper->start();
@@ -214,6 +228,7 @@ static VideoFrame *extractVideoFrame(
err = decoder->getInputBuffers(&inputBuffers);
if (err != OK) {
ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
+ source->stop();
decoder->release();
return NULL;
}
@@ -222,6 +237,7 @@ static VideoFrame *extractVideoFrame(
err = decoder->getOutputBuffers(&outputBuffers);
if (err != OK) {
ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
+ source->stop();
decoder->release();
return NULL;
}
@@ -257,6 +273,9 @@ static VideoFrame *extractVideoFrame(
if (err != OK) {
ALOGW("Input Error or EOS");
haveMoreInputs = false;
+ // Correct the status so we continue to pull output from the decoder
+ err = OK;
+ inputIndex = -1;
break;
}
@@ -346,9 +365,11 @@ static VideoFrame *extractVideoFrame(
}
}
- int32_t width, height;
+ int32_t width, height, stride, slice_height;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
+ CHECK(outputFormat->findInt32("slice-height", &slice_height));
int32_t crop_left, crop_top, crop_right, crop_bottom;
if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
@@ -386,7 +407,7 @@ static VideoFrame *extractVideoFrame(
if (converter.isValid()) {
err = converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ stride, slice_height,
crop_left, crop_top, crop_right, crop_bottom,
frame->mData,
frame->mWidth,
@@ -442,6 +463,10 @@ VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
for (i = 0; i < n; ++i) {
sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ if (meta == NULL) {
+ continue;
+ }
+
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -481,11 +506,19 @@ VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
mime,
false, /* encoder */
NULL, /* matchComponentName */
- OMXCodec::kPreferSoftwareCodecs,
+ 0 /* OMXCodec::kPreferSoftwareCodecs */,
&matchingCodecs);
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const char *componentName = matchingCodecs[i].mName.string();
+ const char *ffmpegComponentName;
+ /* determine whether ffmpeg should override a broken h/w codec */
+ ffmpegComponentName = FFMPEGSoftCodec::overrideComponentName(0, trackMeta, mime, false);
+ if (ffmpegComponentName) {
+ ALOGV("override compoent %s to %s for video frame extraction.", componentName, ffmpegComponentName);
+ componentName = ffmpegComponentName;
+ }
+
VideoFrame *frame =
extractVideoFrame(componentName, trackMeta, source, timeUs, option);
@@ -612,6 +645,10 @@ void StagefrightMetadataRetriever::parseMetaData() {
size_t numTracks = mExtractor->countTracks();
+ if (numTracks == 0) { // no tracks available: corrupt or invalid stream
+ return;
+ }
+
char tmp[32];
sprintf(tmp, "%zu", numTracks);
@@ -635,6 +672,9 @@ void StagefrightMetadataRetriever::parseMetaData() {
String8 timedTextLang;
for (size_t i = 0; i < numTracks; ++i) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (trackMeta == NULL) {
+ continue;
+ }
int64_t durationUs;
if (trackMeta->findInt64(kKeyDuration, &durationUs)) {
@@ -661,9 +701,12 @@ void StagefrightMetadataRetriever::parseMetaData() {
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
const char *lang;
- trackMeta->findCString(kKeyMediaLanguage, &lang);
- timedTextLang.append(String8(lang));
- timedTextLang.append(String8(":"));
+ if (trackMeta->findCString(kKeyMediaLanguage, &lang)) {
+ timedTextLang.append(String8(lang));
+ timedTextLang.append(String8(":"));
+ } else {
+ ALOGE("No language found for timed text");
+ }
}
}
}
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index e8abf48..147eb45 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -35,6 +35,9 @@
#include <utils/String8.h>
#include <private/gui/ComposerService.h>
+#if QTI_BSP
+#include <gralloc_priv.h>
+#endif
namespace android {
@@ -59,8 +62,12 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeig
BufferQueue::createBufferQueue(&mProducer, &mConsumer);
mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
- mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_HW_TEXTURE);
+ mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER
+ | GRALLOC_USAGE_HW_TEXTURE
+#if QTI_BSP
+ | GRALLOC_USAGE_PRIVATE_WFD
+#endif
+ );
sp<ISurfaceComposer> composer(ComposerService::getComposerService());
@@ -360,7 +367,11 @@ status_t SurfaceMediaSource::read(
mNumFramesEncoded++;
// Pass the data to the MediaBuffer. Pass in only the metadata
-
+ if (mSlots[mCurrentSlot].mGraphicBuffer == NULL) {
+ ALOGV("Read: SurfaceMediaSource mGraphicBuffer is null. Returning"
+ "ERROR_END_OF_STREAM.");
+ return ERROR_END_OF_STREAM;
+ }
passMetadataBuffer(buffer, mSlots[mCurrentSlot].mGraphicBuffer->handle);
(*buffer)->setObserver(this);
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 0d9dc3a..489ccc3 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -35,6 +35,10 @@
#include <media/stagefright/Utils.h>
#include <media/AudioParameter.h>
+#include <stagefright/AVExtensions.h>
+#include <media/stagefright/FFMPEGSoftCodec.h>
+#include <media/stagefright/foundation/ABitReader.h>
+
namespace android {
uint16_t U16_AT(const uint8_t *ptr) {
@@ -70,7 +74,7 @@ uint64_t hton64(uint64_t x) {
return ((uint64_t)htonl(x & 0xffffffff) << 32) | htonl(x >> 32);
}
-static status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length) {
+status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length) {
if (((*buffer)->size() + 4 + length) > ((*buffer)->capacity() - (*buffer)->offset())) {
sp<ABuffer> tmpBuffer = new (std::nothrow) ABuffer((*buffer)->size() + 4 + length + 1024);
if (tmpBuffer.get() == NULL || tmpBuffer->base() == NULL) {
@@ -104,7 +108,7 @@ status_t convertMetaDataToMessage(
int avgBitRate;
if (meta->findInt32(kKeyBitRate, &avgBitRate)) {
- msg->setInt32("bit-rate", avgBitRate);
+ msg->setInt32("bitrate", avgBitRate);
}
int32_t isSync;
@@ -203,6 +207,11 @@ status_t convertMetaDataToMessage(
msg->setInt32("frame-rate", fps);
}
+ int32_t bitsPerSample;
+ if (meta->findInt32(kKeyBitsPerSample, &bitsPerSample)) {
+ msg->setInt32("bits-per-sample", bitsPerSample);
+ }
+
uint32_t type;
const void *data;
size_t size;
@@ -308,7 +317,7 @@ status_t convertMetaDataToMessage(
} else if (meta->findData(kKeyHVCC, &type, &data, &size)) {
const uint8_t *ptr = (const uint8_t *)data;
- if (size < 23 || ptr[0] != 1) { // configurationVersion == 1
+ if (size < 23) { // configurationVersion == 1
ALOGE("b/23680780");
return BAD_VALUE;
}
@@ -453,8 +462,17 @@ status_t convertMetaDataToMessage(
msg->setBuffer("csd-2", buffer);
}
+ AVUtils::get()->convertMetaDataToMessage(meta, &msg);
+
+ FFMPEGSoftCodec::convertMetaDataToMessageFF(meta, &msg);
*format = msg;
+#if 0
+ ALOGI("convertMetaDataToMessage from:");
+ meta->dumpToLog();
+ ALOGI(" to: %s", msg->debugString(0).c_str());
+#endif
+
return OK;
}
@@ -646,6 +664,11 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
if (msg->findInt32("is-adts", &isADTS)) {
meta->setInt32(kKeyIsADTS, isADTS);
}
+
+ int32_t bitsPerSample;
+ if (msg->findInt32("bits-per-sample", &bitsPerSample)) {
+ meta->setInt32(kKeyBitsPerSample, bitsPerSample);
+ }
}
int32_t maxInputSize;
@@ -705,8 +728,10 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
// XXX TODO add whatever other keys there are
+ FFMPEGSoftCodec::convertMessageToMetaDataFF(msg, meta);
+
#if 0
- ALOGI("converted %s to:", msg->debugString(0).c_str());
+ ALOGI("convertMessageToMetaData from %s to:", msg->debugString(0).c_str());
meta->dumpToLog();
#endif
}
@@ -747,13 +772,12 @@ status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
if (meta->findInt32(kKeyBitRate, &bitRate)) {
param.addInt(String8(AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE), bitRate);
}
- if (meta->findInt32(kKeyEncoderDelay, &delaySamples)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
- }
- if (meta->findInt32(kKeyEncoderPadding, &paddingSamples)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
- }
+ meta->findInt32(kKeyEncoderDelay, &delaySamples);
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
+ meta->findInt32(kKeyEncoderPadding, &paddingSamples);
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
+ AVUtils::get()->sendMetaDataToHal(meta, &param);
ALOGV("sendMetaDataToHal: bitRate %d, sampleRate %d, chanMask %d,"
"delaySample %d, paddingSample %d", bitRate, sampleRate,
channelMask, delaySamples, paddingSamples);
@@ -775,6 +799,9 @@ static const struct mime_conv_t mimeLookup[] = {
{ MEDIA_MIMETYPE_AUDIO_AAC, AUDIO_FORMAT_AAC },
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
{ MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
+#ifdef FLAC_OFFLOAD_ENABLED
+ { MEDIA_MIMETYPE_AUDIO_FLAC, AUDIO_FORMAT_FLAC},
+#endif
{ 0, AUDIO_FORMAT_INVALID }
};
@@ -789,7 +816,7 @@ const struct mime_conv_t* p = &mimeLookup[0];
++p;
}
- return BAD_VALUE;
+ return AVUtils::get()->mapMimeToAudioFormat(format, mime);
}
struct aac_format_conv_t {
@@ -843,18 +870,28 @@ bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
} else {
ALOGV("Mime type \"%s\" mapped to audio_format %d", mime, info.format);
}
-
+ info.format = AVUtils::get()->updateAudioFormat(info.format, meta);
if (AUDIO_FORMAT_INVALID == info.format) {
// can't offload if we don't know what the source format is
ALOGE("mime type \"%s\" not a known audio format", mime);
return false;
}
+ if (AVUtils::get()->canOffloadAPE(meta) != true) {
+ return false;
+ }
+ ALOGV("Mime type \"%s\" mapped to audio_format %d", mime, info.format);
+
// Redefine aac format according to its profile
// Offloading depends on audio DSP capabilities.
int32_t aacaot = -1;
if (meta->findInt32(kKeyAACAOT, &aacaot)) {
- mapAACProfileToAudioFormat(info.format,(OMX_AUDIO_AACPROFILETYPE) aacaot);
+ bool isADTSSupported = false;
+ isADTSSupported = AVUtils::get()->mapAACProfileToAudioFormat(meta, info.format,
+ (OMX_AUDIO_AACPROFILETYPE) aacaot);
+ if (!isADTSSupported) {
+ mapAACProfileToAudioFormat(info.format,(OMX_AUDIO_AACPROFILETYPE) aacaot);
+ }
}
int32_t srate = -1;
@@ -864,7 +901,7 @@ bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
info.sample_rate = srate;
int32_t cmask = 0;
- if (!meta->findInt32(kKeyChannelMask, &cmask)) {
+ if (!meta->findInt32(kKeyChannelMask, &cmask) || 0 == cmask) {
ALOGV("track of type '%s' does not publish channel mask", mime);
// Try a channel count instead
@@ -1017,5 +1054,240 @@ void readFromAMessage(
*sync = settings;
}
+audio_format_t getPCMFormat(const sp<AMessage> &format) {
+ int32_t bits = 16;
+ if (format->findInt32("bits-per-sample", &bits)) {
+ if (bits == 8)
+ return AUDIO_FORMAT_PCM_8_BIT;
+ if (bits == 24)
+ return AUDIO_FORMAT_PCM_32_BIT;
+ if (bits == 32)
+ return AUDIO_FORMAT_PCM_FLOAT;
+ }
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+void updateVideoTrackInfoFromESDS_MPEG4Video(sp<MetaData> meta) {
+ const char* mime = NULL;
+ if (meta != NULL && meta->findCString(kKeyMIMEType, &mime) &&
+ mime && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ if (!meta->findData(kKeyESDS, &type, &data, &size) || !data) {
+ ALOGW("ESDS atom is invalid");
+ return;
+ }
+
+ if (checkDPFromCodecSpecificData((const uint8_t*) data, size)) {
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4_DP);
+ }
+ }
+}
+
+bool checkDPFromCodecSpecificData(const uint8_t *data, size_t size) {
+ bool retVal = false;
+ size_t offset = 0, startCodeOffset = 0;
+ bool isStartCode = false;
+ const int kVolStartCode = 0x20;
+ const char kStartCode[] = "\x00\x00\x01";
+ // must contain at least 4 bytes for video_object_layer_start_code
+ const size_t kMinCsdSize = 4;
+
+ if (!data || (size < kMinCsdSize)) {
+ ALOGV("Invalid CSD (expected at least %zu bytes)", kMinCsdSize);
+ return retVal;
+ }
+
+ while (offset < size - 3) {
+ if ((data[offset + 3] & 0xf0) == kVolStartCode) {
+ if (!memcmp(&data[offset], kStartCode, 3)) {
+ startCodeOffset = offset;
+ isStartCode = true;
+ break;
+ }
+ }
+
+ offset++;
+ }
+
+ if (isStartCode) {
+ retVal = checkDPFromVOLHeader((const uint8_t*) &data[startCodeOffset],
+ (size - startCodeOffset));
+ }
+
+ return retVal;
+}
+
+bool checkDPFromVOLHeader(const uint8_t *data, size_t size) {
+ bool retVal = false;
+ // must contain at least 4 bytes for video_object_layer_start_code + 9 bits of data
+ const size_t kMinHeaderSize = 6;
+
+ if (!data || (size < kMinHeaderSize)) {
+ ALOGV("Invalid VOL header (expected at least %zu bytes)", kMinHeaderSize);
+ return false;
+ }
+
+ ALOGV("Checking for MPEG4 DP bit");
+ ABitReader br(&data[4], (size - 4));
+ br.skipBits(1); // random_accessible_vol
+
+ unsigned videoObjectTypeIndication = br.getBits(8);
+ if (videoObjectTypeIndication == 0x12u) {
+ ALOGW("checkDPFromVOLHeader: videoObjectTypeIndication:%u",
+ videoObjectTypeIndication);
+ return false;
+ }
+
+ unsigned videoObjectLayerVerid = 1;
+ if (br.getBits(1)) {
+ videoObjectLayerVerid = br.getBits(4);
+ br.skipBits(3); // video_object_layer_priority
+ ALOGV("checkDPFromVOLHeader: videoObjectLayerVerid:%u",
+ videoObjectLayerVerid);
+ }
+
+ if (br.getBits(4) == 0x0f) { // aspect_ratio_info
+ ALOGV("checkDPFromVOLHeader: extended PAR");
+ br.skipBits(8); // par_width
+ br.skipBits(8); // par_height
+ }
+
+ if (br.getBits(1)) { // vol_control_parameters
+ br.skipBits(2); // chroma_format
+ br.skipBits(1); // low_delay
+ if (br.getBits(1)) { // vbv_parameters
+ br.skipBits(15); // first_half_bit_rate
+ br.skipBits(1); // marker_bit
+ br.skipBits(15); // latter_half_bit_rate
+ br.skipBits(1); // marker_bit
+ br.skipBits(15); // first_half_vbv_buffer_size
+ br.skipBits(1); // marker_bit
+ br.skipBits(3); // latter_half_vbv_buffer_size
+ br.skipBits(11); // first_half_vbv_occupancy
+ br.skipBits(1); // marker_bit
+ br.skipBits(15); // latter_half_vbv_occupancy
+ br.skipBits(1); // marker_bit
+ }
+ }
+
+ unsigned videoObjectLayerShape = br.getBits(2);
+ if (videoObjectLayerShape != 0x00u /* rectangular */) {
+ ALOGV("checkDPFromVOLHeader: videoObjectLayerShape:%x",
+ videoObjectLayerShape);
+ return false;
+ }
+
+ br.skipBits(1); // marker_bit
+ unsigned vopTimeIncrementResolution = br.getBits(16);
+ br.skipBits(1); // marker_bit
+ if (br.getBits(1)) { // fixed_vop_rate
+ // range [0..vopTimeIncrementResolution)
+
+ // vopTimeIncrementResolution
+ // 2 => 0..1, 1 bit
+ // 3 => 0..2, 2 bits
+ // 4 => 0..3, 2 bits
+ // 5 => 0..4, 3 bits
+ // ...
+
+ if (vopTimeIncrementResolution <= 0u) {
+ return BAD_VALUE;
+ }
+
+ --vopTimeIncrementResolution;
+ unsigned numBits = 0;
+ while (vopTimeIncrementResolution > 0) {
+ ++numBits;
+ vopTimeIncrementResolution >>= 1;
+ }
+
+ br.skipBits(numBits); // fixed_vop_time_increment
+ }
+
+ br.skipBits(1); // marker_bit
+ br.skipBits(13); // video_object_layer_width
+ br.skipBits(1); // marker_bit
+ br.skipBits(13); // video_object_layer_height
+ br.skipBits(1); // marker_bit
+ br.skipBits(1); // interlaced
+ br.skipBits(1); // obmc_disable
+ unsigned spriteEnable = 0;
+ if (videoObjectLayerVerid == 1) {
+ spriteEnable = br.getBits(1);
+ } else {
+ spriteEnable = br.getBits(2);
+ }
+
+ if (spriteEnable == 0x1) { // static
+ int spriteWidth = br.getBits(13);
+ ALOGV("checkDPFromVOLHeader: spriteWidth:%d", spriteWidth);
+ br.skipBits(1) ; // marker_bit
+ br.skipBits(13); // sprite_height
+ br.skipBits(1); // marker_bit
+ br.skipBits(13); // sprite_left_coordinate
+ br.skipBits(1); // marker_bit
+ br.skipBits(13); // sprite_top_coordinate
+ br.skipBits(1); // marker_bit
+ br.skipBits(6); // no_of_sprite_warping_points
+ br.skipBits(2); // sprite_warping_accuracy
+ br.skipBits(1); // sprite_brightness_change
+ br.skipBits(1); // low_latency_sprite_enable
+ } else if (spriteEnable == 0x2) { // GMC
+ br.skipBits(6); // no_of_sprite_warping_points
+ br.skipBits(2); // sprite_warping_accuracy
+ br.skipBits(1); // sprite_brightness_change
+ }
+
+ if (videoObjectLayerVerid != 1
+ && videoObjectLayerShape != 0x0u) {
+ br.skipBits(1);
+ }
+
+ if (br.getBits(1)) { // not_8_bit
+ br.skipBits(4); // quant_precision
+ br.skipBits(4); // bits_per_pixel
+ }
+
+ if (videoObjectLayerShape == 0x3) {
+ br.skipBits(1);
+ br.skipBits(1);
+ br.skipBits(1);
+ }
+
+ if (br.getBits(1)) { // quant_type
+ if (br.getBits(1)) { // load_intra_quant_mat
+ unsigned IntraQuantMat = 1;
+ for (int i = 0; i < 64 && IntraQuantMat; i++) {
+ IntraQuantMat = br.getBits(8);
+ }
+ }
+
+ if (br.getBits(1)) { // load_non_intra_quant_matrix
+ unsigned NonIntraQuantMat = 1;
+ for (int i = 0; i < 64 && NonIntraQuantMat; i++) {
+ NonIntraQuantMat = br.getBits(8);
+ }
+ }
+ } /* quantType */
+
+ if (videoObjectLayerVerid != 1) {
+ unsigned quarterSample = br.getBits(1);
+ ALOGV("checkDPFromVOLHeader: quarterSample:%u",
+ quarterSample);
+ }
+
+ br.skipBits(1); // complexity_estimation_disable
+ br.skipBits(1); // resync_marker_disable
+ unsigned dataPartitioned = br.getBits(1);
+ if (dataPartitioned) {
+ retVal = true;
+ }
+
+ ALOGD("checkDPFromVOLHeader: DP:%u", dataPartitioned);
+ return retVal;
+}
+
} // namespace android
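Inside checkDPFromVOLHeader() above, the width of the fixed_vop_time_increment field is derived from vop_time_increment_resolution by counting how many bits are needed to code a value in [0, resolution), which is why the code decrements the resolution and shifts it down. A standalone version of just that small computation (the function name is mine):

    #include <cstdio>

    // Number of bits needed to code a value in the range [0, resolution),
    // as used for fixed_vop_time_increment in an MPEG-4 VOL header.
    unsigned bitsForVopTimeIncrement(unsigned resolution) {
        if (resolution <= 1) {
            return 0;
        }
        unsigned maxValue = resolution - 1;
        unsigned numBits = 0;
        while (maxValue > 0) {
            ++numBits;
            maxValue >>= 1;
        }
        return numBits;
    }

    int main() {
        // resolution 2 -> 1 bit, 4 -> 2 bits, 5 -> 3 bits, 30 -> 5 bits
        printf("%u %u %u %u\n",
               bitsForVopTimeIncrement(2), bitsForVopTimeIncrement(4),
               bitsForVopTimeIncrement(5), bitsForVopTimeIncrement(30));
        return 0;
    }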
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 5fe9bf9..c17faf3 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -257,8 +257,7 @@ void VideoFrameScheduler::PLL::prime(size_t numSamplesToUse) {
mPhase = firstTime;
}
}
- ALOGV("priming[%zu] phase:%lld period:%lld",
- numSamplesToUse, (long long)mPhase, (long long)mPeriod);
+ ALOGV("priming[%zu] phase:%lld period:%lld ", numSamplesToUse, (long long)mPhase, (long long)mPeriod);
}
nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
@@ -460,14 +459,16 @@ nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
mTimeCorrection -= mVsyncPeriod / 2;
renderTime -= mVsyncPeriod / 2;
nextVsyncTime -= mVsyncPeriod;
- --vsyncsForLastFrame;
+ if (vsyncsForLastFrame > 0)
+ --vsyncsForLastFrame;
} else if (mTimeCorrection < -correctionLimit &&
(vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
// add a VSYNC
mTimeCorrection += mVsyncPeriod / 2;
renderTime += mVsyncPeriod / 2;
nextVsyncTime += mVsyncPeriod;
- ++vsyncsForLastFrame;
+ if (vsyncsForLastFrame < ULONG_MAX)
+ ++vsyncsForLastFrame;
}
ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
}
diff --git a/media/libstagefright/WAVEWriter.cpp b/media/libstagefright/WAVEWriter.cpp
new file mode 100644
index 0000000..2bd4287
--- /dev/null
+++ b/media/libstagefright/WAVEWriter.cpp
@@ -0,0 +1,324 @@
+/*
+* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+* * Neither the name of The Linux Foundation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WAVEWriter"
+#include <utils/Log.h>
+
+#include <media/stagefright/WAVEWriter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/mediarecorder.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <inttypes.h>
+
+namespace android {
+
+static struct wav_header hdr;
+
+
+WAVEWriter::WAVEWriter(const char *filename)
+ : mFd(-1),
+ mInitCheck(NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false) {
+
+ mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (mFd >= 0) {
+ mInitCheck = OK;
+ }
+}
+
+WAVEWriter::WAVEWriter(int fd)
+ : mFd(dup(fd)),
+ mInitCheck(mFd < 0? NO_INIT: OK),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false) {
+}
+
+WAVEWriter::~WAVEWriter() {
+ if (mStarted) {
+ stop();
+ }
+
+ if (mFd != -1) {
+ close(mFd);
+ mFd = -1;
+ }
+}
+
+status_t WAVEWriter::initCheck() const {
+ return mInitCheck;
+}
+
+status_t WAVEWriter::addSource(const sp<MediaSource> &source) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource != NULL) {
+ ALOGE("A source already exists, return");
+ return UNKNOWN_ERROR;
+ }
+
+ sp<MetaData> meta = source->getFormat();
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ int32_t channelCount;
+ int32_t sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+ memset(&hdr, 0, sizeof(struct wav_header));
+ hdr.riff_id = ID_RIFF;
+ hdr.riff_fmt = ID_WAVE;
+ hdr.fmt_id = ID_FMT;
+ hdr.fmt_sz = 16;
+ hdr.audio_format = FORMAT_PCM;
+ hdr.num_channels = channelCount;
+ hdr.sample_rate = sampleRate;
+ hdr.bits_per_sample = 16;
+ hdr.byte_rate = (sampleRate * channelCount * hdr.bits_per_sample) / 8;
+ hdr.block_align = ( hdr.bits_per_sample * channelCount ) / 8;
+ hdr.data_id = ID_DATA;
+ hdr.data_sz = 0;
+ hdr.riff_sz = hdr.data_sz + 44 - 8;
+
+ if (write(mFd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
+ ALOGE("Write header error, return ERROR_IO");
+ return -ERROR_IO;
+ }
+
+ mSource = source;
+
+ return OK;
+}
+
+status_t WAVEWriter::start(MetaData * /* params */) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource == NULL) {
+ ALOGE("NULL Source");
+ return UNKNOWN_ERROR;
+ }
+
+ if (mStarted && mPaused) {
+ mPaused = false;
+ mResumed = true;
+ return OK;
+ } else if (mStarted) {
+ ALOGE("Already startd, return");
+ return OK;
+ }
+
+ status_t err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ mReachedEOS = false;
+ mDone = false;
+
+ pthread_create(&mThread, &attr, ThreadWrapper, this);
+ pthread_attr_destroy(&attr);
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t WAVEWriter::pause() {
+ if (!mStarted) {
+ return OK;
+ }
+ mPaused = true;
+ return OK;
+}
+
+status_t WAVEWriter::stop() {
+ if (!mStarted) {
+ return OK;
+ }
+
+ mDone = true;
+
+ void *dummy;
+ pthread_join(mThread, &dummy);
+
+ status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+ {
+ status_t status = mSource->stop();
+ if (err == OK &&
+ (status != OK && status != ERROR_END_OF_STREAM)) {
+ err = status;
+ }
+ }
+
+ mStarted = false;
+ return err;
+}
+
+bool WAVEWriter::exceedsFileSizeLimit() {
+ if (mMaxFileSizeLimitBytes == 0) {
+ return false;
+ }
+ return mEstimatedSizeBytes >= mMaxFileSizeLimitBytes;
+}
+
+bool WAVEWriter::exceedsFileDurationLimit() {
+ if (mMaxFileDurationLimitUs == 0) {
+ return false;
+ }
+ return mEstimatedDurationUs >= mMaxFileDurationLimitUs;
+}
+
+// static
+void *WAVEWriter::ThreadWrapper(void *me) {
+ return (void *) (uintptr_t)static_cast<WAVEWriter *>(me)->threadFunc();
+}
+
+status_t WAVEWriter::threadFunc() {
+ mEstimatedDurationUs = 0;
+ mEstimatedSizeBytes = 0;
+ bool stoppedPrematurely = true;
+ int64_t previousPausedDurationUs = 0;
+ int64_t maxTimestampUs = 0;
+ status_t err = OK;
+
+ prctl(PR_SET_NAME, (unsigned long)"WAVEWriter", 0, 0, 0);
+ hdr.data_sz = 0;
+ while (!mDone) {
+ MediaBuffer *buffer;
+ err = mSource->read(&buffer);
+
+ if (err != OK) {
+ break;
+ }
+
+ if (mPaused) {
+ buffer->release();
+ buffer = NULL;
+ continue;
+ }
+
+ mEstimatedSizeBytes += buffer->range_length();
+ if (exceedsFileSizeLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ break;
+ }
+
+ int64_t timestampUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ if (timestampUs > mEstimatedDurationUs) {
+ mEstimatedDurationUs = timestampUs;
+ }
+ if (mResumed) {
+ previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+ mResumed = false;
+ }
+ timestampUs -= previousPausedDurationUs;
+ ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64,
+ timestampUs, previousPausedDurationUs);
+ if (timestampUs > maxTimestampUs) {
+ maxTimestampUs = timestampUs;
+ }
+
+ if (exceedsFileDurationLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+ break;
+ }
+ ssize_t n = write(mFd,
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+
+ hdr.data_sz += (ssize_t)buffer->range_length();
+ hdr.riff_sz = hdr.data_sz + 44 - 8;
+
+ if (n < (ssize_t)buffer->range_length()) {
+ buffer->release();
+ buffer = NULL;
+
+ break;
+ }
+
+ if (stoppedPrematurely) {
+ stoppedPrematurely = false;
+ }
+
+ buffer->release();
+ buffer = NULL;
+ }
+
+ if (stoppedPrematurely) {
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS, UNKNOWN_ERROR);
+ }
+
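+ // Rewind and rewrite the WAV header so data_sz/riff_sz reflect what was written.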
+ lseek(mFd, 0, SEEK_SET);
+ write(mFd, &hdr, sizeof(hdr));
+ lseek(mFd, 0, SEEK_END);
+
+ close(mFd);
+ mFd = -1;
+ mReachedEOS = true;
+ if (err == ERROR_END_OF_STREAM) {
+ return OK;
+ }
+ return err;
+}
+
+bool WAVEWriter::reachedEOS() {
+ return mReachedEOS;
+}
+
+} // namespace android
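
One non-obvious part of threadFunc() is the pause handling: buffers read while mPaused are dropped, and on resume the wall-clock gap (minus an assumed 20 ms inter-buffer spacing) is folded into previousPausedDurationUs so output timestamps stay contiguous. A small self-contained sketch of that bookkeeping with made-up timestamps:

#include <cstdint>
#include <cstdio>

int main() {
    // Made-up 20 ms capture timestamps (us); recording pauses after 60 ms
    // and resumes 500 ms later. Index 4 is the first buffer after resume.
    const int64_t inputTs[] = {0, 20000, 40000, 60000, 580000, 600000, 620000};

    int64_t previousPausedDurationUs = 0;
    int64_t maxTimestampUs = 0;
    bool resumed = false;

    for (size_t i = 0; i < sizeof(inputTs) / sizeof(inputTs[0]); ++i) {
        int64_t ts = inputTs[i];
        if (i == 4) {
            resumed = true;               // set by start() after a pause()
        }
        if (resumed) {
            // Same adjustment as in threadFunc(); 20000 us is the writer's
            // assumed spacing between consecutive input buffers.
            previousPausedDurationUs += ts - maxTimestampUs - 20000;
            resumed = false;
        }
        ts -= previousPausedDurationUs;
        if (ts > maxTimestampUs) {
            maxTimestampUs = ts;
        }
        std::printf("in=%7lld us  out=%7lld us\n",
                    (long long)inputTs[i], (long long)ts);
    }
    return 0;
}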
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 335ac84..62bb416 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -29,6 +29,7 @@
#include <media/stagefright/MetaData.h>
#include <utils/String8.h>
#include <cutils/bitops.h>
+#include <system/audio.h>
#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
@@ -193,15 +194,17 @@ status_t WAVExtractor::init() {
}
mNumChannels = U16_LE_AT(&formatSpec[2]);
+
+ if (mNumChannels < 1 || mNumChannels > 8) {
+ ALOGE("Unsupported number of channels (%d)", mNumChannels);
+ return ERROR_UNSUPPORTED;
+ }
+
if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
if (mNumChannels != 1 && mNumChannels != 2) {
ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
mNumChannels);
}
- } else {
- if (mNumChannels < 1 && mNumChannels > 8) {
- return ERROR_UNSUPPORTED;
- }
}
mSampleRate = U32_LE_AT(&formatSpec[4]);
@@ -284,6 +287,7 @@ status_t WAVExtractor::init() {
case WAVE_FORMAT_PCM:
mTrackMeta->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ mTrackMeta->setInt32(kKeyBitsPerSample, mBitsPerSample);
break;
case WAVE_FORMAT_ALAW:
mTrackMeta->setCString(
@@ -311,9 +315,17 @@ status_t WAVExtractor::init() {
1000000LL * (mDataSize / 65 * 320) / 8000;
} else {
size_t bytesPerSample = mBitsPerSample >> 3;
+
+ if (!bytesPerSample || !mNumChannels)
+ return ERROR_MALFORMED;
+
+ size_t num_samples = mDataSize / (mNumChannels * bytesPerSample);
+
+ if (!mSampleRate)
+ return ERROR_MALFORMED;
+
durationUs =
- 1000000LL * (mDataSize / (mNumChannels * bytesPerSample))
- / mSampleRate;
+ 1000000LL * num_samples / mSampleRate;
}
mTrackMeta->setInt64(kKeyDuration, durationUs);
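
For raw PCM the duration computed above is simply frame count divided by sample rate, with the new zero checks preventing a divide-by-zero on malformed headers. A standalone restatement of that arithmetic (ERROR_MALFORMED replaced by a sentinel purely for illustration):

#include <cstdint>
#include <cstdio>

// Duration of a raw PCM payload, mirroring the guarded computation above.
static int64_t pcmDurationUs(uint64_t dataSize, uint32_t numChannels,
                             uint32_t bitsPerSample, uint32_t sampleRate) {
    uint32_t bytesPerSample = bitsPerSample >> 3;
    if (bytesPerSample == 0 || numChannels == 0 || sampleRate == 0) {
        return -1;  // would be ERROR_MALFORMED in the extractor
    }
    uint64_t numFrames = dataSize / (numChannels * bytesPerSample);
    return (int64_t)(1000000LL * numFrames / sampleRate);
}

int main() {
    // 10 MB of 16-bit stereo at 44.1 kHz -> roughly 59.4 seconds.
    std::printf("%lld us\n",
                (long long)pcmDurationUs(10 * 1024 * 1024, 2, 16, 44100));
    return 0;
}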
@@ -359,15 +371,16 @@ WAVSource::~WAVSource() {
}
status_t WAVSource::start(MetaData * /* params */) {
- ALOGV("WAVSource::start");
- CHECK(!mStarted);
+ if (mStarted) {
+ return OK;
+ }
mGroup = new MediaBufferGroup;
mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
- if (mBitsPerSample == 8) {
- // As a temporary buffer for 8->16 bit conversion.
+ if (mBitsPerSample == 8 || mBitsPerSample == 24) {
+ // As a temporary buffer for 8->16/24->32 bit conversion.
mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
}
@@ -427,9 +440,15 @@ status_t WAVSource::read(
}
// make sure that maxBytesToRead is multiple of 3, in 24-bit case
- size_t maxBytesToRead =
- mBitsPerSample == 8 ? kMaxFrameSize / 2 :
- (mBitsPerSample == 24 ? 3*(kMaxFrameSize/3): kMaxFrameSize);
+ size_t maxBytesToRead;
+ if (mBitsPerSample == 8) {
+     maxBytesToRead = kMaxFrameSize / 2;
+ } else if (mBitsPerSample == 24) {
+     maxBytesToRead = 3 * (kMaxFrameSize / 4);
+ } else {
+     maxBytesToRead = kMaxFrameSize;
+ }
+ ALOGV("%s mBitsPerSample %d, kMaxFrameSize %zu, ",
+ __func__, mBitsPerSample, kMaxFrameSize);
size_t maxBytesAvailable =
(mCurrentPos - mOffset >= (off64_t)mSize)
@@ -488,23 +507,24 @@ status_t WAVSource::read(
buffer->release();
buffer = tmp;
} else if (mBitsPerSample == 24) {
- // Convert 24-bit signed samples to 16-bit signed.
-
- const uint8_t *src =
- (const uint8_t *)buffer->data() + buffer->range_offset();
- int16_t *dst = (int16_t *)src;
-
- size_t numSamples = buffer->range_length() / 3;
- for (size_t i = 0; i < numSamples; ++i) {
- int32_t x = (int32_t)(src[0] | src[1] << 8 | src[2] << 16);
- x = (x << 8) >> 8; // sign extension
-
- x = x >> 8;
- *dst++ = (int16_t)x;
- src += 3;
+ // Pad each 24-bit sample to a 32-bit sample (zero in the low byte).
+ MediaBuffer *tmp;
+ CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
+ ssize_t numBytes = buffer->range_length() / 3;
+ tmp->set_range(0, 4 * numBytes);
+ int8_t *dst = (int8_t *)tmp->data();
+ const uint8_t *src = (const uint8_t *)buffer->data();
+ ALOGV("numBytes = %zd", numBytes);
+ while (numBytes-- > 0) {
+ *dst++ = 0x0;
+ *dst++ = src[0];
+ *dst++ = src[1];
+ *dst++ = src[2];
+ src += 3;
}
-
- buffer->set_range(buffer->range_offset(), 2 * numSamples);
+ buffer->release();
+ buffer = tmp;
+ ALOGV("length = %zu", buffer->range_length());
}
}
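
The rewritten 24-bit path no longer truncates to 16 bits; it left-justifies each 3-byte sample into a 32-bit word by prepending a zero byte, which on a little-endian host is equivalent to shifting the signed value left by 8. A tiny sketch of that packing for a single sample (little-endian assumed, as in the loop above):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    // One signed 24-bit little-endian sample: -2, stored as FE FF FF.
    const uint8_t src[3] = {0xFE, 0xFF, 0xFF};

    // Same packing as the loop above: zero byte first, then the 3 sample
    // bytes, so the value lands in the top 3 bytes of the 32-bit word.
    const uint8_t dst[4] = {0x00, src[0], src[1], src[2]};

    int32_t sample;
    std::memcpy(&sample, dst, sizeof(sample));
    std::printf("packed: 0x%08x (%d)\n", (uint32_t)sample, sample); // 0xfffffe00 (-512)
    return 0;
}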
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 8ef2dca..98b5c0e 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -25,6 +25,7 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
#include <utils/misc.h>
@@ -393,7 +394,7 @@ sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit) {
meta->setInt32(kKeyWidth, width);
meta->setInt32(kKeyHeight, height);
- if (sarWidth > 1 || sarHeight > 1) {
+ if (sarWidth > 1 && sarHeight > 1) {
// We treat 0:0 (unspecified) as 1:1.
meta->setInt32(kKeySARWidth, sarWidth);
@@ -443,25 +444,34 @@ bool IsIDR(const sp<ABuffer> &buffer) {
}
bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit) {
- const uint8_t *data = accessUnit->data();
+ MediaBuffer *mediaBuffer =
+ (MediaBuffer *)(accessUnit->getMediaBufferBase());
+ const uint8_t *data =
+ (mediaBuffer != NULL) ? (uint8_t *) mediaBuffer->data() : accessUnit->data();
size_t size = accessUnit->size();
const uint8_t *nalStart;
size_t nalSize;
+ bool bIsReferenceFrame = true;
while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
CHECK_GT(nalSize, 0u);
unsigned nalType = nalStart[0] & 0x1f;
if (nalType == 5) {
- return true;
+ bIsReferenceFrame = true;
+ break;
} else if (nalType == 1) {
unsigned nal_ref_idc = (nalStart[0] >> 5) & 3;
- return nal_ref_idc != 0;
+ bIsReferenceFrame = (nal_ref_idc != 0);
+ break;
}
}
- return true;
+ if (mediaBuffer != NULL) {
+ mediaBuffer->release();
+ }
+ return bIsReferenceFrame;
}
sp<MetaData> MakeAACCodecSpecificData(
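
The IsAVCReferenceFrame() change above keys off the first byte of each NAL unit, whose H.264 layout is forbidden_zero_bit (1 bit), nal_ref_idc (2 bits), nal_unit_type (5 bits). A quick illustration of the two bit extractions the loop relies on:

#include <cstdint>
#include <cstdio>

int main() {
    // First byte of an H.264 NAL unit.
    const uint8_t nalHeader = 0x65;  // typical IDR slice header byte

    unsigned nalType   = nalHeader & 0x1f;        // 5 -> IDR slice
    unsigned nalRefIdc = (nalHeader >> 5) & 0x3;  // 3 -> used as a reference

    std::printf("nal_unit_type=%u nal_ref_idc=%u\n", nalType, nalRefIdc);
    return 0;
}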
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 8ddff90..0d3151d 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -68,6 +68,8 @@ SoftAAC2::SoftAAC2(
mOutputBufferCount(0),
mSignalledError(false),
mLastInHeader(NULL),
+ mLastHeaderTimeUs(-1),
+ mNextOutBufferTimeUs(0),
mOutputPortSettingsChange(NONE) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
@@ -517,6 +519,27 @@ int32_t SoftAAC2::outputDelayRingBufferSpaceLeft() {
return mOutputDelayRingBufferSize - outputDelayRingBufferSamplesAvailable();
}
+void SoftAAC2::updateTimeStamp(int64_t inHeaderTimeUs) {
+ // Use the new input buffer timestamp as the anchor time if it is
+ // a) the first buffer, or
+ // b) the first buffer after a seek, or
+ // c) different from the last buffer's timestamp.
+ // If the input buffer timestamp is the same as the last one, treat it as
+ // erroneous: ignore it and use the last output buffer timestamp as the
+ // anchor time instead.
+ int64_t anchorTimeUs = 0;
+ if (mLastHeaderTimeUs != inHeaderTimeUs) {
+ anchorTimeUs = inHeaderTimeUs;
+ mLastHeaderTimeUs = inHeaderTimeUs;
+ // Store the current buffer's timestamp so it can be used as a reference
+ // when the first frame/buffer is skipped or dropped, e.g. to compensate
+ // for decoder delay.
+ mNextOutBufferTimeUs = inHeaderTimeUs;
+ } else {
+ anchorTimeUs = mNextOutBufferTimeUs;
+ }
+ mBufferTimestamps.add(anchorTimeUs);
+}
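
The rule implemented by updateTimeStamp() can be exercised in isolation: when two consecutive input buffers carry the same timestamp, the second one is anchored to the predicted next-output time instead. A rough standalone sketch at 44.1 kHz (1024 samples per AAC frame, so roughly 23.2 ms per frame); the real decoder advances mNextOutBufferTimeUs in onQueueFilled(), not per loop iteration as done here:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const int64_t frameDurationUs = 1024 * 1000000LL / 44100;  // ~23219 us

    std::vector<int64_t> inputTs = {0, 23220, 23220, 69660};   // third entry repeats
    int64_t lastHeaderTimeUs = -1;
    int64_t nextOutBufferTimeUs = 0;

    for (int64_t ts : inputTs) {
        int64_t anchorUs;
        if (ts != lastHeaderTimeUs) {
            anchorUs = ts;              // trust the new timestamp
            lastHeaderTimeUs = ts;
            nextOutBufferTimeUs = ts;
        } else {
            anchorUs = nextOutBufferTimeUs;  // duplicate: use the prediction
        }
        std::printf("in=%lld anchor=%lld\n", (long long)ts, (long long)anchorUs);
        // After the frame is decoded the prediction moves one frame ahead.
        nextOutBufferTimeUs = anchorUs + frameDurationUs;
    }
    return 0;
}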
void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError || mOutputPortSettingsChange != NONE) {
@@ -646,7 +669,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
// insert buffer size and time stamp
mBufferSizes.add(inBufferLength[0]);
if (mLastInHeader != inHeader) {
- mBufferTimestamps.add(inHeader->nTimeStamp);
+ updateTimeStamp(inHeader->nTimeStamp);
mLastInHeader = inHeader;
} else {
int64_t currentTime = mBufferTimestamps.top();
@@ -658,7 +681,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
inBufferLength[0] = inHeader->nFilledLen;
mLastInHeader = inHeader;
- mBufferTimestamps.add(inHeader->nTimeStamp);
+ updateTimeStamp(inHeader->nTimeStamp);
mBufferSizes.add(inHeader->nFilledLen);
}
@@ -783,6 +806,14 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
if (inHeader && inHeader->nFilledLen == 0) {
inInfo->mOwnedByUs = false;
mInputBufferCount++;
+
+ // During port reconfiguration the current frame is skipped and the next
+ // frame is sent for decoding.
+ // Update mNextOutBufferTimeUs from the current frame's timestamp when the
+ // reconfiguration happens on the last frame of the current buffer;
+ // otherwise the next output buffer timestamp would be zero (post seek).
+ mNextOutBufferTimeUs = mBufferTimestamps.top() + mStreamInfo->aacSamplesPerFrame *
+ 1000000ll / mStreamInfo->aacSampleRate;
inQueue.erase(inQueue.begin());
mLastInHeader = NULL;
inInfo = NULL;
@@ -903,6 +934,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
*currentBufLeft -= decodedSize;
*nextTimeStamp += mStreamInfo->aacSamplesPerFrame *
1000000ll / mStreamInfo->aacSampleRate;
+ mNextOutBufferTimeUs = *nextTimeStamp;
ALOGV("adjusted nextTimeStamp/size to %lld/%d",
(long long) *nextTimeStamp, *currentBufLeft);
} else {
@@ -910,6 +942,7 @@ void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
if (mBufferTimestamps.size() > 0) {
mBufferTimestamps.removeAt(0);
nextTimeStamp = &mBufferTimestamps.editItemAt(0);
+ mNextOutBufferTimeUs = *nextTimeStamp;
mBufferSizes.removeAt(0);
currentBufLeft = &mBufferSizes.editItemAt(0);
ALOGV("moved to next time/size: %lld/%d",
@@ -1004,6 +1037,8 @@ void SoftAAC2::onPortFlushCompleted(OMX_U32 portIndex) {
mDecodedSizes.clear();
mLastInHeader = NULL;
mEndOfInput = false;
+ mLastHeaderTimeUs = -1;
+ mNextOutBufferTimeUs = 0;
} else {
int avail;
while ((avail = outputDelayRingBufferSamplesAvailable()) > 0) {
@@ -1066,6 +1101,8 @@ void SoftAAC2::onReset() {
mBufferSizes.clear();
mDecodedSizes.clear();
mLastInHeader = NULL;
+ mLastHeaderTimeUs = -1;
+ mNextOutBufferTimeUs = 0;
// To make the codec behave the same before and after a reset, we need to invalidate the
// streaminfo struct. This does that:
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index c3e4459..3fe958e 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -59,6 +59,8 @@ private:
size_t mOutputBufferCount;
bool mSignalledError;
OMX_BUFFERHEADERTYPE *mLastInHeader;
+ int64_t mLastHeaderTimeUs;
+ int64_t mNextOutBufferTimeUs;
Vector<int32_t> mBufferSizes;
Vector<int32_t> mDecodedSizes;
Vector<int64_t> mBufferTimestamps;
@@ -90,6 +92,7 @@ private:
int32_t outputDelayRingBufferGetSamples(INT_PCM *samples, int numSamples);
int32_t outputDelayRingBufferSamplesAvailable();
int32_t outputDelayRingBufferSpaceLeft();
+ void updateTimeStamp(int64_t inHeaderTimeUs);
DISALLOW_EVIL_CONSTRUCTORS(SoftAAC2);
};
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 58ec3ba..530f6a7 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -1,6 +1,5 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
-include frameworks/av/media/libstagefright/codecs/common/Config.mk
AAC_LIBRARY = fraunhofer
@@ -35,24 +34,28 @@ LOCAL_SRC_FILES += \
src/transform.c \
src/memalign.c
-ifeq ($(VOTT), v5)
-LOCAL_SRC_FILES += \
- src/asm/ARMV5E/AutoCorrelation_v5.s \
- src/asm/ARMV5E/band_nrg_v5.s \
- src/asm/ARMV5E/CalcWindowEnergy_v5.s \
- src/asm/ARMV5E/PrePostMDCT_v5.s \
- src/asm/ARMV5E/R4R8First_v5.s \
- src/asm/ARMV5E/Radix4FFT_v5.s
-endif
-
-ifeq ($(VOTT), v7)
-LOCAL_SRC_FILES += \
- src/asm/ARMV5E/AutoCorrelation_v5.s \
- src/asm/ARMV5E/band_nrg_v5.s \
- src/asm/ARMV5E/CalcWindowEnergy_v5.s \
- src/asm/ARMV7/PrePostMDCT_v7.s \
- src/asm/ARMV7/R4R8First_v7.s \
- src/asm/ARMV7/Radix4FFT_v7.s
+ifneq ($(ARCH_ARM_HAVE_NEON),true)
+ LOCAL_SRC_FILES_arm := \
+ src/asm/ARMV5E/AutoCorrelation_v5.s \
+ src/asm/ARMV5E/band_nrg_v5.s \
+ src/asm/ARMV5E/CalcWindowEnergy_v5.s \
+ src/asm/ARMV5E/PrePostMDCT_v5.s \
+ src/asm/ARMV5E/R4R8First_v5.s \
+ src/asm/ARMV5E/Radix4FFT_v5.s
+
+ LOCAL_CFLAGS_arm := -DARMV5E -DARM_INASM -DARMV5_INASM
+ LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/src/asm/ARMV5E
+else
+ LOCAL_SRC_FILES_arm := \
+ src/asm/ARMV5E/AutoCorrelation_v5.s \
+ src/asm/ARMV5E/band_nrg_v5.s \
+ src/asm/ARMV5E/CalcWindowEnergy_v5.s \
+ src/asm/ARMV7/PrePostMDCT_v7.s \
+ src/asm/ARMV7/R4R8First_v7.s \
+ src/asm/ARMV7/Radix4FFT_v7.s
+ LOCAL_CFLAGS_arm := -DARMV5E -DARMV7Neon -DARM_INASM -DARMV5_INASM -DARMV6_INASM
+ LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/src/asm/ARMV5E
+ LOCAL_C_INCLUDES_arm += $(LOCAL_PATH)/src/asm/ARMV7
endif
LOCAL_MODULE := libstagefright_aacenc
@@ -71,17 +74,6 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/inc \
$(LOCAL_PATH)/basic_op
-ifeq ($(VOTT), v5)
-LOCAL_CFLAGS += -DARMV5E -DARM_INASM -DARMV5_INASM
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-endif
-
-ifeq ($(VOTT), v7)
-LOCAL_CFLAGS += -DARMV5E -DARMV7Neon -DARM_INASM -DARMV5_INASM -DARMV6_INASM
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
-endif
-
LOCAL_CFLAGS += -Werror
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/codecs/amrnb/common/Android.mk b/media/libstagefright/codecs/amrnb/common/Android.mk
index 5e632a6..80b67bb 100644
--- a/media/libstagefright/codecs/amrnb/common/Android.mk
+++ b/media/libstagefright/codecs/amrnb/common/Android.mk
@@ -7,7 +7,6 @@ LOCAL_SRC_FILES := \
src/bitno_tab.cpp \
src/bitreorder_tab.cpp \
src/bits2prm.cpp \
- src/bytesused.cpp \
src/c2_9pf_tab.cpp \
src/copy.cpp \
src/div_32.cpp \
@@ -38,7 +37,6 @@ LOCAL_SRC_FILES := \
src/mult_r.cpp \
src/norm_l.cpp \
src/norm_s.cpp \
- src/overflow_tbl.cpp \
src/ph_disp_tab.cpp \
src/pow2.cpp \
src/pow2_tbl.cpp \
diff --git a/media/libstagefright/codecs/amrnb/common/include/bytesused.h b/media/libstagefright/codecs/amrnb/common/include/bytesused.h
deleted file mode 100644
index 934efbe..0000000
--- a/media/libstagefright/codecs/amrnb/common/include/bytesused.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/****************************************************************************************
-Portions of this file are derived from the following 3GPP standard:
-
- 3GPP TS 26.073
- ANSI-C code for the Adaptive Multi-Rate (AMR) speech codec
- Available from http://www.3gpp.org
-
-(C) 2004, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
-Permission to distribute, modify and use this file under the standard license
-terms listed above has been obtained from the copyright holder.
-****************************************************************************************/
-/*
-
- Pathname: .audio/gsm-amr/c/include/BytesUsed.h
-
-------------------------------------------------------------------------------
- REVISION HISTORY
-
- Description: Added #ifdef __cplusplus after Include section.
-
- Who: Date:
- Description:
-
-------------------------------------------------------------------------------
- INCLUDE DESCRIPTION
-
- This file declares a table BytesUsed.
-
-------------------------------------------------------------------------------
-*/
-
-/*----------------------------------------------------------------------------
-; CONTINUE ONLY IF NOT ALREADY DEFINED
-----------------------------------------------------------------------------*/
-#ifndef BYTESUSED_H
-#define BYTESUSED_H
-
-/*----------------------------------------------------------------------------
-; INCLUDES
-----------------------------------------------------------------------------*/
-
-/*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
- /*----------------------------------------------------------------------------
- ; MACROS
- ; Define module specific macros here
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; DEFINES
- ; Include all pre-processor statements here.
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; EXTERNAL VARIABLES REFERENCES
- ; Declare variables used in this module but defined elsewhere
- ----------------------------------------------------------------------------*/
- extern const short BytesUsed[];
-
- /*----------------------------------------------------------------------------
- ; SIMPLE TYPEDEF'S
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; ENUMERATED TYPEDEF'S
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; STRUCTURES TYPEDEF'S
- ----------------------------------------------------------------------------*/
-
-
- /*----------------------------------------------------------------------------
- ; GLOBAL FUNCTION DEFINITIONS
- ; Function Prototype declaration
- ----------------------------------------------------------------------------*/
-
-
- /*----------------------------------------------------------------------------
- ; END
- ----------------------------------------------------------------------------*/
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
diff --git a/media/libstagefright/codecs/amrnb/common/src/bytesused.cpp b/media/libstagefright/codecs/amrnb/common/src/bytesused.cpp
deleted file mode 100644
index b61bac4..0000000
--- a/media/libstagefright/codecs/amrnb/common/src/bytesused.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/****************************************************************************************
-Portions of this file are derived from the following 3GPP standard:
-
- 3GPP TS 26.073
- ANSI-C code for the Adaptive Multi-Rate (AMR) speech codec
- Available from http://www.3gpp.org
-
-(C) 2004, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
-Permission to distribute, modify and use this file under the standard license
-terms listed above has been obtained from the copyright holder.
-****************************************************************************************/
-/*
-
- Pathname: ./audio/gsm-amr/c/src/BytesUsed.c
-
-------------------------------------------------------------------------------
- REVISION HISTORY
-
- Description: Corrected entries for all SID frames and updated function
- description. Updated copyright year.
-
- Description: Added #ifdef __cplusplus and removed "extern" from table
- definition. Removed corresponding header file from Include
- section.
-
- Description: Put "extern" back.
-
- Who: Date:
- Description:
-
-------------------------------------------------------------------------------
- INPUT AND OUTPUT DEFINITIONS
-
- Inputs:
- None
-
- Local Stores/Buffers/Pointers Needed:
- None
-
- Global Stores/Buffers/Pointers Needed:
- None
-
- Outputs:
- None
-
- Pointers and Buffers Modified:
- None
-
- Local Stores Modified:
- None
-
- Global Stores Modified:
- None
-
-------------------------------------------------------------------------------
- FUNCTION DESCRIPTION
-
- This function creates a table called BytesUsed that holds the value that
- describes the number of bytes required to hold one frame worth of data in
- the WMF (non-IF2) frame format. Each table entry is the sum of the frame
- type byte and the number of bytes used up by the core speech data for each
- 3GPP frame type.
-
-------------------------------------------------------------------------------
- REQUIREMENTS
-
- None
-
-------------------------------------------------------------------------------
- REFERENCES
-
- [1] "AMR Speech Codec Frame Structure", 3GPP TS 26.101 version 4.1.0
- Release 4, June 2001, page 13.
-
-------------------------------------------------------------------------------
- PSEUDO-CODE
-
-
-------------------------------------------------------------------------------
- RESOURCES USED
- When the code is written for a specific target processor the
- the resources used should be documented below.
-
- STACK USAGE: [stack count for this module] + [variable to represent
- stack usage for each subroutine called]
-
- where: [stack usage variable] = stack usage for [subroutine
- name] (see [filename].ext)
-
- DATA MEMORY USED: x words
-
- PROGRAM MEMORY USED: x words
-
- CLOCK CYCLES: [cycle count equation for this module] + [variable
- used to represent cycle count for each subroutine
- called]
-
- where: [cycle count variable] = cycle count for [subroutine
- name] (see [filename].ext)
-
-------------------------------------------------------------------------------
-*/
-
-
-/*----------------------------------------------------------------------------
-; INCLUDES
-----------------------------------------------------------------------------*/
-#include "typedef.h"
-
-/*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
- /*----------------------------------------------------------------------------
- ; MACROS
- ; Define module specific macros here
- ----------------------------------------------------------------------------*/
-
-
- /*----------------------------------------------------------------------------
- ; DEFINES
- ; Include all pre-processor statements here. Include conditional
- ; compile variables also.
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; LOCAL FUNCTION DEFINITIONS
- ; Function Prototype declaration
- ----------------------------------------------------------------------------*/
-
-
- /*----------------------------------------------------------------------------
- ; LOCAL STORE/BUFFER/POINTER DEFINITIONS
- ; Variable declaration - defined here and used outside this module
- ----------------------------------------------------------------------------*/
- const short BytesUsed[16] =
- {
- 13, /* 4.75 */
- 14, /* 5.15 */
- 16, /* 5.90 */
- 18, /* 6.70 */
- 20, /* 7.40 */
- 21, /* 7.95 */
- 27, /* 10.2 */
- 32, /* 12.2 */
- 6, /* GsmAmr comfort noise */
- 7, /* Gsm-Efr comfort noise */
- 6, /* IS-641 comfort noise */
- 6, /* Pdc-Efr comfort noise */
- 0, /* future use */
- 0, /* future use */
- 0, /* future use */
- 1 /* No transmission */
- };
- /*----------------------------------------------------------------------------
- ; EXTERNAL FUNCTION REFERENCES
- ; Declare functions defined elsewhere and referenced in this module
- ----------------------------------------------------------------------------*/
-
-
- /*----------------------------------------------------------------------------
- ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
- ; Declare variables used in this module but defined elsewhere
- ----------------------------------------------------------------------------*/
-
-
- /*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-}
-#endif
-
-/*----------------------------------------------------------------------------
-; FUNCTION CODE
-----------------------------------------------------------------------------*/
-
-/*----------------------------------------------------------------------------
-; Define all local variables
-----------------------------------------------------------------------------*/
-
-
-/*----------------------------------------------------------------------------
-; Function body here
-----------------------------------------------------------------------------*/
-
-
-/*----------------------------------------------------------------------------
-; Return nothing or data or data pointer
-----------------------------------------------------------------------------*/
-
diff --git a/media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp b/media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp
deleted file mode 100644
index c4a016d..0000000
--- a/media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp
+++ /dev/null
@@ -1,174 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/****************************************************************************************
-Portions of this file are derived from the following 3GPP standard:
-
- 3GPP TS 26.073
- ANSI-C code for the Adaptive Multi-Rate (AMR) speech codec
- Available from http://www.3gpp.org
-
-(C) 2004, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
-Permission to distribute, modify and use this file under the standard license
-terms listed above has been obtained from the copyright holder.
-****************************************************************************************/
-/*
-
- Filename: /audio/gsm_amr/c/src/overflow_tbl.c
-
-------------------------------------------------------------------------------
- REVISION HISTORY
-
- Description: Added #ifdef __cplusplus and removed "extern" from table
- definition.
-
- Description: Put "extern" back.
-
- Who: Date:
- Description:
-
-------------------------------------------------------------------------------
- MODULE DESCRIPTION
-
- This file contains the declaration for overflow_tbl[] used by the l_shl()
- and l_shr() functions.
-
-------------------------------------------------------------------------------
-*/
-
-/*----------------------------------------------------------------------------
-; INCLUDES
-----------------------------------------------------------------------------*/
-#include "typedef.h"
-
-/*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
- /*----------------------------------------------------------------------------
- ; MACROS
- ; [Define module specific macros here]
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; DEFINES
- ; [Include all pre-processor statements here. Include conditional
- ; compile variables also.]
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; LOCAL FUNCTION DEFINITIONS
- ; [List function prototypes here]
- ----------------------------------------------------------------------------*/
-
- /*----------------------------------------------------------------------------
- ; LOCAL VARIABLE DEFINITIONS
- ; [Variable declaration - defined here and used outside this module]
- ----------------------------------------------------------------------------*/
- const Word32 overflow_tbl [32] = {0x7fffffffL, 0x3fffffffL,
- 0x1fffffffL, 0x0fffffffL,
- 0x07ffffffL, 0x03ffffffL,
- 0x01ffffffL, 0x00ffffffL,
- 0x007fffffL, 0x003fffffL,
- 0x001fffffL, 0x000fffffL,
- 0x0007ffffL, 0x0003ffffL,
- 0x0001ffffL, 0x0000ffffL,
- 0x00007fffL, 0x00003fffL,
- 0x00001fffL, 0x00000fffL,
- 0x000007ffL, 0x000003ffL,
- 0x000001ffL, 0x000000ffL,
- 0x0000007fL, 0x0000003fL,
- 0x0000001fL, 0x0000000fL,
- 0x00000007L, 0x00000003L,
- 0x00000001L, 0x00000000L
- };
-
- /*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-}
-#endif
-
-/*
-------------------------------------------------------------------------------
- FUNCTION NAME:
-------------------------------------------------------------------------------
- INPUT AND OUTPUT DEFINITIONS
-
- Inputs:
- None
-
- Outputs:
- None
-
- Returns:
- None
-
- Global Variables Used:
- None
-
- Local Variables Needed:
- None
-
-------------------------------------------------------------------------------
- FUNCTION DESCRIPTION
-
- None
-
-------------------------------------------------------------------------------
- REQUIREMENTS
-
- None
-
-------------------------------------------------------------------------------
- REFERENCES
-
- [1] l_shl() function in basic_op2.c, UMTS GSM AMR speech codec, R99 -
- Version 3.2.0, March 2, 2001
-
-------------------------------------------------------------------------------
- PSEUDO-CODE
-
-
-------------------------------------------------------------------------------
- RESOURCES USED [optional]
-
- When the code is written for a specific target processor the
- the resources used should be documented below.
-
- HEAP MEMORY USED: x bytes
-
- STACK MEMORY USED: x bytes
-
- CLOCK CYCLES: (cycle count equation for this function) + (variable
- used to represent cycle count for each subroutine
- called)
- where: (cycle count variable) = cycle count for [subroutine
- name]
-
-------------------------------------------------------------------------------
- CAUTION [optional]
- [State any special notes, constraints or cautions for users of this function]
-
-------------------------------------------------------------------------------
-*/
-
-/*----------------------------------------------------------------------------
-; FUNCTION CODE
-----------------------------------------------------------------------------*/
-
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 76a7f40..21109d9 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -80,6 +80,7 @@ LOCAL_SHARED_LIBRARIES := \
libstagefright_amrnb_common
LOCAL_MODULE := libstagefright_soft_amrdec
+LOCAL_CLANG := false
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index 024a292..8ded6df 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -1,8 +1,5 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
-include frameworks/av/media/libstagefright/codecs/common/Config.mk
-
-
LOCAL_SRC_FILES := \
src/autocorr.c \
@@ -53,42 +50,41 @@ LOCAL_SRC_FILES := \
src/weight_a.c \
src/mem_align.c
-
-ifeq ($(VOTT), v5)
-LOCAL_SRC_FILES += \
- src/asm/ARMV5E/convolve_opt.s \
- src/asm/ARMV5E/cor_h_vec_opt.s \
- src/asm/ARMV5E/Deemph_32_opt.s \
- src/asm/ARMV5E/Dot_p_opt.s \
- src/asm/ARMV5E/Filt_6k_7k_opt.s \
- src/asm/ARMV5E/Norm_Corr_opt.s \
- src/asm/ARMV5E/pred_lt4_1_opt.s \
- src/asm/ARMV5E/residu_asm_opt.s \
- src/asm/ARMV5E/scale_sig_opt.s \
- src/asm/ARMV5E/Syn_filt_32_opt.s \
- src/asm/ARMV5E/syn_filt_opt.s
-
-endif
-
-ifeq ($(VOTT), v7)
-LOCAL_SRC_FILES += \
- src/asm/ARMV7/convolve_neon.s \
- src/asm/ARMV7/cor_h_vec_neon.s \
- src/asm/ARMV7/Deemph_32_neon.s \
- src/asm/ARMV7/Dot_p_neon.s \
- src/asm/ARMV7/Filt_6k_7k_neon.s \
- src/asm/ARMV7/Norm_Corr_neon.s \
- src/asm/ARMV7/pred_lt4_1_neon.s \
- src/asm/ARMV7/residu_asm_neon.s \
- src/asm/ARMV7/scale_sig_neon.s \
- src/asm/ARMV7/Syn_filt_32_neon.s \
- src/asm/ARMV7/syn_filt_neon.s
-
+ifneq ($(ARCH_ARM_HAVE_NEON),true)
+ LOCAL_SRC_FILES_arm := \
+ src/asm/ARMV5E/convolve_opt.s \
+ src/asm/ARMV5E/cor_h_vec_opt.s \
+ src/asm/ARMV5E/Deemph_32_opt.s \
+ src/asm/ARMV5E/Dot_p_opt.s \
+ src/asm/ARMV5E/Filt_6k_7k_opt.s \
+ src/asm/ARMV5E/Norm_Corr_opt.s \
+ src/asm/ARMV5E/pred_lt4_1_opt.s \
+ src/asm/ARMV5E/residu_asm_opt.s \
+ src/asm/ARMV5E/scale_sig_opt.s \
+ src/asm/ARMV5E/Syn_filt_32_opt.s \
+ src/asm/ARMV5E/syn_filt_opt.s
+
+ LOCAL_CFLAGS_arm := -DARM -DASM_OPT
+ LOCAL_C_INCLUDES_arm = $(LOCAL_PATH)/src/asm/ARMV5E
+else
+ LOCAL_SRC_FILES_arm := \
+ src/asm/ARMV7/convolve_neon.s \
+ src/asm/ARMV7/cor_h_vec_neon.s \
+ src/asm/ARMV7/Deemph_32_neon.s \
+ src/asm/ARMV7/Dot_p_neon.s \
+ src/asm/ARMV7/Filt_6k_7k_neon.s \
+ src/asm/ARMV7/Norm_Corr_neon.s \
+ src/asm/ARMV7/pred_lt4_1_neon.s \
+ src/asm/ARMV7/residu_asm_neon.s \
+ src/asm/ARMV7/scale_sig_neon.s \
+ src/asm/ARMV7/Syn_filt_32_neon.s \
+ src/asm/ARMV7/syn_filt_neon.s
+
+ LOCAL_CFLAGS_arm := -DARM -DARMV7 -DASM_OPT
+ LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/src/asm/ARMV5E
+ LOCAL_C_INCLUDES_arm += $(LOCAL_PATH)/src/asm/ARMV7
endif
-# ARMV5E/Filt_6k_7k_opt.s does not compile with Clang.
-LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
-
LOCAL_MODULE := libstagefright_amrwbenc
LOCAL_ARM_MODE := arm
@@ -104,18 +100,9 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/src \
$(LOCAL_PATH)/inc
-ifeq ($(VOTT), v5)
-LOCAL_CFLAGS += -DARM -DASM_OPT
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-endif
-
-ifeq ($(VOTT), v7)
-LOCAL_CFLAGS += -DARM -DARMV7 -DASM_OPT
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
-endif
-
LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#LOCAL_SANITIZE := signed-integer-overflow
include $(BUILD_STATIC_LIBRARY)
@@ -132,6 +119,8 @@ LOCAL_C_INCLUDES := \
frameworks/native/include/media/openmax
LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#LOCAL_SANITIZE := signed-integer-overflow
LOCAL_STATIC_LIBRARIES := \
libstagefright_amrwbenc
diff --git a/media/libstagefright/codecs/amrwbenc/inc/acelp.h b/media/libstagefright/codecs/amrwbenc/inc/acelp.h
index 5a1e536..97555d5 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/acelp.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/acelp.h
@@ -18,7 +18,7 @@
/*--------------------------------------------------------------------------*
* ACELP.H *
*--------------------------------------------------------------------------*
- * Function *
+ * Function *
*--------------------------------------------------------------------------*/
#ifndef __ACELP_H__
#define __ACELP_H__
@@ -33,68 +33,68 @@
Word16 median5(Word16 x[]);
void Autocorr(
- Word16 x[], /* (i) : Input signal */
- Word16 m, /* (i) : LPC order */
- Word16 r_h[], /* (o) : Autocorrelations (msb) */
- Word16 r_l[] /* (o) : Autocorrelations (lsb) */
- );
+ Word16 x[], /* (i) : Input signal */
+ Word16 m, /* (i) : LPC order */
+ Word16 r_h[], /* (o) : Autocorrelations (msb) */
+ Word16 r_l[] /* (o) : Autocorrelations (lsb) */
+ );
void Lag_window(
- Word16 r_h[], /* (i/o) : Autocorrelations (msb) */
- Word16 r_l[] /* (i/o) : Autocorrelations (lsb) */
- );
+ Word16 r_h[], /* (i/o) : Autocorrelations (msb) */
+ Word16 r_l[] /* (i/o) : Autocorrelations (lsb) */
+ );
void Init_Levinson(
- Word16 * mem /* output :static memory (18 words) */
- );
+ Word16 * mem /* output :static memory (18 words) */
+ );
void Levinson(
- Word16 Rh[], /* (i) : Rh[M+1] Vector of autocorrelations (msb) */
- Word16 Rl[], /* (i) : Rl[M+1] Vector of autocorrelations (lsb) */
- Word16 A[], /* (o) Q12 : A[M] LPC coefficients (m = 16) */
- Word16 rc[], /* (o) Q15 : rc[M] Reflection coefficients. */
- Word16 * mem /* (i/o) :static memory (18 words) */
- );
+ Word16 Rh[], /* (i) : Rh[M+1] Vector of autocorrelations (msb) */
+ Word16 Rl[], /* (i) : Rl[M+1] Vector of autocorrelations (lsb) */
+ Word16 A[], /* (o) Q12 : A[M] LPC coefficients (m = 16) */
+ Word16 rc[], /* (o) Q15 : rc[M] Reflection coefficients. */
+ Word16 * mem /* (i/o) :static memory (18 words) */
+ );
void Az_isp(
- Word16 a[], /* (i) Q12 : predictor coefficients */
- Word16 isp[], /* (o) Q15 : Immittance spectral pairs */
- Word16 old_isp[] /* (i) : old isp[] (in case not found M roots) */
- );
+ Word16 a[], /* (i) Q12 : predictor coefficients */
+ Word16 isp[], /* (o) Q15 : Immittance spectral pairs */
+ Word16 old_isp[] /* (i) : old isp[] (in case not found M roots) */
+ );
void Isp_Az(
- Word16 isp[], /* (i) Q15 : Immittance spectral pairs */
- Word16 a[], /* (o) Q12 : predictor coefficients (order = M) */
- Word16 m,
- Word16 adaptive_scaling /* (i) 0 : adaptive scaling disabled */
- /* 1 : adaptive scaling enabled */
- );
+ Word16 isp[], /* (i) Q15 : Immittance spectral pairs */
+ Word16 a[], /* (o) Q12 : predictor coefficients (order = M) */
+ Word16 m,
+ Word16 adaptive_scaling /* (i) 0 : adaptive scaling disabled */
+ /* 1 : adaptive scaling enabled */
+ );
void Isp_isf(
- Word16 isp[], /* (i) Q15 : isp[m] (range: -1<=val<1) */
- Word16 isf[], /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
- Word16 m /* (i) : LPC order */
- );
+ Word16 isp[], /* (i) Q15 : isp[m] (range: -1<=val<1) */
+ Word16 isf[], /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+ Word16 m /* (i) : LPC order */
+ );
void Isf_isp(
- Word16 isf[], /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
- Word16 isp[], /* (o) Q15 : isp[m] (range: -1<=val<1) */
- Word16 m /* (i) : LPC order */
- );
+ Word16 isf[], /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+ Word16 isp[], /* (o) Q15 : isp[m] (range: -1<=val<1) */
+ Word16 m /* (i) : LPC order */
+ );
void Int_isp(
- Word16 isp_old[], /* input : isps from past frame */
- Word16 isp_new[], /* input : isps from present frame */
- Word16 frac[], /* input : fraction for 3 first subfr (Q15) */
- Word16 Az[] /* output: LP coefficients in 4 subframes */
- );
+ Word16 isp_old[], /* input : isps from past frame */
+ Word16 isp_new[], /* input : isps from present frame */
+ Word16 frac[], /* input : fraction for 3 first subfr (Q15) */
+ Word16 Az[] /* output: LP coefficients in 4 subframes */
+ );
void Weight_a(
- Word16 a[], /* (i) Q12 : a[m+1] LPC coefficients */
- Word16 ap[], /* (o) Q12 : Spectral expanded LPC coefficients */
- Word16 gamma, /* (i) Q15 : Spectral expansion factor. */
- Word16 m /* (i) : LPC order. */
- );
+ Word16 a[], /* (i) Q12 : a[m+1] LPC coefficients */
+ Word16 ap[], /* (o) Q12 : Spectral expanded LPC coefficients */
+ Word16 gamma, /* (i) Q15 : Spectral expansion factor. */
+ Word16 m /* (i) : LPC order. */
+ );
/*-----------------------------------------------------------------*
@@ -102,214 +102,214 @@ void Weight_a(
*-----------------------------------------------------------------*/
void Qpisf_2s_46b(
- Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
- Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
- Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
- Word16 * indice, /* (o) : quantization indices */
- Word16 nb_surv /* (i) : number of survivor (1, 2, 3 or 4) */
- );
+ Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+ Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
+ Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
+ Word16 * indice, /* (o) : quantization indices */
+ Word16 nb_surv /* (i) : number of survivor (1, 2, 3 or 4) */
+ );
void Qpisf_2s_36b(
- Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
- Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
- Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
- Word16 * indice, /* (o) : quantization indices */
- Word16 nb_surv /* (i) : number of survivor (1, 2, 3 or 4) */
- );
+ Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+ Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
+ Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
+ Word16 * indice, /* (o) : quantization indices */
+ Word16 nb_surv /* (i) : number of survivor (1, 2, 3 or 4) */
+ );
void Dpisf_2s_46b(
- Word16 * indice, /* input: quantization indices */
- Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
- Word16 * past_isfq, /* i/0 : past ISF quantizer */
- Word16 * isfold, /* input : past quantized ISF */
- Word16 * isf_buf, /* input : isf buffer */
- Word16 bfi, /* input : Bad frame indicator */
- Word16 enc_dec
- );
+ Word16 * indice, /* input: quantization indices */
+ Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
+ Word16 * past_isfq, /* i/0 : past ISF quantizer */
+ Word16 * isfold, /* input : past quantized ISF */
+ Word16 * isf_buf, /* input : isf buffer */
+ Word16 bfi, /* input : Bad frame indicator */
+ Word16 enc_dec
+ );
void Dpisf_2s_36b(
- Word16 * indice, /* input: quantization indices */
- Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
- Word16 * past_isfq, /* i/0 : past ISF quantizer */
- Word16 * isfold, /* input : past quantized ISF */
- Word16 * isf_buf, /* input : isf buffer */
- Word16 bfi, /* input : Bad frame indicator */
- Word16 enc_dec
- );
+ Word16 * indice, /* input: quantization indices */
+ Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
+ Word16 * past_isfq, /* i/0 : past ISF quantizer */
+ Word16 * isfold, /* input : past quantized ISF */
+ Word16 * isf_buf, /* input : isf buffer */
+ Word16 bfi, /* input : Bad frame indicator */
+ Word16 enc_dec
+ );
void Qisf_ns(
- Word16 * isf1, /* input : ISF in the frequency domain (0..0.5) */
- Word16 * isf_q, /* output: quantized ISF */
- Word16 * indice /* output: quantization indices */
- );
+ Word16 * isf1, /* input : ISF in the frequency domain (0..0.5) */
+ Word16 * isf_q, /* output: quantized ISF */
+ Word16 * indice /* output: quantization indices */
+ );
void Disf_ns(
- Word16 * indice, /* input: quantization indices */
- Word16 * isf_q /* input : ISF in the frequency domain (0..0.5) */
- );
+ Word16 * indice, /* input: quantization indices */
+ Word16 * isf_q /* input : ISF in the frequency domain (0..0.5) */
+ );
Word16 Sub_VQ( /* output: return quantization index */
- Word16 * x, /* input : ISF residual vector */
- Word16 * dico, /* input : quantization codebook */
- Word16 dim, /* input : dimention of vector */
- Word16 dico_size, /* input : size of quantization codebook */
- Word32 * distance /* output: error of quantization */
- );
+ Word16 * x, /* input : ISF residual vector */
+ Word16 * dico, /* input : quantization codebook */
+ Word16 dim, /* input : dimention of vector */
+ Word16 dico_size, /* input : size of quantization codebook */
+ Word32 * distance /* output: error of quantization */
+ );
void Reorder_isf(
- Word16 * isf, /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
- Word16 min_dist, /* (i) Q15 : minimum distance to keep */
- Word16 n /* (i) : number of ISF */
- );
+ Word16 * isf, /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
+ Word16 min_dist, /* (i) Q15 : minimum distance to keep */
+ Word16 n /* (i) : number of ISF */
+ );
/*-----------------------------------------------------------------*
* filter prototypes *
*-----------------------------------------------------------------*/
void Init_Decim_12k8(
- Word16 mem[] /* output: memory (2*NB_COEF_DOWN) set to zeros */
- );
+ Word16 mem[] /* output: memory (2*NB_COEF_DOWN) set to zeros */
+ );
void Decim_12k8(
- Word16 sig16k[], /* input: signal to downsampling */
- Word16 lg, /* input: length of input */
- Word16 sig12k8[], /* output: decimated signal */
- Word16 mem[] /* in/out: memory (2*NB_COEF_DOWN) */
- );
+ Word16 sig16k[], /* input: signal to downsampling */
+ Word16 lg, /* input: length of input */
+ Word16 sig12k8[], /* output: decimated signal */
+ Word16 mem[] /* in/out: memory (2*NB_COEF_DOWN) */
+ );
void Init_HP50_12k8(Word16 mem[]);
void HP50_12k8(
- Word16 signal[], /* input/output signal */
- Word16 lg, /* lenght of signal */
- Word16 mem[] /* filter memory [6] */
- );
+ Word16 signal[], /* input/output signal */
+ Word16 lg, /* lenght of signal */
+ Word16 mem[] /* filter memory [6] */
+ );
void Init_HP400_12k8(Word16 mem[]);
void HP400_12k8(
- Word16 signal[], /* input/output signal */
- Word16 lg, /* lenght of signal */
- Word16 mem[] /* filter memory [6] */
- );
+ Word16 signal[], /* input/output signal */
+ Word16 lg, /* lenght of signal */
+ Word16 mem[] /* filter memory [6] */
+ );
void Init_Filt_6k_7k(Word16 mem[]);
void Filt_6k_7k(
- Word16 signal[], /* input: signal */
- Word16 lg, /* input: length of input */
- Word16 mem[] /* in/out: memory (size=30) */
- );
+ Word16 signal[], /* input: signal */
+ Word16 lg, /* input: length of input */
+ Word16 mem[] /* in/out: memory (size=30) */
+ );
void Filt_6k_7k_asm(
- Word16 signal[], /* input: signal */
- Word16 lg, /* input: length of input */
- Word16 mem[] /* in/out: memory (size=30) */
- );
+ Word16 signal[], /* input: signal */
+ Word16 lg, /* input: length of input */
+ Word16 mem[] /* in/out: memory (size=30) */
+ );
void LP_Decim2(
- Word16 x[], /* in/out: signal to process */
- Word16 l, /* input : size of filtering */
- Word16 mem[] /* in/out: memory (size=3) */
- );
+ Word16 x[], /* in/out: signal to process */
+ Word16 l, /* input : size of filtering */
+ Word16 mem[] /* in/out: memory (size=3) */
+ );
void Preemph(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : preemphasis coefficient */
- Word16 lg, /* (i) : lenght of filtering */
- Word16 * mem /* (i/o) : memory (x[-1]) */
- );
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : preemphasis coefficient */
+ Word16 lg, /* (i) : lenght of filtering */
+ Word16 * mem /* (i/o) : memory (x[-1]) */
+ );
void Preemph2(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : preemphasis coefficient */
- Word16 lg, /* (i) : lenght of filtering */
- Word16 * mem /* (i/o) : memory (x[-1]) */
- );
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : preemphasis coefficient */
+ Word16 lg, /* (i) : lenght of filtering */
+ Word16 * mem /* (i/o) : memory (x[-1]) */
+ );
void Deemph(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : deemphasis factor */
- Word16 L, /* (i) : vector size */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- );
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : deemphasis factor */
+ Word16 L, /* (i) : vector size */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ );
void Deemph2(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : deemphasis factor */
- Word16 L, /* (i) : vector size */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- );
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : deemphasis factor */
+ Word16 L, /* (i) : vector size */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ );
void Deemph_32(
- Word16 x_hi[], /* (i) : input signal (bit31..16) */
- Word16 x_lo[], /* (i) : input signal (bit15..4) */
- Word16 y[], /* (o) : output signal (x16) */
- Word16 mu, /* (i) Q15 : deemphasis factor */
- Word16 L, /* (i) : vector size */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- );
+ Word16 x_hi[], /* (i) : input signal (bit31..16) */
+ Word16 x_lo[], /* (i) : input signal (bit15..4) */
+ Word16 y[], /* (o) : output signal (x16) */
+ Word16 mu, /* (i) Q15 : deemphasis factor */
+ Word16 L, /* (i) : vector size */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ );
void Deemph_32_asm(
- Word16 x_hi[], /* (i) : input signal (bit31..16) */
- Word16 x_lo[], /* (i) : input signal (bit15..4) */
- Word16 y[], /* (o) : output signal (x16) */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- );
+ Word16 x_hi[], /* (i) : input signal (bit31..16) */
+ Word16 x_lo[], /* (i) : input signal (bit15..4) */
+ Word16 y[], /* (o) : output signal (x16) */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ );
void Convolve(
- Word16 x[], /* (i) : input vector */
- Word16 h[], /* (i) Q15 : impulse response */
- Word16 y[], /* (o) 12 bits: output vector */
- Word16 L /* (i) : vector size */
- );
+ Word16 x[], /* (i) : input vector */
+ Word16 h[], /* (i) Q15 : impulse response */
+ Word16 y[], /* (o) 12 bits: output vector */
+ Word16 L /* (i) : vector size */
+ );
void Convolve_asm(
- Word16 x[], /* (i) : input vector */
- Word16 h[], /* (i) Q15 : impulse response */
- Word16 y[], /* (o) 12 bits: output vector */
- Word16 L /* (i) : vector size */
- );
+ Word16 x[], /* (i) : input vector */
+ Word16 h[], /* (i) Q15 : impulse response */
+ Word16 y[], /* (o) 12 bits: output vector */
+ Word16 L /* (i) : vector size */
+ );
void Residu(
- Word16 a[], /* (i) Q12 : prediction coefficients */
- Word16 x[], /* (i) : speech (values x[-m..-1] are needed */
- Word16 y[], /* (o) : residual signal */
- Word16 lg /* (i) : size of filtering */
- );
+ Word16 a[], /* (i) Q12 : prediction coefficients */
+ Word16 x[], /* (i) : speech (values x[-m..-1] are needed */
+ Word16 y[], /* (o) : residual signal */
+ Word16 lg /* (i) : size of filtering */
+ );
void Residu_opt(
- Word16 a[], /* (i) Q12 : prediction coefficients */
- Word16 x[], /* (i) : speech (values x[-m..-1] are needed */
- Word16 y[], /* (o) : residual signal */
- Word16 lg /* (i) : size of filtering */
- );
+ Word16 a[], /* (i) Q12 : prediction coefficients */
+ Word16 x[], /* (i) : speech (values x[-m..-1] are needed */
+ Word16 y[], /* (o) : residual signal */
+ Word16 lg /* (i) : size of filtering */
+ );
void Syn_filt(
- Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
- Word16 x[], /* (i) : input signal */
- Word16 y[], /* (o) : output signal */
- Word16 lg, /* (i) : size of filtering */
- Word16 mem[], /* (i/o) : memory associated with this filtering. */
- Word16 update /* (i) : 0=no update, 1=update of memory. */
- );
+ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
+ Word16 x[], /* (i) : input signal */
+ Word16 y[], /* (o) : output signal */
+ Word16 lg, /* (i) : size of filtering */
+ Word16 mem[], /* (i/o) : memory associated with this filtering. */
+ Word16 update /* (i) : 0=no update, 1=update of memory. */
+ );
void Syn_filt_asm(
- Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
- Word16 x[], /* (i) : input signal */
- Word16 y[], /* (o) : output signal */
- Word16 mem[] /* (i/o) : memory associated with this filtering. */
- );
+ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
+ Word16 x[], /* (i) : input signal */
+ Word16 y[], /* (o) : output signal */
+ Word16 mem[] /* (i/o) : memory associated with this filtering. */
+ );
void Syn_filt_32(
- Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
- Word16 m, /* (i) : order of LP filter */
- Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
- Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
- Word16 sig_hi[], /* (o) /16 : synthesis high */
- Word16 sig_lo[], /* (o) /16 : synthesis low */
- Word16 lg /* (i) : size of filtering */
- );
+ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
+ Word16 m, /* (i) : order of LP filter */
+ Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
+ Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
+ Word16 sig_hi[], /* (o) /16 : synthesis high */
+ Word16 sig_lo[], /* (o) /16 : synthesis low */
+ Word16 lg /* (i) : size of filtering */
+ );
void Syn_filt_32_asm(
- Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
- Word16 m, /* (i) : order of LP filter */
- Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
- Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
- Word16 sig_hi[], /* (o) /16 : synthesis high */
- Word16 sig_lo[], /* (o) /16 : synthesis low */
- Word16 lg /* (i) : size of filtering */
- );
+ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
+ Word16 m, /* (i) : order of LP filter */
+ Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
+ Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
+ Word16 sig_hi[], /* (o) /16 : synthesis high */
+ Word16 sig_lo[], /* (o) /16 : synthesis low */
+ Word16 lg /* (i) : size of filtering */
+ );
/*-----------------------------------------------------------------*
* pitch prototypes *
*-----------------------------------------------------------------*/
@@ -443,12 +443,12 @@ void ACELP_4t64_fx(
Word16 nbbits, /* (i) : 20, 36, 44, 52, 64, 72 or 88 bits */
Word16 ser_size, /* (i) : bit rate */
Word16 _index[] /* (o) : index (20): 5+5+5+5 = 20 bits. */
- /* (o) : index (36): 9+9+9+9 = 36 bits. */
- /* (o) : index (44): 13+9+13+9 = 44 bits. */
- /* (o) : index (52): 13+13+13+13 = 52 bits. */
- /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits. */
- /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits. */
- /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits. */
+ /* (o) : index (36): 9+9+9+9 = 36 bits. */
+ /* (o) : index (44): 13+9+13+9 = 44 bits. */
+ /* (o) : index (52): 13+13+13+13 = 52 bits. */
+ /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits. */
+ /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits. */
+ /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits. */
);
void Pit_shrp(
diff --git a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
index f42a27c..d36f455 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
@@ -25,8 +25,8 @@
#define MAX_32 (Word32)0x7fffffffL
#define MIN_32 (Word32)0x80000000L
-#define MAX_16 (Word16)+32767 /* 0x7fff */
-#define MIN_16 (Word16)-32768 /* 0x8000 */
+#define MAX_16 (Word16)+32767 /* 0x7fff */
+#define MIN_16 (Word16)-32768 /* 0x8000 */
#define static_vo static __inline
@@ -41,22 +41,22 @@
#define L_negate(L_var1) (((L_var1) == (MIN_32)) ? (MAX_32) : (-(L_var1))) /* Long negate, 2*/
-#define extract_h(a) ((Word16)(a >> 16))
-#define extract_l(x) (Word16)((x))
-#define add1(a,b) (a + b)
-#define vo_L_msu(a,b,c) ( a - (( b * c ) << 1) )
+#define extract_h(a) ((Word16)(a >> 16))
+#define extract_l(x) (Word16)((x))
+#define add1(a,b) (a + b)
+#define vo_L_msu(a,b,c) ( a - (( b * c ) << 1) )
#define vo_mult32(a, b) ((a) * (b))
-#define vo_mult(a,b) (( a * b ) >> 15 )
-#define vo_L_mult(a,b) (((a) * (b)) << 1)
-#define vo_shr_r(var1, var2) ((var1+((Word16)(1L<<(var2-1))))>>var2)
-#define vo_sub(a,b) (a - b)
-#define vo_L_deposit_h(a) ((Word32)((a) << 16))
-#define vo_round(a) ((a + 0x00008000) >> 16)
-#define vo_extract_l(a) ((Word16)(a))
-#define vo_L_add(a,b) (a + b)
-#define vo_L_sub(a,b) (a - b)
-#define vo_mult_r(a,b) ((( a * b ) + 0x4000 ) >> 15 )
-#define vo_negate(a) (-a)
+#define vo_mult(a,b) (( a * b ) >> 15 )
+#define vo_L_mult(a,b) (((a) * (b)) << 1)
+#define vo_shr_r(var1, var2) ((var1+((Word16)(1L<<(var2-1))))>>var2)
+#define vo_sub(a,b) (a - b)
+#define vo_L_deposit_h(a) ((Word32)((a) << 16))
+#define vo_round(a) ((a + 0x00008000) >> 16)
+#define vo_extract_l(a) ((Word16)(a))
+#define vo_L_add(a,b) (a + b)
+#define vo_L_sub(a,b) (a - b)
+#define vo_mult_r(a,b) ((( a * b ) + 0x4000 ) >> 15 )
+#define vo_negate(a) (-a)
#define vo_L_shr_r(L_var1, var2) ((L_var1+((Word32)(1L<<(var2-1))))>>var2)
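The vo_* macros above are the unchecked fast paths of the fixed-point basic operators: a Word16 is read as a Q15 fraction (value/32768), a Word32 as Q31, vo_L_mult is the Q15 x Q15 -> Q31 multiply and vo_mult truncates the product back to Q15. A small worked example under that interpretation; the typedefs and the main() wrapper here are illustrative stand-ins for what typedef.h provides:

typedef short Word16;   /* assumed 16-bit, as in typedef.h */
typedef int   Word32;   /* assumed 32-bit */

int main(void)
{
    Word16 half = 0x4000;                                  /* 0.5 in Q15 */
    Word32 q31  = (((Word32)half * half) << 1);            /* vo_L_mult: 0x20000000 = 0.25 in Q31 */
    Word16 q15  = (Word16)(((Word32)half * half) >> 15);   /* vo_mult:   0x2000     = 0.25 in Q15 */
    return (q31 == 0x20000000 && q15 == 0x2000) ? 0 : 1;
}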
@@ -65,25 +65,25 @@
| Prototypes for basic arithmetic operators |
|___________________________________________________________________________|
*/
-static_vo Word16 add (Word16 var1, Word16 var2); /* Short add,1 */
-static_vo Word16 sub (Word16 var1, Word16 var2); /* Short sub,1 */
+static_vo Word16 add (Word16 var1, Word16 var2); /* Short add,1 */
+static_vo Word16 sub (Word16 var1, Word16 var2); /* Short sub,1 */
static_vo Word16 shl (Word16 var1, Word16 var2); /* Short shift left, 1 */
static_vo Word16 shr (Word16 var1, Word16 var2); /* Short shift right, 1 */
static_vo Word16 mult (Word16 var1, Word16 var2); /* Short mult, 1 */
static_vo Word32 L_mult (Word16 var1, Word16 var2); /* Long mult, 1 */
static_vo Word16 voround (Word32 L_var1); /* Round, 1 */
-static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2); /* Mac, 1 */
-static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2); /* Msu, 1 */
-static_vo Word32 L_add (Word32 L_var1, Word32 L_var2); /* Long add, 2 */
-static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2); /* Long sub, 2 */
-static_vo Word16 mult_r (Word16 var1, Word16 var2); /* Mult with round, 2 */
-static_vo Word32 L_shl2(Word32 L_var1, Word16 var2); /* var2 > 0*/
-static_vo Word32 L_shl (Word32 L_var1, Word16 var2); /* Long shift left, 2 */
-static_vo Word32 L_shr (Word32 L_var1, Word16 var2); /* Long shift right, 2*/
-static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2); /* Long shift right with round, 3 */
-static_vo Word16 norm_s (Word16 var1); /* Short norm, 15 */
-static_vo Word16 div_s (Word16 var1, Word16 var2); /* Short division, 18 */
-static_vo Word16 norm_l (Word32 L_var1); /* Long norm, 30 */
+static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2); /* Mac, 1 */
+static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2); /* Msu, 1 */
+static_vo Word32 L_add (Word32 L_var1, Word32 L_var2); /* Long add, 2 */
+static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2); /* Long sub, 2 */
+static_vo Word16 mult_r (Word16 var1, Word16 var2); /* Mult with round, 2 */
+static_vo Word32 L_shl2(Word32 L_var1, Word16 var2); /* var2 > 0*/
+static_vo Word32 L_shl (Word32 L_var1, Word16 var2); /* Long shift left, 2 */
+static_vo Word32 L_shr (Word32 L_var1, Word16 var2); /* Long shift right, 2*/
+static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2); /* Long shift right with round, 3 */
+static_vo Word16 norm_s (Word16 var1); /* Short norm, 15 */
+static_vo Word16 div_s (Word16 var1, Word16 var2); /* Short division, 18 */
+static_vo Word16 norm_l (Word32 L_var1); /* Long norm, 30 */
/*___________________________________________________________________________
| |
@@ -125,11 +125,11 @@ static_vo Word16 norm_l (Word32 L_var1); /* Long norm,
*/
static_vo Word16 add (Word16 var1, Word16 var2)
{
- Word16 var_out;
- Word32 L_sum;
- L_sum = (Word32) var1 + var2;
- var_out = saturate (L_sum);
- return (var_out);
+ Word16 var_out;
+ Word32 L_sum;
+ L_sum = (Word32) var1 + var2;
+ var_out = saturate (L_sum);
+ return (var_out);
}
/*___________________________________________________________________________
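add() widens the operands to 32 bits and clamps the sum through saturate(), which is defined earlier in basic_op.h and not shown in this hunk. A plain-C sketch of the same behaviour with the clamp written out (hypothetical helper name, not the patch's code):

typedef short Word16;
typedef int   Word32;

/* 16-bit addition that saturates instead of wrapping, as add()/saturate() do. */
static Word16 add_sat16(Word16 var1, Word16 var2)
{
    Word32 sum = (Word32)var1 + var2;      /* exact in 32 bits */
    if (sum >  32767) return (Word16) 32767;
    if (sum < -32768) return (Word16)-32768;
    return (Word16)sum;
}
/* add_sat16(30000, 10000) == 32767, where a wrapping short add would give -25536. */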
@@ -168,11 +168,11 @@ static_vo Word16 add (Word16 var1, Word16 var2)
static_vo Word16 sub (Word16 var1, Word16 var2)
{
- Word16 var_out;
- Word32 L_diff;
- L_diff = (Word32) var1 - var2;
- var_out = saturate (L_diff);
- return (var_out);
+ Word16 var_out;
+ Word32 L_diff;
+ L_diff = (Word32) var1 - var2;
+ var_out = saturate (L_diff);
+ return (var_out);
}
/*___________________________________________________________________________
@@ -212,27 +212,31 @@ static_vo Word16 sub (Word16 var1, Word16 var2)
static_vo Word16 shl (Word16 var1, Word16 var2)
{
- Word16 var_out;
- Word32 result;
- if (var2 < 0)
- {
- if (var2 < -16)
- var2 = -16;
- var_out = var1 >> ((Word16)-var2);
- }
- else
- {
- result = (Word32) var1 *((Word32) 1 << var2);
- if ((var2 > 15 && var1 != 0) || (result != (Word32) ((Word16) result)))
- {
- var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
- }
- else
- {
- var_out = extract_l (result);
- }
- }
- return (var_out);
+ Word16 var_out;
+ Word32 result;
+ if (var2 < 0)
+ {
+ if (var2 < -16)
+ var2 = -16;
+ var_out = var1 >> ((Word16)-var2);
+ }
+ else
+ {
+ if (var2 > 15 && var1 != 0)
+ {
+ var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
+ }
+ else
+ {
+ result = (Word32) var1 *((Word32) 1 << var2);
+ if ((result != (Word32) ((Word16) result))) {
+ var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
+ } else {
+ var_out = extract_l (result);
+ }
+ }
+ }
+ return (var_out);
}
/*___________________________________________________________________________
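This shl() hunk is the one functional change in the header: the old code always formed result = var1 * (1 << var2) and only then checked var2 > 15, so oversized shift counts were evaluated (undefined behaviour in C) even though the result was discarded; the new code tests var2 > 15 && var1 != 0 first and saturates without evaluating the shift. A condensed sketch of the new ordering (hypothetical helper name, same clamping values):

typedef short Word16;
typedef int   Word32;

static Word16 shl_sketch(Word16 var1, Word16 var2)
{
    Word32 result;
    if (var2 < 0) {                                  /* negative count: shift right instead */
        if (var2 < -16) var2 = -16;
        return (Word16)(var1 >> (-var2));
    }
    if (var1 == 0)
        return 0;                                    /* nothing to shift */
    if (var2 > 15)                                   /* cannot fit in 16 bits: saturate early, */
        return (var1 > 0) ? (Word16)32767 : (Word16)-32768;   /* without evaluating the shift  */
    result = (Word32)var1 * ((Word32)1 << var2);     /* var2 <= 15 here, so this is well defined */
    if (result != (Word32)(Word16)result)            /* the product may still overflow 16 bits  */
        return (var1 > 0) ? (Word16)32767 : (Word16)-32768;
    return (Word16)result;
}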
@@ -272,32 +276,32 @@ static_vo Word16 shl (Word16 var1, Word16 var2)
static_vo Word16 shr (Word16 var1, Word16 var2)
{
- Word16 var_out;
- if (var2 < 0)
- {
- if (var2 < -16)
- var2 = -16;
- var_out = shl(var1, (Word16)-var2);
- }
- else
- {
- if (var2 >= 15)
- {
- var_out = (Word16)((var1 < 0) ? -1 : 0);
- }
- else
- {
- if (var1 < 0)
- {
- var_out = (Word16)(~((~var1) >> var2));
- }
- else
- {
- var_out = (Word16)(var1 >> var2);
- }
- }
- }
- return (var_out);
+ Word16 var_out;
+ if (var2 < 0)
+ {
+ if (var2 < -16)
+ var2 = -16;
+ var_out = shl(var1, (Word16)-var2);
+ }
+ else
+ {
+ if (var2 >= 15)
+ {
+ var_out = (Word16)((var1 < 0) ? -1 : 0);
+ }
+ else
+ {
+ if (var1 < 0)
+ {
+ var_out = (Word16)(~((~var1) >> var2));
+ }
+ else
+ {
+ var_out = (Word16)(var1 >> var2);
+ }
+ }
+ }
+ return (var_out);
}
/*___________________________________________________________________________
@@ -337,14 +341,14 @@ static_vo Word16 shr (Word16 var1, Word16 var2)
static_vo Word16 mult (Word16 var1, Word16 var2)
{
- Word16 var_out;
- Word32 L_product;
- L_product = (Word32) var1 *(Word32) var2;
- L_product = (L_product & (Word32) 0xffff8000L) >> 15;
- if (L_product & (Word32) 0x00010000L)
- L_product = L_product | (Word32) 0xffff0000L;
- var_out = saturate (L_product);
- return (var_out);
+ Word16 var_out;
+ Word32 L_product;
+ L_product = (Word32) var1 *(Word32) var2;
+ L_product = (L_product & (Word32) 0xffff8000L) >> 15;
+ if (L_product & (Word32) 0x00010000L)
+ L_product = L_product | (Word32) 0xffff0000L;
+ var_out = saturate (L_product);
+ return (var_out);
}
/*___________________________________________________________________________
@@ -384,17 +388,17 @@ static_vo Word16 mult (Word16 var1, Word16 var2)
static_vo Word32 L_mult (Word16 var1, Word16 var2)
{
- Word32 L_var_out;
- L_var_out = (Word32) var1 *(Word32) var2;
- if (L_var_out != (Word32) 0x40000000L)
- {
- L_var_out *= 2;
- }
- else
- {
- L_var_out = MAX_32;
- }
- return (L_var_out);
+ Word32 L_var_out;
+ L_var_out = (Word32) var1 *(Word32) var2;
+ if (L_var_out != (Word32) 0x40000000L)
+ {
+ L_var_out *= 2;
+ }
+ else
+ {
+ L_var_out = MAX_32;
+ }
+ return (L_var_out);
}
/*___________________________________________________________________________
@@ -430,11 +434,11 @@ static_vo Word32 L_mult (Word16 var1, Word16 var2)
static_vo Word16 voround (Word32 L_var1)
{
- Word16 var_out;
- Word32 L_rounded;
- L_rounded = L_add (L_var1, (Word32) 0x00008000L);
- var_out = extract_h (L_rounded);
- return (var_out);
+ Word16 var_out;
+ Word32 L_rounded;
+ L_rounded = L_add (L_var1, (Word32) 0x00008000L);
+ var_out = extract_h (L_rounded);
+ return (var_out);
}
/*___________________________________________________________________________
@@ -476,11 +480,11 @@ static_vo Word16 voround (Word32 L_var1)
static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2)
{
- Word32 L_var_out;
- Word32 L_product;
- L_product = ((var1 * var2) << 1);
- L_var_out = L_add (L_var3, L_product);
- return (L_var_out);
+ Word32 L_var_out;
+ Word32 L_product;
+ L_product = ((var1 * var2) << 1);
+ L_var_out = L_add (L_var3, L_product);
+ return (L_var_out);
}
/*___________________________________________________________________________
@@ -522,11 +526,11 @@ static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2)
static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2)
{
- Word32 L_var_out;
- Word32 L_product;
- L_product = (var1 * var2)<<1;
- L_var_out = L_sub (L_var3, L_product);
- return (L_var_out);
+ Word32 L_var_out;
+ Word32 L_product;
+ L_product = (var1 * var2)<<1;
+ L_var_out = L_sub (L_var3, L_product);
+ return (L_var_out);
}
/*___________________________________________________________________________
@@ -563,16 +567,16 @@ static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2)
static_vo Word32 L_add (Word32 L_var1, Word32 L_var2)
{
- Word32 L_var_out;
- L_var_out = L_var1 + L_var2;
- if (((L_var1 ^ L_var2) & MIN_32) == 0)
- {
- if ((L_var_out ^ L_var1) & MIN_32)
- {
- L_var_out = (L_var1 < 0) ? MIN_32 : MAX_32;
- }
- }
- return (L_var_out);
+ Word32 L_var_out;
+ L_var_out = L_var1 + L_var2;
+ if (((L_var1 ^ L_var2) & MIN_32) == 0)
+ {
+ if ((L_var_out ^ L_var1) & MIN_32)
+ {
+ L_var_out = (L_var1 < 0) ? MIN_32 : MAX_32;
+ }
+ }
+ return (L_var_out);
}
/*___________________________________________________________________________
@@ -609,16 +613,16 @@ static_vo Word32 L_add (Word32 L_var1, Word32 L_var2)
static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2)
{
- Word32 L_var_out;
- L_var_out = L_var1 - L_var2;
- if (((L_var1 ^ L_var2) & MIN_32) != 0)
- {
- if ((L_var_out ^ L_var1) & MIN_32)
- {
- L_var_out = (L_var1 < 0L) ? MIN_32 : MAX_32;
- }
- }
- return (L_var_out);
+ Word32 L_var_out;
+ L_var_out = L_var1 - L_var2;
+ if (((L_var1 ^ L_var2) & MIN_32) != 0)
+ {
+ if ((L_var_out ^ L_var1) & MIN_32)
+ {
+ L_var_out = (L_var1 < 0L) ? MIN_32 : MAX_32;
+ }
+ }
+ return (L_var_out);
}
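L_add() and L_sub() detect 32-bit overflow from sign bits alone: an addition can only overflow when both operands have the same sign, and it has overflowed exactly when the result's sign differs from the first operand's; MIN_32 serves as the sign-bit mask. A worked sketch of the L_add() rule; the unsigned cast makes the wrapping sum well defined, which the original leaves implicit:

typedef int          Word32;
typedef unsigned int UWord32;
#define MAX_32 ((Word32)0x7fffffffL)
#define MIN_32 ((Word32)0x80000000L)

/* Same rule as L_add(): clamp when the signed 32-bit sum would overflow. */
static Word32 l_add_sketch(Word32 a, Word32 b)
{
    Word32 out = (Word32)((UWord32)a + (UWord32)b);   /* wrapping sum */
    if (((a ^ b) & MIN_32) == 0 &&                    /* operands agree in sign ...        */
        ((out ^ a) & MIN_32) != 0)                    /* ... but the result flipped sign   */
        out = (a < 0) ? MIN_32 : MAX_32;              /* so saturate toward that sign      */
    return out;
}
/* l_add_sketch(0x7fffff00, 0x200) == MAX_32 instead of wrapping to a negative value. */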
@@ -658,18 +662,18 @@ static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2)
static_vo Word16 mult_r (Word16 var1, Word16 var2)
{
- Word16 var_out;
- Word32 L_product_arr;
- L_product_arr = (Word32) var1 *(Word32) var2; /* product */
- L_product_arr += (Word32) 0x00004000L; /* round */
- L_product_arr &= (Word32) 0xffff8000L;
- L_product_arr >>= 15; /* shift */
- if (L_product_arr & (Word32) 0x00010000L) /* sign extend when necessary */
- {
- L_product_arr |= (Word32) 0xffff0000L;
- }
- var_out = saturate (L_product_arr);
- return (var_out);
+ Word16 var_out;
+ Word32 L_product_arr;
+ L_product_arr = (Word32) var1 *(Word32) var2; /* product */
+ L_product_arr += (Word32) 0x00004000L; /* round */
+ L_product_arr &= (Word32) 0xffff8000L;
+ L_product_arr >>= 15; /* shift */
+ if (L_product_arr & (Word32) 0x00010000L) /* sign extend when necessary */
+ {
+ L_product_arr |= (Word32) 0xffff0000L;
+ }
+ var_out = saturate (L_product_arr);
+ return (var_out);
}
/*___________________________________________________________________________
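mult_r() is the Q15 multiply with rounding: half of the final LSB (0x4000 in the Q30 product) is added before the shift down to Q15, and the bit-16 test restores the sign that the 0xffff8000 mask dropped for negative products. A worked positive-value example; the masking and sign-extension steps change nothing here, so they are omitted:

#include <stdio.h>
typedef short Word16;
typedef int   Word32;

int main(void)
{
    Word16 a = 16384;                 /* 0.5  in Q15 */
    Word16 b = 24576;                 /* 0.75 in Q15 */
    Word32 p = (Word32)a * b;         /* 0x18000000 = 0.375 in Q30 */
    p += 0x4000;                      /* round: add half a Q15 LSB */
    p >>= 15;                         /* back to Q15 */
    printf("%ld\n", (long)p);         /* prints 12288, i.e. 0.375 */
    return 0;
}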
@@ -708,61 +712,61 @@ static_vo Word16 mult_r (Word16 var1, Word16 var2)
static_vo Word32 L_shl (Word32 L_var1, Word16 var2)
{
- Word32 L_var_out = 0L;
- if (var2 <= 0)
- {
- if (var2 < -32)
- var2 = -32;
- L_var_out = (L_var1 >> (Word16)-var2);
- }
- else
- {
- for (; var2 > 0; var2--)
- {
- if (L_var1 > (Word32) 0X3fffffffL)
- {
- L_var_out = MAX_32;
- break;
- }
- else
- {
- if (L_var1 < (Word32) 0xc0000000L)
- {
- //Overflow = 1;
- L_var_out = MIN_32;
- break;
- }
- }
- L_var1 *= 2;
- L_var_out = L_var1;
- }
- }
- return (L_var_out);
+ Word32 L_var_out = 0L;
+ if (var2 <= 0)
+ {
+ if (var2 < -32)
+ var2 = -32;
+ L_var_out = (L_var1 >> (Word16)-var2);
+ }
+ else
+ {
+ for (; var2 > 0; var2--)
+ {
+ if (L_var1 > (Word32) 0X3fffffffL)
+ {
+ L_var_out = MAX_32;
+ break;
+ }
+ else
+ {
+ if (L_var1 < (Word32) 0xc0000000L)
+ {
+ //Overflow = 1;
+ L_var_out = MIN_32;
+ break;
+ }
+ }
+ L_var1 *= 2;
+ L_var_out = L_var1;
+ }
+ }
+ return (L_var_out);
}
static_vo Word32 L_shl2(Word32 L_var1, Word16 var2)
{
- Word32 L_var_out = 0L;
+ Word32 L_var_out = 0L;
- for (; var2 > 0; var2--)
- {
- if (L_var1 > (Word32) 0X3fffffffL)
- {
- L_var_out = MAX_32;
- break;
- }
- else
- {
- if (L_var1 < (Word32) 0xc0000000L)
- {
- L_var_out = MIN_32;
- break;
- }
- }
- L_var1 <<=1 ;
- L_var_out = L_var1;
- }
- return (L_var_out);
+ for (; var2 > 0; var2--)
+ {
+ if (L_var1 > (Word32) 0X3fffffffL)
+ {
+ L_var_out = MAX_32;
+ break;
+ }
+ else
+ {
+ if (L_var1 < (Word32) 0xc0000000L)
+ {
+ L_var_out = MIN_32;
+ break;
+ }
+ }
+ L_var1 <<=1 ;
+ L_var_out = L_var1;
+ }
+ return (L_var_out);
}
/*___________________________________________________________________________
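L_shl() and L_shl2() shift left one bit per loop iteration and saturate as soon as the value leaves the range that survives one more doubling, i.e. [0xc0000000, 0x3fffffff]. A compact equivalent for positive shift counts (hypothetical name, mirrors L_shl2()):

typedef short Word16;
typedef int   Word32;
#define MAX_32 ((Word32)0x7fffffffL)
#define MIN_32 ((Word32)0x80000000L)

static Word32 l_shl_sat(Word32 v, Word16 count)   /* count > 0, as in L_shl2() */
{
    Word32 out = 0L;
    for (; count > 0; count--) {
        if (v > (Word32)0x3fffffffL) { out = MAX_32; break; }   /* next doubling overflows + */
        if (v < (Word32)0xc0000000L) { out = MIN_32; break; }   /* next doubling overflows - */
        v *= 2;                                                 /* no 32-bit overflow here   */
        out = v;
    }
    return out;
}
/* l_shl_sat(0x20000000, 3) == MAX_32;  l_shl_sat(0x100, 4) == 0x1000. */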
@@ -801,32 +805,32 @@ static_vo Word32 L_shl2(Word32 L_var1, Word16 var2)
static_vo Word32 L_shr (Word32 L_var1, Word16 var2)
{
- Word32 L_var_out;
- if (var2 < 0)
- {
- if (var2 < -32)
- var2 = -32;
- L_var_out = L_shl2(L_var1, (Word16)-var2);
- }
- else
- {
- if (var2 >= 31)
- {
- L_var_out = (L_var1 < 0L) ? -1 : 0;
- }
- else
- {
- if (L_var1 < 0)
- {
- L_var_out = ~((~L_var1) >> var2);
- }
- else
- {
- L_var_out = L_var1 >> var2;
- }
- }
- }
- return (L_var_out);
+ Word32 L_var_out;
+ if (var2 < 0)
+ {
+ if (var2 < -32)
+ var2 = -32;
+ L_var_out = L_shl2(L_var1, (Word16)-var2);
+ }
+ else
+ {
+ if (var2 >= 31)
+ {
+ L_var_out = (L_var1 < 0L) ? -1 : 0;
+ }
+ else
+ {
+ if (L_var1 < 0)
+ {
+ L_var_out = ~((~L_var1) >> var2);
+ }
+ else
+ {
+ L_var_out = L_var1 >> var2;
+ }
+ }
+ }
+ return (L_var_out);
}
/*___________________________________________________________________________
@@ -873,23 +877,23 @@ static_vo Word32 L_shr (Word32 L_var1, Word16 var2)
static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2)
{
- Word32 L_var_out;
- if (var2 > 31)
- {
- L_var_out = 0;
- }
- else
- {
- L_var_out = L_shr (L_var1, var2);
- if (var2 > 0)
- {
- if ((L_var1 & ((Word32) 1 << (var2 - 1))) != 0)
- {
- L_var_out++;
- }
- }
- }
- return (L_var_out);
+ Word32 L_var_out;
+ if (var2 > 31)
+ {
+ L_var_out = 0;
+ }
+ else
+ {
+ L_var_out = L_shr (L_var1, var2);
+ if (var2 > 0)
+ {
+ if ((L_var1 & ((Word32) 1 << (var2 - 1))) != 0)
+ {
+ L_var_out++;
+ }
+ }
+ }
+ return (L_var_out);
}
/*___________________________________________________________________________
@@ -927,30 +931,30 @@ static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2)
static_vo Word16 norm_s (Word16 var1)
{
- Word16 var_out = 0;
- if (var1 == 0)
- {
- var_out = 0;
- }
- else
- {
- if (var1 == -1)
- {
- var_out = 15;
- }
- else
- {
- if (var1 < 0)
- {
- var1 = (Word16)~var1;
- }
- for (var_out = 0; var1 < 0x4000; var_out++)
- {
- var1 <<= 1;
- }
- }
- }
- return (var_out);
+ Word16 var_out = 0;
+ if (var1 == 0)
+ {
+ var_out = 0;
+ }
+ else
+ {
+ if (var1 == -1)
+ {
+ var_out = 15;
+ }
+ else
+ {
+ if (var1 < 0)
+ {
+ var1 = (Word16)~var1;
+ }
+ for (var_out = 0; var1 < 0x4000; var_out++)
+ {
+ var1 <<= 1;
+ }
+ }
+ }
+ return (var_out);
}
/*___________________________________________________________________________
@@ -992,47 +996,47 @@ static_vo Word16 norm_s (Word16 var1)
static_vo Word16 div_s (Word16 var1, Word16 var2)
{
- Word16 var_out = 0;
- Word16 iteration;
- Word32 L_num;
- Word32 L_denom;
- if ((var1 < 0) || (var2 < 0))
- {
- var_out = MAX_16;
- return var_out;
- }
- if (var2 == 0)
- {
- var_out = MAX_16;
- return var_out;
- }
- if (var1 == 0)
- {
- var_out = 0;
- }
- else
- {
- if (var1 == var2)
- {
- var_out = MAX_16;
- }
- else
- {
- L_num = L_deposit_l (var1);
- L_denom = L_deposit_l(var2);
- for (iteration = 0; iteration < 15; iteration++)
- {
- var_out <<= 1;
- L_num <<= 1;
- if (L_num >= L_denom)
- {
- L_num -= L_denom;
- var_out += 1;
- }
- }
- }
- }
- return (var_out);
+ Word16 var_out = 0;
+ Word16 iteration;
+ Word32 L_num;
+ Word32 L_denom;
+ if ((var1 < 0) || (var2 < 0))
+ {
+ var_out = MAX_16;
+ return var_out;
+ }
+ if (var2 == 0)
+ {
+ var_out = MAX_16;
+ return var_out;
+ }
+ if (var1 == 0)
+ {
+ var_out = 0;
+ }
+ else
+ {
+ if (var1 == var2)
+ {
+ var_out = MAX_16;
+ }
+ else
+ {
+ L_num = L_deposit_l (var1);
+ L_denom = L_deposit_l(var2);
+ for (iteration = 0; iteration < 15; iteration++)
+ {
+ var_out <<= 1;
+ L_num <<= 1;
+ if (L_num >= L_denom)
+ {
+ L_num -= L_denom;
+ var_out += 1;
+ }
+ }
+ }
+ }
+ return (var_out);
}
/*___________________________________________________________________________
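div_s() divides two Q15 values with 0 <= var1 <= var2, var2 > 0, and returns var1/var2 in Q15, produced bit by bit with fifteen steps of restoring division: double the remainder, subtract the denominator when it fits and set that quotient bit. A stripped-down version plus a float cross-check; the special cases for zero, negative and equal inputs are omitted:

#include <stdio.h>
typedef short Word16;
typedef int   Word32;

static Word16 div_q15(Word16 num, Word16 den)   /* requires 0 < num < den */
{
    Word32 L_num = num, L_den = den;
    Word16 out = 0;
    int    i;
    for (i = 0; i < 15; i++) {
        out   = (Word16)(out << 1);
        L_num = L_num << 1;
        if (L_num >= L_den) {        /* the denominator fits: this quotient bit is 1 */
            L_num -= L_den;
            out   = (Word16)(out + 1);
        }
    }
    return out;
}

int main(void)
{
    Word16 q = div_q15(8192, 24576);          /* 0.25 / 0.75 */
    printf("%d %.4f\n", q, q / 32768.0);      /* 10922  0.3333 */
    return 0;
}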
@@ -1070,20 +1074,20 @@ static_vo Word16 div_s (Word16 var1, Word16 var2)
static_vo Word16 norm_l (Word32 L_var1)
{
- Word16 var_out = 0;
- if (L_var1 != 0)
- {
- var_out = 31;
- if (L_var1 != (Word32) 0xffffffffL)
- {
- L_var1 ^= (L_var1 >>31);
- for (var_out = 0; L_var1 < (Word32) 0x40000000L; var_out++)
- {
- L_var1 <<= 1;
- }
- }
- }
- return (var_out);
+ Word16 var_out = 0;
+ if (L_var1 != 0)
+ {
+ var_out = 31;
+ if (L_var1 != (Word32) 0xffffffffL)
+ {
+ L_var1 ^= (L_var1 >>31);
+ for (var_out = 0; L_var1 < (Word32) 0x40000000L; var_out++)
+ {
+ L_var1 <<= 1;
+ }
+ }
+ }
+ return (var_out);
}
#endif //__BASIC_OP_H__
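norm_s() and norm_l() return the number of left shifts that normalizes a value, i.e. brings its most significant data bit up to just below the sign bit; this headroom count is what the table-driven routines such as Isqrt() and Log2() use for rescaling. A positive-only sketch of norm_l(); the real routine also handles 0, 0xffffffff and negative inputs via the L_var1 ^= (L_var1 >> 31) fold:

typedef short Word16;
typedef int   Word32;

/* Left shifts needed to move a positive 32-bit value into [2^30, 2^31). */
static Word16 norm_l_pos(Word32 v)   /* v > 0 for this sketch */
{
    Word16 n = 0;
    while (v < (Word32)0x40000000L) {   /* stop once bit 30 is set */
        v <<= 1;
        n++;
    }
    return n;
}
/* norm_l_pos(0x1234) == 18: 0x1234 << 18 == 0x48d00000, whose bit 30 is set. */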
diff --git a/media/libstagefright/codecs/amrwbenc/inc/bits.h b/media/libstagefright/codecs/amrwbenc/inc/bits.h
index e880684..ff9c0c1 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/bits.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/bits.h
@@ -18,7 +18,7 @@
/*--------------------------------------------------------------------------*
* BITS.H *
*--------------------------------------------------------------------------*
-* Number of bits for different modes *
+* Number of bits for different modes *
*--------------------------------------------------------------------------*/
#ifndef __BITS_H__
@@ -52,16 +52,16 @@
#define RX_FRAME_TYPE (Word16)0x6b20
static const Word16 nb_of_bits[NUM_OF_MODES] = {
- NBBITS_7k,
- NBBITS_9k,
- NBBITS_12k,
- NBBITS_14k,
- NBBITS_16k,
- NBBITS_18k,
- NBBITS_20k,
- NBBITS_23k,
- NBBITS_24k,
- NBBITS_SID
+ NBBITS_7k,
+ NBBITS_9k,
+ NBBITS_12k,
+ NBBITS_14k,
+ NBBITS_16k,
+ NBBITS_18k,
+ NBBITS_20k,
+ NBBITS_23k,
+ NBBITS_24k,
+ NBBITS_SID
};
/*typedef struct
@@ -74,18 +74,18 @@ Word16 prev_ft;
//typedef struct
//{
-// Word16 prev_ft;
-// Word16 prev_mode;
+// Word16 prev_ft;
+// Word16 prev_mode;
//} RX_State;
int PackBits(Word16 prms[], Word16 coding_mode, Word16 mode, Coder_State *st);
void Parm_serial(
- Word16 value, /* input : parameter value */
- Word16 no_of_bits, /* input : number of bits */
- Word16 ** prms
- );
+ Word16 value, /* input : parameter value */
+ Word16 no_of_bits, /* input : number of bits */
+ Word16 ** prms
+ );
#endif //__BITS_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/cod_main.h b/media/libstagefright/codecs/amrwbenc/inc/cod_main.h
index 53ca55e..170981e 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/cod_main.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/cod_main.h
@@ -18,7 +18,7 @@
/*--------------------------------------------------------------------------*
* COD_MAIN.H *
*--------------------------------------------------------------------------*
- * Static memory in the encoder *
+ * Static memory in the encoder *
*--------------------------------------------------------------------------*/
#ifndef __COD_MAIN_H__
#define __COD_MAIN_H__
@@ -79,21 +79,21 @@ typedef struct
Word16 vad_hist;
Word16 gain_alpha;
/* TX_State structure */
- Word16 sid_update_counter;
+ Word16 sid_update_counter;
Word16 sid_handover_debt;
Word16 prev_ft;
- Word16 allow_dtx;
- /*some input/output buffer parameters */
- unsigned char *inputStream;
- int inputSize;
- VOAMRWBMODE mode;
- VOAMRWBFRAMETYPE frameType;
- unsigned short *outputStream;
- int outputSize;
- FrameStream *stream;
- VO_MEM_OPERATOR *pvoMemop;
- VO_MEM_OPERATOR voMemoprator;
- VO_PTR hCheck;
+ Word16 allow_dtx;
+ /*some input/output buffer parameters */
+ unsigned char *inputStream;
+ int inputSize;
+ VOAMRWBMODE mode;
+ VOAMRWBFRAMETYPE frameType;
+ unsigned short *outputStream;
+ int outputSize;
+ FrameStream *stream;
+ VO_MEM_OPERATOR *pvoMemop;
+ VO_MEM_OPERATOR voMemoprator;
+ VO_PTR hCheck;
} Coder_State;
typedef void* HAMRENC;
diff --git a/media/libstagefright/codecs/amrwbenc/inc/dtx.h b/media/libstagefright/codecs/amrwbenc/inc/dtx.h
index 0bdda67..82a9bf4 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/dtx.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/dtx.h
@@ -16,9 +16,9 @@
/*--------------------------------------------------------------------------*
- * DTX.H *
+ * DTX.H *
*--------------------------------------------------------------------------*
- * Static memory, constants and frametypes for the DTX *
+ * Static memory, constants and frametypes for the DTX *
*--------------------------------------------------------------------------*/
#ifndef __DTX_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/log2.h b/media/libstagefright/codecs/amrwbenc/inc/log2.h
index b065eb4..3d9a6c4 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/log2.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/log2.h
@@ -45,17 +45,17 @@
********************************************************************************
*/
void Log2 (
- Word32 L_x, /* (i) : input value */
- Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
- Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1)*/
- );
+ Word32 L_x, /* (i) : input value */
+ Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
+ Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1)*/
+ );
void Log2_norm (
- Word32 L_x, /* (i) : input value (normalized) */
- Word16 exp, /* (i) : norm_l (L_x) */
- Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
- Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1) */
- );
+ Word32 L_x, /* (i) : input value (normalized) */
+ Word16 exp, /* (i) : norm_l (L_x) */
+ Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
+ Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1) */
+ );
#endif //__LOG2_H__
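Log2() splits log2(L_x) into an integer exponent and a fractional part; in the reference sources the fraction is delivered in Q15, which is assumed in the float cross-check below (illustrative only, not the codec's fixed-point path):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double lg       = log(1000000.0) / log(2.0);               /* log2(1e6) ~= 19.93   */
    int    exponent = (int)lg;                                  /* integer part: 19     */
    int    fraction = (int)((lg - exponent) * 32768.0 + 0.5);   /* Q15 fraction: ~30526 */
    printf("%d %d\n", exponent, fraction);
    return 0;
}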
diff --git a/media/libstagefright/codecs/amrwbenc/inc/main.h b/media/libstagefright/codecs/amrwbenc/inc/main.h
index 3a6f963..adef2df 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/main.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/main.h
@@ -17,9 +17,9 @@
/*--------------------------------------------------------------------------*
- * MAIN.H *
+ * MAIN.H *
*--------------------------------------------------------------------------*
- * Main functions *
+ * Main functions *
*--------------------------------------------------------------------------*/
#ifndef __MAIN_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/math_op.h b/media/libstagefright/codecs/amrwbenc/inc/math_op.h
index 7b6196b..c3c00bc 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/math_op.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/math_op.h
@@ -16,40 +16,40 @@
/*--------------------------------------------------------------------------*
- * MATH_OP.H *
+ * MATH_OP.H *
*--------------------------------------------------------------------------*
- * Mathematical operations *
+ * Mathematical operations *
*--------------------------------------------------------------------------*/
#ifndef __MATH_OP_H__
#define __MATH_OP_H__
Word32 Isqrt( /* (o) Q31 : output value (range: 0<=val<1) */
- Word32 L_x /* (i) Q0 : input value (range: 0<=val<=7fffffff) */
- );
+ Word32 L_x /* (i) Q0 : input value (range: 0<=val<=7fffffff) */
+ );
void Isqrt_n(
- Word32 * frac, /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
- Word16 * exp /* (i/o) : exponent (value = frac x 2^exponent) */
- );
+ Word32 * frac, /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
+ Word16 * exp /* (i/o) : exponent (value = frac x 2^exponent) */
+ );
Word32 Pow2( /* (o) Q0 : result (range: 0<=val<=0x7fffffff) */
- Word16 exponant, /* (i) Q0 : Integer part. (range: 0<=val<=30) */
- Word16 fraction /* (i) Q15 : Fractionnal part. (range: 0.0<=val<1.0) */
- );
+ Word16 exponant, /* (i) Q0 : Integer part. (range: 0<=val<=30) */
+ Word16 fraction /* (i) Q15 : Fractionnal part. (range: 0.0<=val<1.0) */
+ );
Word32 Dot_product12( /* (o) Q31: normalized result (1 < val <= -1) */
- Word16 x[], /* (i) 12bits: x vector */
- Word16 y[], /* (i) 12bits: y vector */
- Word16 lg, /* (i) : vector length */
- Word16 * exp /* (o) : exponent of result (0..+30) */
- );
+ Word16 x[], /* (i) 12bits: x vector */
+ Word16 y[], /* (i) 12bits: y vector */
+ Word16 lg, /* (i) : vector length */
+ Word16 * exp /* (o) : exponent of result (0..+30) */
+ );
Word32 Dot_product12_asm( /* (o) Q31: normalized result (1 < val <= -1) */
- Word16 x[], /* (i) 12bits: x vector */
- Word16 y[], /* (i) 12bits: y vector */
- Word16 lg, /* (i) : vector length */
- Word16 * exp /* (o) : exponent of result (0..+30) */
- );
+ Word16 x[], /* (i) 12bits: x vector */
+ Word16 y[], /* (i) 12bits: y vector */
+ Word16 lg, /* (i) : vector length */
+ Word16 * exp /* (o) : exponent of result (0..+30) */
+ );
#endif //__MATH_OP_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/mem_align.h b/media/libstagefright/codecs/amrwbenc/inc/mem_align.h
index 442786a..2ae5a6c 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/mem_align.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/mem_align.h
@@ -14,9 +14,9 @@
** limitations under the License.
*/
/*******************************************************************************
- File: mem_align.h
+ File: mem_align.h
- Content: Memory alloc alignments functions
+ Content: Memory alloc alignments functions
*******************************************************************************/
@@ -29,7 +29,7 @@
extern void *mem_malloc(VO_MEM_OPERATOR *pMemop, unsigned int size, unsigned char alignment, unsigned int CodecID);
extern void mem_free(VO_MEM_OPERATOR *pMemop, void *mem_ptr, unsigned int CodecID);
-#endif /* __VO_MEM_ALIGN_H__ */
+#endif /* __VO_MEM_ALIGN_H__ */
diff --git a/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h b/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h
index 4a13f16..77487ed 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h
@@ -17,36 +17,36 @@
/*--------------------------------------------------------------------------*
* P_MED_O.H *
*--------------------------------------------------------------------------*
- * Median open-loop lag search *
+ * Median open-loop lag search *
*--------------------------------------------------------------------------*/
#ifndef __P_MED_O_H__
#define __P_MED_O_H__
Word16 Pitch_med_ol( /* output: open loop pitch lag */
- Word16 wsp[], /* input : signal used to compute the open loop pitch */
- /* wsp[-pit_max] to wsp[-1] should be known */
- Word16 L_min, /* input : minimum pitch lag */
- Word16 L_max, /* input : maximum pitch lag */
- Word16 L_frame, /* input : length of frame to compute pitch */
- Word16 L_0, /* input : old_ open-loop pitch */
- Word16 * gain, /* output: normalize correlation of hp_wsp for the Lag */
- Word16 * hp_wsp_mem, /* i:o : memory of the hypass filter for hp_wsp[] (lg=9) */
- Word16 * old_hp_wsp, /* i:o : hypass wsp[] */
- Word16 wght_flg /* input : is weighting function used */
- );
+ Word16 wsp[], /* input : signal used to compute the open loop pitch */
+ /* wsp[-pit_max] to wsp[-1] should be known */
+ Word16 L_min, /* input : minimum pitch lag */
+ Word16 L_max, /* input : maximum pitch lag */
+ Word16 L_frame, /* input : length of frame to compute pitch */
+ Word16 L_0, /* input : old_ open-loop pitch */
+ Word16 * gain, /* output: normalize correlation of hp_wsp for the Lag */
+ Word16 * hp_wsp_mem, /* i:o : memory of the hypass filter for hp_wsp[] (lg=9) */
+ Word16 * old_hp_wsp, /* i:o : hypass wsp[] */
+ Word16 wght_flg /* input : is weighting function used */
+ );
Word16 Med_olag( /* output : median of 5 previous open-loop lags */
- Word16 prev_ol_lag, /* input : previous open-loop lag */
- Word16 old_ol_lag[5]
- );
+ Word16 prev_ol_lag, /* input : previous open-loop lag */
+ Word16 old_ol_lag[5]
+ );
void Hp_wsp(
- Word16 wsp[], /* i : wsp[] signal */
- Word16 hp_wsp[], /* o : hypass wsp[] */
- Word16 lg, /* i : lenght of signal */
- Word16 mem[] /* i/o : filter memory [9] */
- );
+ Word16 wsp[], /* i : wsp[] signal */
+ Word16 hp_wsp[], /* o : hypass wsp[] */
+ Word16 lg, /* i : lenght of signal */
+ Word16 mem[] /* i/o : filter memory [9] */
+ );
#endif //__P_MED_O_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h b/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h
index b5d5280..67140fc 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h
@@ -19,7 +19,7 @@
/*--------------------------------------------------------------------------*
* Q_PULSE.H *
*--------------------------------------------------------------------------*
- * Coding and decoding of algebraic codebook *
+ * Coding and decoding of algebraic codebook *
*--------------------------------------------------------------------------*/
#ifndef __Q_PULSE_H__
@@ -28,38 +28,38 @@
#include "typedef.h"
Word32 quant_1p_N1( /* (o) return (N+1) bits */
- Word16 pos, /* (i) position of the pulse */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos, /* (i) position of the pulse */
+ Word16 N); /* (i) number of bits for position */
Word32 quant_2p_2N1( /* (o) return (2*N)+1 bits */
- Word16 pos1, /* (i) position of the pulse 1 */
- Word16 pos2, /* (i) position of the pulse 2 */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos1, /* (i) position of the pulse 1 */
+ Word16 pos2, /* (i) position of the pulse 2 */
+ Word16 N); /* (i) number of bits for position */
Word32 quant_3p_3N1( /* (o) return (3*N)+1 bits */
- Word16 pos1, /* (i) position of the pulse 1 */
- Word16 pos2, /* (i) position of the pulse 2 */
- Word16 pos3, /* (i) position of the pulse 3 */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos1, /* (i) position of the pulse 1 */
+ Word16 pos2, /* (i) position of the pulse 2 */
+ Word16 pos3, /* (i) position of the pulse 3 */
+ Word16 N); /* (i) number of bits for position */
Word32 quant_4p_4N1( /* (o) return (4*N)+1 bits */
- Word16 pos1, /* (i) position of the pulse 1 */
- Word16 pos2, /* (i) position of the pulse 2 */
- Word16 pos3, /* (i) position of the pulse 3 */
- Word16 pos4, /* (i) position of the pulse 4 */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos1, /* (i) position of the pulse 1 */
+ Word16 pos2, /* (i) position of the pulse 2 */
+ Word16 pos3, /* (i) position of the pulse 3 */
+ Word16 pos4, /* (i) position of the pulse 4 */
+ Word16 N); /* (i) number of bits for position */
Word32 quant_4p_4N( /* (o) return 4*N bits */
- Word16 pos[], /* (i) position of the pulse 1..4 */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos[], /* (i) position of the pulse 1..4 */
+ Word16 N); /* (i) number of bits for position */
Word32 quant_5p_5N( /* (o) return 5*N bits */
- Word16 pos[], /* (i) position of the pulse 1..5 */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos[], /* (i) position of the pulse 1..5 */
+ Word16 N); /* (i) number of bits for position */
Word32 quant_6p_6N_2( /* (o) return (6*N)-2 bits */
- Word16 pos[], /* (i) position of the pulse 1..6 */
- Word16 N); /* (i) number of bits for position */
+ Word16 pos[], /* (i) position of the pulse 1..6 */
+ Word16 N); /* (i) number of bits for position */
#endif //__Q_PULSE_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/stream.h b/media/libstagefright/codecs/amrwbenc/inc/stream.h
index 4c1d0f0..ec1a700 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/stream.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/stream.h
@@ -17,7 +17,7 @@
/***********************************************************************
-File: stream.h
+File: stream.h
Contains: VOME API Buffer Operator Implement Header
@@ -28,16 +28,16 @@ Contains: VOME API Buffer Operator Implement Header
#include "voMem.h"
#define Frame_Maxsize 1024 * 2 //Work Buffer 10K
#define Frame_MaxByte 640 //AMR_WB Encoder one frame 320 samples = 640 Bytes
-#define MIN(a,b) ((a) < (b)? (a) : (b))
+#define MIN(a,b) ((a) < (b)? (a) : (b))
typedef struct{
- unsigned char *set_ptr;
- unsigned char *frame_ptr;
- unsigned char *frame_ptr_bk;
- int set_len;
- int framebuffer_len;
- int frame_storelen;
- int used_len;
+ unsigned char *set_ptr;
+ unsigned char *frame_ptr;
+ unsigned char *frame_ptr_bk;
+ int set_len;
+ int framebuffer_len;
+ int frame_storelen;
+ int used_len;
}FrameStream;
void voAWB_UpdateFrameBuffer(FrameStream *stream, VO_MEM_OPERATOR *pMemOP);
diff --git a/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h b/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h
index 6822f48..9a9af4f 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h
@@ -37,28 +37,28 @@
typedef struct
{
- Word16 bckr_est[COMPLEN]; /* background noise estimate */
- Word16 ave_level[COMPLEN]; /* averaged input components for stationary */
- /* estimation */
- Word16 old_level[COMPLEN]; /* input levels of the previous frame */
- Word16 sub_level[COMPLEN]; /* input levels calculated at the end of a frame (lookahead) */
- Word16 a_data5[F_5TH_CNT][2]; /* memory for the filter bank */
- Word16 a_data3[F_3TH_CNT]; /* memory for the filter bank */
+ Word16 bckr_est[COMPLEN]; /* background noise estimate */
+ Word16 ave_level[COMPLEN]; /* averaged input components for stationary */
+ /* estimation */
+ Word16 old_level[COMPLEN]; /* input levels of the previous frame */
+ Word16 sub_level[COMPLEN]; /* input levels calculated at the end of a frame (lookahead) */
+ Word16 a_data5[F_5TH_CNT][2]; /* memory for the filter bank */
+ Word16 a_data3[F_3TH_CNT]; /* memory for the filter bank */
- Word16 burst_count; /* counts length of a speech burst */
- Word16 hang_count; /* hangover counter */
- Word16 stat_count; /* stationary counter */
+ Word16 burst_count; /* counts length of a speech burst */
+ Word16 hang_count; /* hangover counter */
+ Word16 stat_count; /* stationary counter */
- /* Note that each of the following two variables holds 15 flags. Each flag reserves 1 bit of the
- * variable. The newest flag is in the bit 15 (assuming that LSB is bit 1 and MSB is bit 16). */
- Word16 vadreg; /* flags for intermediate VAD decisions */
- Word16 tone_flag; /* tone detection flags */
+ /* Note that each of the following two variables holds 15 flags. Each flag reserves 1 bit of the
+ * variable. The newest flag is in the bit 15 (assuming that LSB is bit 1 and MSB is bit 16). */
+ Word16 vadreg; /* flags for intermediate VAD decisions */
+ Word16 tone_flag; /* tone detection flags */
- Word16 sp_est_cnt; /* counter for speech level estimation */
- Word16 sp_max; /* maximum level */
- Word16 sp_max_cnt; /* counts frames that contains speech */
- Word16 speech_level; /* estimated speech level */
- Word32 prev_pow_sum; /* power of previous frame */
+ Word16 sp_est_cnt; /* counter for speech level estimation */
+ Word16 sp_max; /* maximum level */
+ Word16 sp_max_cnt; /* counts frames that contains speech */
+ Word16 speech_level; /* estimated speech level */
+ Word32 prev_pow_sum; /* power of previous frame */
} VadVars;
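The comment block above describes vadreg and tone_flag as 15-flag shift registers: each frame the register is aged by one position and the newest decision is written to bit 15 (mask 0x4000 when the LSB is called bit 1). A hedged sketch of such an update, written from that comment rather than from wb_vad.c, so the exact constants are assumptions:

typedef short Word16;

static Word16 push_flag(Word16 reg, int newest_flag)
{
    reg = (Word16)((reg >> 1) & 0x3fff);   /* age the stored flags by one frame */
    if (newest_flag)
        reg = (Word16)(reg | 0x4000);      /* newest flag lands in bit 15 */
    return reg;
}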
diff --git a/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h b/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h
index 04fd318..00b1779 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h
@@ -16,9 +16,9 @@
/*-------------------------------------------------------------------*
- * WB_VAD_C.H *
+ * WB_VAD_C.H *
*-------------------------------------------------------------------*
- * Constants for Voice Activity Detection. *
+ * Constants for Voice Activity Detection. *
*-------------------------------------------------------------------*/
#ifndef __WB_VAD_C_H__
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s
index 282db92..42ebc32 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s
@@ -99,6 +99,6 @@ LOOP:
LDMFD r13!, {r4 - r12, r15}
@ENDP
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s
index 4aa317e..3f060ff 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s
@@ -75,6 +75,6 @@ Dot_product12_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
index f23b5a0..9cad479 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
@@ -183,6 +183,6 @@ Filt_6k_7k_end:
Lable1:
.word fir_6k_7k-Lable1
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s
index 49bdc2b..ffedbde 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s
@@ -226,6 +226,6 @@ Norm_corr_asm_end:
ADD r13, r13, #voSTACK
LDMFD r13!, {r4 - r12, r15}
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s
index 3f4930c..9743b9e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s
@@ -221,6 +221,6 @@ Syn_filt_32_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s
index 71bb532..cd75179 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s
@@ -181,6 +181,6 @@ Convolve_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s
index 2d4c7cc..eedccc7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s
@@ -143,7 +143,7 @@ the_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
index deb7efc..60c2a47 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
@@ -45,7 +45,8 @@ pred_lt4_asm:
SUBLT r5, r5, #2 @x--
SUB r5, r5, #30 @x -= 15
RSB r4, r2, #3 @k = 3 - frac
- ADRL r8, Table
+ ADR r8, Table
+ NOP @space for fixed up relative address of ADR
LDR r6, [r8]
ADD r6, r8
MOV r8, r4, LSL #6
@@ -456,7 +457,7 @@ pred_lt4_end:
Table:
.word inter4_2-Table
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s
index 5ff0964..d71d790 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s
@@ -220,7 +220,7 @@ end:
LDMFD r13!, {r4 -r12,pc}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s
index b300224..e8802f5 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s
@@ -67,7 +67,7 @@ The_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s
index 0c287a4..2a1e0d7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s
@@ -233,6 +233,6 @@ Syn_filt_asm_end:
ADD r13, r13, #700
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s
index 1d5893f..91feea0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s
@@ -98,5 +98,5 @@ LOOP:
LDMFD r13!, {r4 - r12, r15}
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s
index 8230944..7149a49 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s
@@ -123,5 +123,5 @@ Dot_product12_end:
LDMFD r13!, {r4 - r12, r15}
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
index 8df0caa..e0f992f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
@@ -226,6 +226,6 @@ Filt_6k_7k_end:
Lable1:
.word fir_6k_7k-Lable1
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s
index 4263cd4..28e6d6c 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s
@@ -265,6 +265,6 @@ Norm_corr_asm_end:
ADD r13, r13, #voSTACK
LDMFD r13!, {r4 - r12, r15}
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s
index e786dde..9687431 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s
@@ -128,6 +128,6 @@ Syn_filt_32_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s
index 8efa9fb..9fb3a6e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s
@@ -174,5 +174,5 @@ Convolve_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s
index 8904289..a4deda3 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s
@@ -143,7 +143,7 @@ LOOPj2:
the_end:
LDMFD r13!, {r4 - r12, r15}
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
index 67be1ed..f8b634f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
@@ -99,5 +99,5 @@ pred_lt4_end:
Lable1:
.word inter4_2-Lable1
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s
index 394fa83..bc3d780 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s
@@ -122,6 +122,6 @@ Residu_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s
index e45daac..89c0572 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s
@@ -133,6 +133,6 @@ Scale_sig_asm_end:
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s
index 5731bdb..029560e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s
@@ -101,6 +101,6 @@ Syn_filt_asm_end:
ADD r13, r13, #700
LDMFD r13!, {r4 - r12, r15}
@ENDFUNC
- .END
+ .end
diff --git a/media/libstagefright/codecs/amrwbenc/src/autocorr.c b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
index 0b2ea89..3ea53f7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/autocorr.c
+++ b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
@@ -31,100 +31,100 @@
#define UNUSED(x) (void)(x)
void Autocorr(
- Word16 x[], /* (i) : Input signal */
- Word16 m, /* (i) : LPC order */
- Word16 r_h[], /* (o) Q15: Autocorrelations (msb) */
- Word16 r_l[] /* (o) : Autocorrelations (lsb) */
- )
+ Word16 x[], /* (i) : Input signal */
+ Word16 m, /* (i) : LPC order */
+ Word16 r_h[], /* (o) Q15: Autocorrelations (msb) */
+ Word16 r_l[] /* (o) : Autocorrelations (lsb) */
+ )
{
- Word32 i, norm, shift;
- Word16 y[L_WINDOW];
- Word32 L_sum, L_sum1, L_tmp, F_LEN;
- Word16 *p1,*p2,*p3;
- const Word16 *p4;
+ Word32 i, norm, shift;
+ Word16 y[L_WINDOW];
+ Word32 L_sum, L_sum1, L_tmp, F_LEN;
+ Word16 *p1,*p2,*p3;
+ const Word16 *p4;
UNUSED(m);
- /* Windowing of signal */
- p1 = x;
- p4 = vo_window;
- p3 = y;
-
- for (i = 0; i < L_WINDOW; i+=4)
- {
- *p3++ = vo_mult_r((*p1++), (*p4++));
- *p3++ = vo_mult_r((*p1++), (*p4++));
- *p3++ = vo_mult_r((*p1++), (*p4++));
- *p3++ = vo_mult_r((*p1++), (*p4++));
- }
-
- /* calculate energy of signal */
- L_sum = vo_L_deposit_h(16); /* sqrt(256), avoid overflow after rounding */
- for (i = 0; i < L_WINDOW; i++)
- {
- L_tmp = vo_L_mult(y[i], y[i]);
- L_tmp = (L_tmp >> 8);
- L_sum += L_tmp;
- }
-
- /* scale signal to avoid overflow in autocorrelation */
- norm = norm_l(L_sum);
- shift = 4 - (norm >> 1);
- if(shift > 0)
- {
- p1 = y;
- for (i = 0; i < L_WINDOW; i+=4)
- {
- *p1 = vo_shr_r(*p1, shift);
- p1++;
- *p1 = vo_shr_r(*p1, shift);
- p1++;
- *p1 = vo_shr_r(*p1, shift);
- p1++;
- *p1 = vo_shr_r(*p1, shift);
- p1++;
- }
- }
-
- /* Compute and normalize r[0] */
- L_sum = 1;
- for (i = 0; i < L_WINDOW; i+=4)
- {
- L_sum += vo_L_mult(y[i], y[i]);
- L_sum += vo_L_mult(y[i+1], y[i+1]);
- L_sum += vo_L_mult(y[i+2], y[i+2]);
- L_sum += vo_L_mult(y[i+3], y[i+3]);
- }
-
- norm = norm_l(L_sum);
- L_sum = (L_sum << norm);
-
- r_h[0] = L_sum >> 16;
- r_l[0] = (L_sum & 0xffff)>>1;
-
- /* Compute r[1] to r[m] */
- for (i = 1; i <= 8; i++)
- {
- L_sum1 = 0;
- L_sum = 0;
- F_LEN = (Word32)(L_WINDOW - 2*i);
- p1 = y;
- p2 = y + (2*i)-1;
- do{
- L_sum1 += *p1 * *p2++;
- L_sum += *p1++ * *p2;
- }while(--F_LEN!=0);
-
- L_sum1 += *p1 * *p2++;
-
- L_sum1 = L_sum1<<norm;
- L_sum = L_sum<<norm;
-
- r_h[(2*i)-1] = L_sum1 >> 15;
- r_l[(2*i)-1] = L_sum1 & 0x00007fff;
- r_h[(2*i)] = L_sum >> 15;
- r_l[(2*i)] = L_sum & 0x00007fff;
- }
- return;
+ /* Windowing of signal */
+ p1 = x;
+ p4 = vo_window;
+ p3 = y;
+
+ for (i = 0; i < L_WINDOW; i+=4)
+ {
+ *p3++ = vo_mult_r((*p1++), (*p4++));
+ *p3++ = vo_mult_r((*p1++), (*p4++));
+ *p3++ = vo_mult_r((*p1++), (*p4++));
+ *p3++ = vo_mult_r((*p1++), (*p4++));
+ }
+
+ /* calculate energy of signal */
+ L_sum = vo_L_deposit_h(16); /* sqrt(256), avoid overflow after rounding */
+ for (i = 0; i < L_WINDOW; i++)
+ {
+ L_tmp = vo_L_mult(y[i], y[i]);
+ L_tmp = (L_tmp >> 8);
+ L_sum += L_tmp;
+ }
+
+ /* scale signal to avoid overflow in autocorrelation */
+ norm = norm_l(L_sum);
+ shift = 4 - (norm >> 1);
+ if(shift > 0)
+ {
+ p1 = y;
+ for (i = 0; i < L_WINDOW; i+=4)
+ {
+ *p1 = vo_shr_r(*p1, shift);
+ p1++;
+ *p1 = vo_shr_r(*p1, shift);
+ p1++;
+ *p1 = vo_shr_r(*p1, shift);
+ p1++;
+ *p1 = vo_shr_r(*p1, shift);
+ p1++;
+ }
+ }
+
+ /* Compute and normalize r[0] */
+ L_sum = 1;
+ for (i = 0; i < L_WINDOW; i+=4)
+ {
+ L_sum += vo_L_mult(y[i], y[i]);
+ L_sum += vo_L_mult(y[i+1], y[i+1]);
+ L_sum += vo_L_mult(y[i+2], y[i+2]);
+ L_sum += vo_L_mult(y[i+3], y[i+3]);
+ }
+
+ norm = norm_l(L_sum);
+ L_sum = (L_sum << norm);
+
+ r_h[0] = L_sum >> 16;
+ r_l[0] = (L_sum & 0xffff)>>1;
+
+ /* Compute r[1] to r[m] */
+ for (i = 1; i <= 8; i++)
+ {
+ L_sum1 = 0;
+ L_sum = 0;
+ F_LEN = (Word32)(L_WINDOW - 2*i);
+ p1 = y;
+ p2 = y + (2*i)-1;
+ do{
+ L_sum1 += *p1 * *p2++;
+ L_sum += *p1++ * *p2;
+ }while(--F_LEN!=0);
+
+ L_sum1 += *p1 * *p2++;
+
+ L_sum1 = L_sum1<<norm;
+ L_sum = L_sum<<norm;
+
+ r_h[(2*i)-1] = L_sum1 >> 15;
+ r_l[(2*i)-1] = L_sum1 & 0x00007fff;
+ r_h[(2*i)] = L_sum >> 15;
+ r_l[(2*i)] = L_sum & 0x00007fff;
+ }
+ return;
}
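Autocorr() windows the analysis buffer with vo_window, rescales it when the frame energy is large, and then accumulates the autocorrelation lags, two per loop pass, storing each value as an r_h/r_l high/low pair after normalizing on r[0]'s headroom. A float reference for the quantity being computed (plain autocorrelation, without the fixed-point scaling):

/* r[k] = sum_{n=k}^{N-1} y[n] * y[n-k],  k = 0..m, on the windowed signal y[]. */
static void autocorr_ref(const float *y, int N, int m, float *r)
{
    int k, n;
    for (k = 0; k <= m; k++) {
        float acc = 0.0f;
        for (n = k; n < N; n++)
            acc += y[n] * y[n - k];
        r[k] = acc;
    }
}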
diff --git a/media/libstagefright/codecs/amrwbenc/src/az_isp.c b/media/libstagefright/codecs/amrwbenc/src/az_isp.c
index 43db27a..d7074f0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/az_isp.c
+++ b/media/libstagefright/codecs/amrwbenc/src/az_isp.c
@@ -58,138 +58,138 @@
static __inline Word16 Chebps2(Word16 x, Word16 f[], Word32 n);
void Az_isp(
- Word16 a[], /* (i) Q12 : predictor coefficients */
- Word16 isp[], /* (o) Q15 : Immittance spectral pairs */
- Word16 old_isp[] /* (i) : old isp[] (in case not found M roots) */
- )
+ Word16 a[], /* (i) Q12 : predictor coefficients */
+ Word16 isp[], /* (o) Q15 : Immittance spectral pairs */
+ Word16 old_isp[] /* (i) : old isp[] (in case not found M roots) */
+ )
{
- Word32 i, j, nf, ip, order;
- Word16 xlow, ylow, xhigh, yhigh, xmid, ymid, xint;
- Word16 x, y, sign, exp;
- Word16 *coef;
- Word16 f1[NC + 1], f2[NC];
- Word32 t0;
- /*-------------------------------------------------------------*
- * find the sum and diff polynomials F1(z) and F2(z) *
- * F1(z) = [A(z) + z^M A(z^-1)] *
- * F2(z) = [A(z) - z^M A(z^-1)]/(1-z^-2) *
- * *
- * for (i=0; i<NC; i++) *
- * { *
- * f1[i] = a[i] + a[M-i]; *
- * f2[i] = a[i] - a[M-i]; *
- * } *
- * f1[NC] = 2.0*a[NC]; *
- * *
- * for (i=2; i<NC; i++) Divide by (1-z^-2) *
- * f2[i] += f2[i-2]; *
- *-------------------------------------------------------------*/
- for (i = 0; i < NC; i++)
- {
- t0 = a[i] << 15;
- f1[i] = vo_round(t0 + (a[M - i] << 15)); /* =(a[i]+a[M-i])/2 */
- f2[i] = vo_round(t0 - (a[M - i] << 15)); /* =(a[i]-a[M-i])/2 */
- }
- f1[NC] = a[NC];
- for (i = 2; i < NC; i++) /* Divide by (1-z^-2) */
- f2[i] = add1(f2[i], f2[i - 2]);
+ Word32 i, j, nf, ip, order;
+ Word16 xlow, ylow, xhigh, yhigh, xmid, ymid, xint;
+ Word16 x, y, sign, exp;
+ Word16 *coef;
+ Word16 f1[NC + 1], f2[NC];
+ Word32 t0;
+ /*-------------------------------------------------------------*
+ * find the sum and diff polynomials F1(z) and F2(z) *
+ * F1(z) = [A(z) + z^M A(z^-1)] *
+ * F2(z) = [A(z) - z^M A(z^-1)]/(1-z^-2) *
+ * *
+ * for (i=0; i<NC; i++) *
+ * { *
+ * f1[i] = a[i] + a[M-i]; *
+ * f2[i] = a[i] - a[M-i]; *
+ * } *
+ * f1[NC] = 2.0*a[NC]; *
+ * *
+ * for (i=2; i<NC; i++) Divide by (1-z^-2) *
+ * f2[i] += f2[i-2]; *
+ *-------------------------------------------------------------*/
+ for (i = 0; i < NC; i++)
+ {
+ t0 = a[i] << 15;
+ f1[i] = vo_round(t0 + (a[M - i] << 15)); /* =(a[i]+a[M-i])/2 */
+ f2[i] = vo_round(t0 - (a[M - i] << 15)); /* =(a[i]-a[M-i])/2 */
+ }
+ f1[NC] = a[NC];
+ for (i = 2; i < NC; i++) /* Divide by (1-z^-2) */
+ f2[i] = add1(f2[i], f2[i - 2]);
- /*---------------------------------------------------------------------*
- * Find the ISPs (roots of F1(z) and F2(z) ) using the *
- * Chebyshev polynomial evaluation. *
- * The roots of F1(z) and F2(z) are alternatively searched. *
- * We start by finding the first root of F1(z) then we switch *
- * to F2(z) then back to F1(z) and so on until all roots are found. *
- * *
- * - Evaluate Chebyshev pol. at grid points and check for sign change.*
- * - If sign change track the root by subdividing the interval *
- * 2 times and ckecking sign change. *
- *---------------------------------------------------------------------*/
- nf = 0; /* number of found frequencies */
- ip = 0; /* indicator for f1 or f2 */
- coef = f1;
- order = NC;
- xlow = vogrid[0];
- ylow = Chebps2(xlow, coef, order);
- j = 0;
- while ((nf < M - 1) && (j < GRID_POINTS))
- {
- j ++;
- xhigh = xlow;
- yhigh = ylow;
- xlow = vogrid[j];
- ylow = Chebps2(xlow, coef, order);
- if ((ylow * yhigh) <= (Word32) 0)
- {
- /* divide 2 times the interval */
- for (i = 0; i < 2; i++)
- {
- xmid = (xlow >> 1) + (xhigh >> 1); /* xmid = (xlow + xhigh)/2 */
- ymid = Chebps2(xmid, coef, order);
- if ((ylow * ymid) <= (Word32) 0)
- {
- yhigh = ymid;
- xhigh = xmid;
- } else
- {
- ylow = ymid;
- xlow = xmid;
- }
- }
- /*-------------------------------------------------------------*
- * Linear interpolation *
- * xint = xlow - ylow*(xhigh-xlow)/(yhigh-ylow); *
- *-------------------------------------------------------------*/
- x = xhigh - xlow;
- y = yhigh - ylow;
- if (y == 0)
- {
- xint = xlow;
- } else
- {
- sign = y;
- y = abs_s(y);
- exp = norm_s(y);
- y = y << exp;
- y = div_s((Word16) 16383, y);
- t0 = x * y;
- t0 = (t0 >> (19 - exp));
- y = vo_extract_l(t0); /* y= (xhigh-xlow)/(yhigh-ylow) in Q11 */
- if (sign < 0)
- y = -y;
- t0 = ylow * y; /* result in Q26 */
- t0 = (t0 >> 10); /* result in Q15 */
- xint = vo_sub(xlow, vo_extract_l(t0)); /* xint = xlow - ylow*y */
- }
- isp[nf] = xint;
- xlow = xint;
- nf++;
- if (ip == 0)
- {
- ip = 1;
- coef = f2;
- order = NC - 1;
- } else
- {
- ip = 0;
- coef = f1;
- order = NC;
- }
- ylow = Chebps2(xlow, coef, order);
- }
- }
- /* Check if M-1 roots found */
- if(nf < M - 1)
- {
- for (i = 0; i < M; i++)
- {
- isp[i] = old_isp[i];
- }
- } else
- {
- isp[M - 1] = a[M] << 3; /* From Q12 to Q15 with saturation */
- }
- return;
+ /*---------------------------------------------------------------------*
+ * Find the ISPs (roots of F1(z) and F2(z) ) using the *
+ * Chebyshev polynomial evaluation. *
+ * The roots of F1(z) and F2(z) are alternatively searched. *
+ * We start by finding the first root of F1(z) then we switch *
+ * to F2(z) then back to F1(z) and so on until all roots are found. *
+ * *
+ * - Evaluate Chebyshev pol. at grid points and check for sign change.*
+ * - If sign change track the root by subdividing the interval *
+ * 2 times and ckecking sign change. *
+ *---------------------------------------------------------------------*/
+ nf = 0; /* number of found frequencies */
+ ip = 0; /* indicator for f1 or f2 */
+ coef = f1;
+ order = NC;
+ xlow = vogrid[0];
+ ylow = Chebps2(xlow, coef, order);
+ j = 0;
+ while ((nf < M - 1) && (j < GRID_POINTS))
+ {
+ j ++;
+ xhigh = xlow;
+ yhigh = ylow;
+ xlow = vogrid[j];
+ ylow = Chebps2(xlow, coef, order);
+ if ((ylow * yhigh) <= (Word32) 0)
+ {
+ /* divide 2 times the interval */
+ for (i = 0; i < 2; i++)
+ {
+ xmid = (xlow >> 1) + (xhigh >> 1); /* xmid = (xlow + xhigh)/2 */
+ ymid = Chebps2(xmid, coef, order);
+ if ((ylow * ymid) <= (Word32) 0)
+ {
+ yhigh = ymid;
+ xhigh = xmid;
+ } else
+ {
+ ylow = ymid;
+ xlow = xmid;
+ }
+ }
+ /*-------------------------------------------------------------*
+ * Linear interpolation *
+ * xint = xlow - ylow*(xhigh-xlow)/(yhigh-ylow); *
+ *-------------------------------------------------------------*/
+ x = xhigh - xlow;
+ y = yhigh - ylow;
+ if (y == 0)
+ {
+ xint = xlow;
+ } else
+ {
+ sign = y;
+ y = abs_s(y);
+ exp = norm_s(y);
+ y = y << exp;
+ y = div_s((Word16) 16383, y);
+ t0 = x * y;
+ t0 = (t0 >> (19 - exp));
+ y = vo_extract_l(t0); /* y= (xhigh-xlow)/(yhigh-ylow) in Q11 */
+ if (sign < 0)
+ y = -y;
+ t0 = ylow * y; /* result in Q26 */
+ t0 = (t0 >> 10); /* result in Q15 */
+ xint = vo_sub(xlow, vo_extract_l(t0)); /* xint = xlow - ylow*y */
+ }
+ isp[nf] = xint;
+ xlow = xint;
+ nf++;
+ if (ip == 0)
+ {
+ ip = 1;
+ coef = f2;
+ order = NC - 1;
+ } else
+ {
+ ip = 0;
+ coef = f1;
+ order = NC;
+ }
+ ylow = Chebps2(xlow, coef, order);
+ }
+ }
+ /* Check if M-1 roots found */
+ if(nf < M - 1)
+ {
+ for (i = 0; i < M; i++)
+ {
+ isp[i] = old_isp[i];
+ }
+ } else
+ {
+ isp[M - 1] = a[M] << 3; /* From Q12 to Q15 with saturation */
+ }
+ return;
}
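
The root tracking in the reindented Az_isp() body above combines a coarse grid scan with two bisection steps and one linear interpolation. Below is a minimal floating-point sketch of that idea; it is an illustration only, not the fixed-point code, and cheb_eval() is an arbitrary toy Chebyshev polynomial standing in for Chebps2().

    #include <stdio.h>
    #include <math.h>

    /* Toy stand-in for Chebps2(): T3(x) = cos(3*acos(x)), roots at 0 and +/-0.866025. */
    static double cheb_eval(double x) { return cos(3.0 * acos(x)); }

    /* Refine a bracketed root the same way Az_isp() does: bisect twice, then
       interpolate linearly between the remaining bracket ends. */
    static double track_root(double xlow, double xhigh, double ylow, double yhigh)
    {
        for (int i = 0; i < 2; i++) {
            double xmid = 0.5 * (xlow + xhigh);
            double ymid = cheb_eval(xmid);
            if (ylow * ymid <= 0.0) { xhigh = xmid; yhigh = ymid; }
            else                    { xlow  = xmid; ylow  = ymid; }
        }
        if (yhigh == ylow)
            return xlow;                                       /* flat bracket */
        return xlow - ylow * (xhigh - xlow) / (yhigh - ylow);  /* linear interpolation */
    }

    int main(void)
    {
        double xprev = 1.0, yprev = cheb_eval(xprev);
        for (double x = 0.98; x > -1.0; x -= 0.02) {           /* grid scan, high to low */
            double y = cheb_eval(x);
            if (yprev * y <= 0.0) {                            /* sign change: root bracketed */
                printf("root ~= %f (exact ~0.866025)\n", track_root(x, xprev, y, yprev));
                break;
            }
            xprev = x; yprev = y;
        }
        return 0;
    }

Two bisections shrink the bracket to a quarter of the grid spacing, and the secant-style interpolation then lands close enough to the root for ISP purposes.
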
/*--------------------------------------------------------------*
@@ -213,55 +213,55 @@ void Az_isp(
static __inline Word16 Chebps2(Word16 x, Word16 f[], Word32 n)
{
- Word32 i, cheb;
- Word16 b0_h, b0_l, b1_h, b1_l, b2_h, b2_l;
- Word32 t0;
+ Word32 i, cheb;
+ Word16 b0_h, b0_l, b1_h, b1_l, b2_h, b2_l;
+ Word32 t0;
- /* Note: All computation are done in Q24. */
+    /* Note: All computations are done in Q24. */
- t0 = f[0] << 13;
- b2_h = t0 >> 16;
- b2_l = (t0 & 0xffff)>>1;
+ t0 = f[0] << 13;
+ b2_h = t0 >> 16;
+ b2_l = (t0 & 0xffff)>>1;
- t0 = ((b2_h * x)<<1) + (((b2_l * x)>>15)<<1);
- t0 <<= 1;
- t0 += (f[1] << 13); /* + f[1] in Q24 */
+ t0 = ((b2_h * x)<<1) + (((b2_l * x)>>15)<<1);
+ t0 <<= 1;
+ t0 += (f[1] << 13); /* + f[1] in Q24 */
- b1_h = t0 >> 16;
- b1_l = (t0 & 0xffff) >> 1;
+ b1_h = t0 >> 16;
+ b1_l = (t0 & 0xffff) >> 1;
- for (i = 2; i < n; i++)
- {
- t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
+ for (i = 2; i < n; i++)
+ {
+ t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
- t0 += (b2_h * (-16384))<<1;
- t0 += (f[i] << 12);
- t0 <<= 1;
- t0 -= (b2_l << 1); /* t0 = 2.0*x*b1 - b2 + f[i]; */
+ t0 += (b2_h * (-16384))<<1;
+ t0 += (f[i] << 12);
+ t0 <<= 1;
+ t0 -= (b2_l << 1); /* t0 = 2.0*x*b1 - b2 + f[i]; */
- b0_h = t0 >> 16;
- b0_l = (t0 & 0xffff) >> 1;
+ b0_h = t0 >> 16;
+ b0_l = (t0 & 0xffff) >> 1;
- b2_l = b1_l; /* b2 = b1; */
- b2_h = b1_h;
- b1_l = b0_l; /* b1 = b0; */
- b1_h = b0_h;
- }
+ b2_l = b1_l; /* b2 = b1; */
+ b2_h = b1_h;
+ b1_l = b0_l; /* b1 = b0; */
+ b1_h = b0_h;
+ }
- t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
- t0 += (b2_h * (-32768))<<1; /* t0 = x*b1 - b2 */
- t0 -= (b2_l << 1);
- t0 += (f[n] << 12); /* t0 = x*b1 - b2 + f[i]/2 */
+ t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
+ t0 += (b2_h * (-32768))<<1; /* t0 = x*b1 - b2 */
+ t0 -= (b2_l << 1);
+ t0 += (f[n] << 12); /* t0 = x*b1 - b2 + f[i]/2 */
- t0 = L_shl2(t0, 6); /* Q24 to Q30 with saturation */
+ t0 = L_shl2(t0, 6); /* Q24 to Q30 with saturation */
- cheb = extract_h(t0); /* Result in Q14 */
+ cheb = extract_h(t0); /* Result in Q14 */
- if (cheb == -32768)
- {
- cheb = -32767; /* to avoid saturation in Az_isp */
- }
- return (cheb);
+ if (cheb == -32768)
+ {
+ cheb = -32767; /* to avoid saturation in Az_isp */
+ }
+ return (cheb);
}
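
The reformatted Chebps2() above is a fixed-point Clenshaw recurrence over Chebyshev polynomials, carried out in double-precision (hi/lo word) Q24 arithmetic. Stripped of the Q-format shifts, the recurrence it implements looks roughly like this floating-point sketch (an illustration, not a drop-in replacement):

    /* Floating-point outline of the recurrence in Chebps2():
       evaluates f[0]*T_n(x) + f[1]*T_(n-1)(x) + ... + f[n-1]*T_1(x) + f[n]/2. */
    static double chebps_float(double x, const double f[], int n)
    {
        double b2 = f[0];
        double b1 = 2.0 * x * b2 + f[1];
        for (int i = 2; i < n; i++) {
            double b0 = 2.0 * x * b1 - b2 + f[i];   /* b0 = 2*x*b1 - b2 + f[i] */
            b2 = b1;                                /* b2 <- b1 */
            b1 = b0;                                /* b1 <- b0 */
        }
        return x * b1 - b2 + 0.5 * f[n];            /* last step uses x and f[n]/2 */
    }

The fixed-point version keeps b0/b1/b2 as 16-bit high/low word pairs so the 32-bit products stay within Word32 range; that bookkeeping is what most of the lines above do.
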
diff --git a/media/libstagefright/codecs/amrwbenc/src/bits.c b/media/libstagefright/codecs/amrwbenc/src/bits.c
index e78dc1f..6b8bddd 100644
--- a/media/libstagefright/codecs/amrwbenc/src/bits.c
+++ b/media/libstagefright/codecs/amrwbenc/src/bits.c
@@ -17,7 +17,7 @@
/***********************************************************************
File: bits.c
- Description: Performs bit stream manipulation
+ Description: Performs bit stream manipulation
************************************************************************/
@@ -33,151 +33,151 @@
int PackBits(Word16 prms[], /* i: analysis parameters */
- Word16 coding_mode, /* i: coding bit-stream ratio mode */
- Word16 mode, /* i: coding bit-stream ratio mode*/
- Coder_State *st /*i/o: coder global parameters struct */
- )
+ Word16 coding_mode, /* i: coding bit-stream ratio mode */
+ Word16 mode, /* i: coding bit-stream ratio mode*/
+ Coder_State *st /*i/o: coder global parameters struct */
+ )
{
- Word16 i, frame_type;
- UWord8 temp;
- UWord8 *stream_ptr;
- Word16 bitstreamformat = st->frameType;
-
- unsigned short* dataOut = st->outputStream;
-
- if (coding_mode == MRDTX)
- {
- st->sid_update_counter--;
-
- if (st->prev_ft == TX_SPEECH)
- {
- frame_type = TX_SID_FIRST;
- st->sid_update_counter = 3;
- } else
- {
- if ((st->sid_handover_debt > 0) && (st->sid_update_counter > 2))
- {
- /* ensure extra updates are properly delayed after a possible SID_FIRST */
- frame_type = TX_SID_UPDATE;
- st->sid_handover_debt--;
- } else
- {
- if (st->sid_update_counter == 0)
- {
- frame_type = TX_SID_UPDATE;
- st->sid_update_counter = 8;
- } else
- {
- frame_type = TX_NO_DATA;
- }
- }
- }
- } else
- {
- st->sid_update_counter = 8;
- frame_type = TX_SPEECH;
- }
- st->prev_ft = frame_type;
-
- if(bitstreamformat == 0) /* default file format */
- {
- *(dataOut) = TX_FRAME_TYPE;
- *(dataOut + 1) = frame_type;
- *(dataOut + 2) = mode;
- for (i = 0; i < nb_of_bits[coding_mode]; i++)
- {
- *(dataOut + 3 + i) = prms[i];
- }
- return (3 + nb_of_bits[coding_mode])<<1;
- } else
- {
- if (bitstreamformat == 1) /* ITU file format */
- {
- *(dataOut) = 0x6b21;
- if(frame_type != TX_NO_DATA && frame_type != TX_SID_FIRST)
- {
- *(dataOut + 1) = nb_of_bits[coding_mode];
- for (i = 0; i < nb_of_bits[coding_mode]; i++)
- {
- if(prms[i] == BIT_0){
- *(dataOut + 2 + i) = BIT_0_ITU;
- }
- else{
- *(dataOut + 2 + i) = BIT_1_ITU;
- }
- }
- return (2 + nb_of_bits[coding_mode])<<1;
- } else
- {
- *(dataOut + 1) = 0;
- return 2<<1;
- }
- } else /* MIME/storage file format */
- {
+ Word16 i, frame_type;
+ UWord8 temp;
+ UWord8 *stream_ptr;
+ Word16 bitstreamformat = st->frameType;
+
+ unsigned short* dataOut = st->outputStream;
+
+ if (coding_mode == MRDTX)
+ {
+ st->sid_update_counter--;
+
+ if (st->prev_ft == TX_SPEECH)
+ {
+ frame_type = TX_SID_FIRST;
+ st->sid_update_counter = 3;
+ } else
+ {
+ if ((st->sid_handover_debt > 0) && (st->sid_update_counter > 2))
+ {
+ /* ensure extra updates are properly delayed after a possible SID_FIRST */
+ frame_type = TX_SID_UPDATE;
+ st->sid_handover_debt--;
+ } else
+ {
+ if (st->sid_update_counter == 0)
+ {
+ frame_type = TX_SID_UPDATE;
+ st->sid_update_counter = 8;
+ } else
+ {
+ frame_type = TX_NO_DATA;
+ }
+ }
+ }
+ } else
+ {
+ st->sid_update_counter = 8;
+ frame_type = TX_SPEECH;
+ }
+ st->prev_ft = frame_type;
+
+ if(bitstreamformat == 0) /* default file format */
+ {
+ *(dataOut) = TX_FRAME_TYPE;
+ *(dataOut + 1) = frame_type;
+ *(dataOut + 2) = mode;
+ for (i = 0; i < nb_of_bits[coding_mode]; i++)
+ {
+ *(dataOut + 3 + i) = prms[i];
+ }
+ return (3 + nb_of_bits[coding_mode])<<1;
+ } else
+ {
+ if (bitstreamformat == 1) /* ITU file format */
+ {
+ *(dataOut) = 0x6b21;
+ if(frame_type != TX_NO_DATA && frame_type != TX_SID_FIRST)
+ {
+ *(dataOut + 1) = nb_of_bits[coding_mode];
+ for (i = 0; i < nb_of_bits[coding_mode]; i++)
+ {
+ if(prms[i] == BIT_0){
+ *(dataOut + 2 + i) = BIT_0_ITU;
+ }
+ else{
+ *(dataOut + 2 + i) = BIT_1_ITU;
+ }
+ }
+ return (2 + nb_of_bits[coding_mode])<<1;
+ } else
+ {
+ *(dataOut + 1) = 0;
+ return 2<<1;
+ }
+ } else /* MIME/storage file format */
+ {
#define MRSID 9
- /* change mode index in case of SID frame */
- if (coding_mode == MRDTX)
- {
- coding_mode = MRSID;
- if (frame_type == TX_SID_FIRST)
- {
- for (i = 0; i < NBBITS_SID; i++) prms[i] = BIT_0;
- }
- }
- /* -> force NO_DATA frame */
- if (coding_mode < 0 || coding_mode > 15 || (coding_mode > MRSID && coding_mode < 14))
- {
- coding_mode = 15;
- }
- /* mark empty frames between SID updates as NO_DATA frames */
- if (coding_mode == MRSID && frame_type == TX_NO_DATA)
- {
- coding_mode = 15;
- }
- /* set pointer for packed frame, note that we handle data as bytes */
- stream_ptr = (UWord8*)dataOut;
- /* insert table of contents (ToC) byte at the beginning of the packet */
- *stream_ptr = toc_byte[coding_mode];
- stream_ptr++;
- temp = 0;
- /* sort and pack AMR-WB speech or SID bits */
- for (i = 1; i < unpacked_size[coding_mode] + 1; i++)
- {
- if (prms[sort_ptr[coding_mode][i-1]] == BIT_1)
- {
- temp++;
- }
- if (i&0x7)
- {
- temp <<= 1;
- }
- else
- {
- *stream_ptr = temp;
- stream_ptr++;
- temp = 0;
- }
- }
- /* insert SID type indication and speech mode in case of SID frame */
- if (coding_mode == MRSID)
- {
- if (frame_type == TX_SID_UPDATE)
- {
- temp++;
- }
- temp <<= 4;
- temp += mode & 0x000F;
- }
- /* insert unused bits (zeros) at the tail of the last byte */
- if (unused_size[coding_mode])
- {
- temp <<= (unused_size[coding_mode] - 1);
- }
- *stream_ptr = temp;
- /* write packed frame into file (1 byte added to cover ToC entry) */
- return (1 + packed_size[coding_mode]);
- }
- }
+ /* change mode index in case of SID frame */
+ if (coding_mode == MRDTX)
+ {
+ coding_mode = MRSID;
+ if (frame_type == TX_SID_FIRST)
+ {
+ for (i = 0; i < NBBITS_SID; i++) prms[i] = BIT_0;
+ }
+ }
+ /* -> force NO_DATA frame */
+ if (coding_mode < 0 || coding_mode > 15 || (coding_mode > MRSID && coding_mode < 14))
+ {
+ coding_mode = 15;
+ }
+ /* mark empty frames between SID updates as NO_DATA frames */
+ if (coding_mode == MRSID && frame_type == TX_NO_DATA)
+ {
+ coding_mode = 15;
+ }
+ /* set pointer for packed frame, note that we handle data as bytes */
+ stream_ptr = (UWord8*)dataOut;
+ /* insert table of contents (ToC) byte at the beginning of the packet */
+ *stream_ptr = toc_byte[coding_mode];
+ stream_ptr++;
+ temp = 0;
+ /* sort and pack AMR-WB speech or SID bits */
+ for (i = 1; i < unpacked_size[coding_mode] + 1; i++)
+ {
+ if (prms[sort_ptr[coding_mode][i-1]] == BIT_1)
+ {
+ temp++;
+ }
+ if (i&0x7)
+ {
+ temp <<= 1;
+ }
+ else
+ {
+ *stream_ptr = temp;
+ stream_ptr++;
+ temp = 0;
+ }
+ }
+ /* insert SID type indication and speech mode in case of SID frame */
+ if (coding_mode == MRSID)
+ {
+ if (frame_type == TX_SID_UPDATE)
+ {
+ temp++;
+ }
+ temp <<= 4;
+ temp += mode & 0x000F;
+ }
+ /* insert unused bits (zeros) at the tail of the last byte */
+ if (unused_size[coding_mode])
+ {
+ temp <<= (unused_size[coding_mode] - 1);
+ }
+ *stream_ptr = temp;
+ /* write packed frame into file (1 byte added to cover ToC entry) */
+ return (1 + packed_size[coding_mode]);
+ }
+ }
}
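
The MIME/storage branch of PackBits() boils down to: emit a one-byte table-of-contents (ToC) entry, then pack the reordered speech or SID bits MSB-first into bytes, padding the last byte with zeros. A simplified sketch of that packing pattern follows; it omits the sort_ptr[] reordering and the SID mode field of the real code, and the helper name is illustrative only.

    #include <stdint.h>
    #include <stddef.h>

    /* Pack 'nbits' 0/1 flags MSB-first into 'out', preceded by a one-byte ToC
       entry. Returns the total number of bytes written. */
    static size_t pack_frame(uint8_t toc, const uint8_t bits[], size_t nbits,
                             uint8_t out[])
    {
        size_t n = 0;
        out[n++] = toc;                       /* ToC byte first */
        uint8_t acc = 0;
        int filled = 0;
        for (size_t i = 0; i < nbits; i++) {
            acc = (uint8_t)((acc << 1) | (bits[i] & 1));
            if (++filled == 8) {              /* byte full -> flush */
                out[n++] = acc;
                acc = 0;
                filled = 0;
            }
        }
        if (filled)                           /* pad the tail with zeros */
            out[n++] = (uint8_t)(acc << (8 - filled));
        return n;
    }

For the default and ITU formats the function instead writes one 16-bit word per bit, which is why those branches return (3 + nb_of_bits)<<1 and (2 + nb_of_bits)<<1 bytes respectively.
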
/*-----------------------------------------------------*
@@ -185,24 +185,24 @@ int PackBits(Word16 prms[], /* i: analysis parameters */
*-----------------------------------------------------*/
void Parm_serial(
- Word16 value, /* input : parameter value */
- Word16 no_of_bits, /* input : number of bits */
- Word16 ** prms
- )
+ Word16 value, /* input : parameter value */
+ Word16 no_of_bits, /* input : number of bits */
+ Word16 ** prms
+ )
{
- Word16 i, bit;
- *prms += no_of_bits;
- for (i = 0; i < no_of_bits; i++)
- {
- bit = (Word16) (value & 0x0001); /* get lsb */
- if (bit == 0)
- *--(*prms) = BIT_0;
- else
- *--(*prms) = BIT_1;
- value >>= 1;
- }
- *prms += no_of_bits;
- return;
+ Word16 i, bit;
+ *prms += no_of_bits;
+ for (i = 0; i < no_of_bits; i++)
+ {
+ bit = (Word16) (value & 0x0001); /* get lsb */
+ if (bit == 0)
+ *--(*prms) = BIT_0;
+ else
+ *--(*prms) = BIT_1;
+ value >>= 1;
+ }
+ *prms += no_of_bits;
+ return;
}
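
Parm_serial() advances the write pointer first, fills the bits backwards starting from the LSB, and then advances the pointer again, which leaves the bits in the stream in MSB-first order. A plain-C equivalent without the pointer dance, with BIT_0/BIT_1 replaced by caller-supplied values purely for illustration:

    /* Emit 'no_of_bits' bits of 'value', most significant first, and leave
       *prms pointing just past the last bit written. */
    static void serialize_param(int value, int no_of_bits, short **prms,
                                short bit0, short bit1)
    {
        short *p = *prms;
        for (int i = no_of_bits - 1; i >= 0; i--)
            *p++ = ((value >> i) & 1) ? bit1 : bit0;
        *prms = p;
    }
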
diff --git a/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
index 18698e2..dbb94c6 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
@@ -17,7 +17,7 @@
/************************************************************************
* File: c2t64fx.c *
* *
-* Description:Performs algebraic codebook search for 6.60kbits mode*
+* Description: Performs algebraic codebook search for the 6.60 kbit/s mode  *
* *
*************************************************************************/
@@ -45,252 +45,255 @@
**************************************************************************/
void ACELP_2t64_fx(
- Word16 dn[], /* (i) <12b : correlation between target x[] and H[] */
- Word16 cn[], /* (i) <12b : residual after long term prediction */
- Word16 H[], /* (i) Q12: impulse response of weighted synthesis filter */
- Word16 code[], /* (o) Q9 : algebraic (fixed) codebook excitation */
- Word16 y[], /* (o) Q9 : filtered fixed codebook excitation */
- Word16 * index /* (o) : index (12): 5+1+5+1 = 11 bits. */
- )
+ Word16 dn[], /* (i) <12b : correlation between target x[] and H[] */
+ Word16 cn[], /* (i) <12b : residual after long term prediction */
+ Word16 H[], /* (i) Q12: impulse response of weighted synthesis filter */
+ Word16 code[], /* (o) Q9 : algebraic (fixed) codebook excitation */
+ Word16 y[], /* (o) Q9 : filtered fixed codebook excitation */
+ Word16 * index /* (o) : index (12): 5+1+5+1 = 11 bits. */
+ )
{
- Word32 i, j, k, i0, i1, ix, iy, pos, pos2;
- Word16 ps, psk, ps1, ps2, alpk, alp1, alp2, sq;
- Word16 alp, val, exp, k_cn, k_dn;
- Word16 *p0, *p1, *p2, *psign;
- Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf;
+ Word32 i, j, k, i0, i1, ix, iy, pos, pos2;
+ Word16 ps, psk, ps1, ps2, alpk, alp1, alp2, sq;
+ Word16 alp, val, exp, k_cn, k_dn;
+ Word16 *p0, *p1, *p2, *psign;
+ Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf;
- Word16 sign[L_SUBFR], vec[L_SUBFR], dn2[L_SUBFR];
- Word16 h_buf[4 * L_SUBFR] = {0};
- Word16 rrixix[NB_TRACK][NB_POS];
- Word16 rrixiy[MSIZE];
- Word32 s, cor;
+ Word16 sign[L_SUBFR], vec[L_SUBFR], dn2[L_SUBFR];
+ Word16 h_buf[4 * L_SUBFR] = {0};
+ Word16 rrixix[NB_TRACK][NB_POS];
+ Word16 rrixiy[MSIZE];
+ Word32 s, cor;
- /*----------------------------------------------------------------*
- * Find sign for each pulse position. *
- *----------------------------------------------------------------*/
- alp = 8192; /* alp = 2.0 (Q12) */
+ /*----------------------------------------------------------------*
+ * Find sign for each pulse position. *
+ *----------------------------------------------------------------*/
+ alp = 8192; /* alp = 2.0 (Q12) */
- /* calculate energy for normalization of cn[] and dn[] */
- /* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
+ /* calculate energy for normalization of cn[] and dn[] */
+ /* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
#ifdef ASM_OPT /* asm optimization branch */
- s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
+ s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
#else
- s = Dot_product12(cn, cn, L_SUBFR, &exp);
+ s = Dot_product12(cn, cn, L_SUBFR, &exp);
#endif
- Isqrt_n(&s, &exp);
- s = L_shl(s, add1(exp, 5));
- k_cn = vo_round(s);
+ Isqrt_n(&s, &exp);
+ s = L_shl(s, add1(exp, 5));
+ if (s > INT_MAX - 0x8000) {
+ s = INT_MAX - 0x8000;
+ }
+ k_cn = vo_round(s);
- /* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
+ /* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
#ifdef ASM_OPT /* asm optimization branch */
- s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
+ s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
#else
- s = Dot_product12(dn, dn, L_SUBFR, &exp);
+ s = Dot_product12(dn, dn, L_SUBFR, &exp);
#endif
- Isqrt_n(&s, &exp);
- k_dn = vo_round(L_shl(s, (exp + 8))); /* k_dn = 256..4096 */
- k_dn = vo_mult_r(alp, k_dn); /* alp in Q12 */
+ Isqrt_n(&s, &exp);
+ k_dn = voround(L_shl(s, (exp + 8))); /* k_dn = 256..4096 */
+ k_dn = vo_mult_r(alp, k_dn); /* alp in Q12 */
- /* mix normalized cn[] and dn[] */
- p0 = cn;
- p1 = dn;
- p2 = dn2;
+ /* mix normalized cn[] and dn[] */
+ p0 = cn;
+ p1 = dn;
+ p2 = dn2;
- for (i = 0; i < L_SUBFR/4; i++)
- {
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- }
+ for (i = 0; i < L_SUBFR/4; i++)
+ {
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ }
- /* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[] */
- for (i = 0; i < L_SUBFR; i ++)
- {
- val = dn[i];
- ps = dn2[i];
- if (ps >= 0)
- {
- sign[i] = 32767; /* sign = +1 (Q12) */
- vec[i] = -32768;
- } else
- {
- sign[i] = -32768; /* sign = -1 (Q12) */
- vec[i] = 32767;
- dn[i] = -val;
- }
- }
- /*------------------------------------------------------------*
- * Compute h_inv[i]. *
- *------------------------------------------------------------*/
- /* impulse response buffer for fast computation */
- h = h_buf + L_SUBFR;
- h_inv = h + (L_SUBFR<<1);
+ /* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[] */
+ for (i = 0; i < L_SUBFR; i ++)
+ {
+ val = dn[i];
+ ps = dn2[i];
+ if (ps >= 0)
+ {
+ sign[i] = 32767; /* sign = +1 (Q12) */
+ vec[i] = -32768;
+ } else
+ {
+ sign[i] = -32768; /* sign = -1 (Q12) */
+ vec[i] = 32767;
+ dn[i] = -val;
+ }
+ }
+ /*------------------------------------------------------------*
+ * Compute h_inv[i]. *
+ *------------------------------------------------------------*/
+ /* impulse response buffer for fast computation */
+ h = h_buf + L_SUBFR;
+ h_inv = h + (L_SUBFR<<1);
- for (i = 0; i < L_SUBFR; i++)
- {
- h[i] = H[i];
- h_inv[i] = vo_negate(h[i]);
- }
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ h[i] = H[i];
+ h_inv[i] = vo_negate(h[i]);
+ }
- /*------------------------------------------------------------*
- * Compute rrixix[][] needed for the codebook search. *
- * Result is multiplied by 0.5 *
- *------------------------------------------------------------*/
- /* Init pointers to last position of rrixix[] */
- p0 = &rrixix[0][NB_POS - 1];
- p1 = &rrixix[1][NB_POS - 1];
+ /*------------------------------------------------------------*
+ * Compute rrixix[][] needed for the codebook search. *
+ * Result is multiplied by 0.5 *
+ *------------------------------------------------------------*/
+ /* Init pointers to last position of rrixix[] */
+ p0 = &rrixix[0][NB_POS - 1];
+ p1 = &rrixix[1][NB_POS - 1];
- ptr_h1 = h;
- cor = 0x00010000L; /* for rounding */
- for (i = 0; i < NB_POS; i++)
- {
- cor += ((*ptr_h1) * (*ptr_h1) << 1);
- ptr_h1++;
- *p1-- = (extract_h(cor) >> 1);
- cor += ((*ptr_h1) * (*ptr_h1) << 1);
- ptr_h1++;
- *p0-- = (extract_h(cor) >> 1);
- }
+ ptr_h1 = h;
+ cor = 0x00010000L; /* for rounding */
+ for (i = 0; i < NB_POS; i++)
+ {
+ cor += ((*ptr_h1) * (*ptr_h1) << 1);
+ ptr_h1++;
+ *p1-- = (extract_h(cor) >> 1);
+ cor += ((*ptr_h1) * (*ptr_h1) << 1);
+ ptr_h1++;
+ *p0-- = (extract_h(cor) >> 1);
+ }
- /*------------------------------------------------------------*
- * Compute rrixiy[][] needed for the codebook search. *
- *------------------------------------------------------------*/
- pos = MSIZE - 1;
- pos2 = MSIZE - 2;
- ptr_hf = h + 1;
+ /*------------------------------------------------------------*
+ * Compute rrixiy[][] needed for the codebook search. *
+ *------------------------------------------------------------*/
+ pos = MSIZE - 1;
+ pos2 = MSIZE - 2;
+ ptr_hf = h + 1;
- for (k = 0; k < NB_POS; k++)
- {
- p1 = &rrixiy[pos];
- p0 = &rrixiy[pos2];
- cor = 0x00008000L; /* for rounding */
- ptr_h1 = h;
- ptr_h2 = ptr_hf;
+ for (k = 0; k < NB_POS; k++)
+ {
+ p1 = &rrixiy[pos];
+ p0 = &rrixiy[pos2];
+ cor = 0x00008000L; /* for rounding */
+ ptr_h1 = h;
+ ptr_h2 = ptr_hf;
- for (i = (k + 1); i < NB_POS; i++)
- {
- cor += ((*ptr_h1) * (*ptr_h2))<<1;
- ptr_h1++;
- ptr_h2++;
- *p1 = extract_h(cor);
- cor += ((*ptr_h1) * (*ptr_h2))<<1;
- ptr_h1++;
- ptr_h2++;
- *p0 = extract_h(cor);
+ for (i = (k + 1); i < NB_POS; i++)
+ {
+ cor += ((*ptr_h1) * (*ptr_h2))<<1;
+ ptr_h1++;
+ ptr_h2++;
+ *p1 = extract_h(cor);
+ cor += ((*ptr_h1) * (*ptr_h2))<<1;
+ ptr_h1++;
+ ptr_h2++;
+ *p0 = extract_h(cor);
- p1 -= (NB_POS + 1);
- p0 -= (NB_POS + 1);
- }
- cor += ((*ptr_h1) * (*ptr_h2))<<1;
- ptr_h1++;
- ptr_h2++;
- *p1 = extract_h(cor);
+ p1 -= (NB_POS + 1);
+ p0 -= (NB_POS + 1);
+ }
+ cor += ((*ptr_h1) * (*ptr_h2))<<1;
+ ptr_h1++;
+ ptr_h2++;
+ *p1 = extract_h(cor);
- pos -= NB_POS;
- pos2--;
- ptr_hf += STEP;
- }
+ pos -= NB_POS;
+ pos2--;
+ ptr_hf += STEP;
+ }
- /*------------------------------------------------------------*
- * Modification of rrixiy[][] to take signs into account. *
- *------------------------------------------------------------*/
- p0 = rrixiy;
- for (i = 0; i < L_SUBFR; i += STEP)
- {
- psign = sign;
- if (psign[i] < 0)
- {
- psign = vec;
- }
- for (j = 1; j < L_SUBFR; j += STEP)
- {
- *p0 = vo_mult(*p0, psign[j]);
- p0++;
- }
- }
- /*-------------------------------------------------------------------*
- * search 2 pulses: *
- * ~@~~~~~~~~~~~~~~ *
- * 32 pos x 32 pos = 1024 tests (all combinaisons is tested) *
- *-------------------------------------------------------------------*/
- p0 = rrixix[0];
- p1 = rrixix[1];
- p2 = rrixiy;
+ /*------------------------------------------------------------*
+ * Modification of rrixiy[][] to take signs into account. *
+ *------------------------------------------------------------*/
+ p0 = rrixiy;
+ for (i = 0; i < L_SUBFR; i += STEP)
+ {
+ psign = sign;
+ if (psign[i] < 0)
+ {
+ psign = vec;
+ }
+ for (j = 1; j < L_SUBFR; j += STEP)
+ {
+ *p0 = vo_mult(*p0, psign[j]);
+ p0++;
+ }
+ }
+ /*-------------------------------------------------------------------*
+ * search 2 pulses: *
+ * ~@~~~~~~~~~~~~~~ *
+     * 32 pos x 32 pos = 1024 tests (all combinations are tested)       *
+ *-------------------------------------------------------------------*/
+ p0 = rrixix[0];
+ p1 = rrixix[1];
+ p2 = rrixiy;
- psk = -1;
- alpk = 1;
- ix = 0;
- iy = 1;
+ psk = -1;
+ alpk = 1;
+ ix = 0;
+ iy = 1;
- for (i0 = 0; i0 < L_SUBFR; i0 += STEP)
- {
- ps1 = dn[i0];
- alp1 = (*p0++);
- pos = -1;
- for (i1 = 1; i1 < L_SUBFR; i1 += STEP)
- {
- ps2 = add1(ps1, dn[i1]);
- alp2 = add1(alp1, add1(*p1++, *p2++));
- sq = vo_mult(ps2, ps2);
- s = vo_L_mult(alpk, sq) - ((psk * alp2)<<1);
- if (s > 0)
- {
- psk = sq;
- alpk = alp2;
- pos = i1;
- }
- }
- p1 -= NB_POS;
- if (pos >= 0)
- {
- ix = i0;
- iy = pos;
- }
- }
- /*-------------------------------------------------------------------*
- * Build the codeword, the filtered codeword and index of codevector.*
- *-------------------------------------------------------------------*/
+ for (i0 = 0; i0 < L_SUBFR; i0 += STEP)
+ {
+ ps1 = dn[i0];
+ alp1 = (*p0++);
+ pos = -1;
+ for (i1 = 1; i1 < L_SUBFR; i1 += STEP)
+ {
+ ps2 = add1(ps1, dn[i1]);
+ alp2 = add1(alp1, add1(*p1++, *p2++));
+ sq = vo_mult(ps2, ps2);
+ s = vo_L_mult(alpk, sq) - ((psk * alp2)<<1);
+ if (s > 0)
+ {
+ psk = sq;
+ alpk = alp2;
+ pos = i1;
+ }
+ }
+ p1 -= NB_POS;
+ if (pos >= 0)
+ {
+ ix = i0;
+ iy = pos;
+ }
+ }
+ /*-------------------------------------------------------------------*
+ * Build the codeword, the filtered codeword and index of codevector.*
+ *-------------------------------------------------------------------*/
- for (i = 0; i < L_SUBFR; i++)
- {
- code[i] = 0;
- }
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ code[i] = 0;
+ }
- i0 = (ix >> 1); /* pos of pulse 1 (0..31) */
- i1 = (iy >> 1); /* pos of pulse 2 (0..31) */
- if (sign[ix] > 0)
- {
- code[ix] = 512; /* codeword in Q9 format */
- p0 = h - ix;
- } else
- {
- code[ix] = -512;
- i0 += NB_POS;
- p0 = h_inv - ix;
- }
- if (sign[iy] > 0)
- {
- code[iy] = 512;
- p1 = h - iy;
- } else
- {
- code[iy] = -512;
- i1 += NB_POS;
- p1 = h_inv - iy;
- }
- *index = add1((i0 << 6), i1);
- for (i = 0; i < L_SUBFR; i++)
- {
- y[i] = vo_shr_r(add1((*p0++), (*p1++)), 3);
- }
- return;
+ i0 = (ix >> 1); /* pos of pulse 1 (0..31) */
+ i1 = (iy >> 1); /* pos of pulse 2 (0..31) */
+ if (sign[ix] > 0)
+ {
+ code[ix] = 512; /* codeword in Q9 format */
+ p0 = h - ix;
+ } else
+ {
+ code[ix] = -512;
+ i0 += NB_POS;
+ p0 = h_inv - ix;
+ }
+ if (sign[iy] > 0)
+ {
+ code[iy] = 512;
+ p1 = h - iy;
+ } else
+ {
+ code[iy] = -512;
+ i1 += NB_POS;
+ p1 = h_inv - iy;
+ }
+ *index = add1((i0 << 6), i1);
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ y[i] = vo_shr_r(add1((*p0++), (*p1++)), 3);
+ }
+ return;
}
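
Beyond the reindentation, the functional changes in ACELP_2t64_fx() above (and in ACELP_4t64_fx() in c4t64fx.c below) are the added overflow guards: s is clamped before vo_round() so that adding the 0x8000 rounding bias cannot wrap, and the energy accumulation into L_tmp is saturated instead of being allowed to overflow. The same pattern written as standalone helpers, as a sketch using int32_t rather than the codec's Word32 typedef:

    #include <stdint.h>

    /* Clamp a value so that a subsequent +0x8000 rounding bias cannot overflow. */
    static int32_t clamp_before_round(int32_t s)
    {
        return (s > INT32_MAX - 0x8000) ? (INT32_MAX - 0x8000) : s;
    }

    /* Saturating 32-bit accumulation, mirroring the L_tmp guard below. */
    static int32_t sat_add32(int32_t acc, int32_t term)
    {
        if (term > 0 && acc > INT32_MAX - term) return INT32_MAX;
        if (term < 0 && acc < INT32_MIN - term) return INT32_MIN;
        return acc + term;
    }

Either guard keeps signed-overflow behaviour out of the search loops while leaving the selected codebook entries unchanged whenever no saturation occurs.
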
diff --git a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
index 1ecc11f..49a89a1 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
@@ -17,7 +17,7 @@
/***********************************************************************
* File: c4t64fx.c *
* *
-* Description:Performs algebraic codebook search for higher modes *
+* Description: Performs algebraic codebook search for higher modes     *
* *
************************************************************************/
@@ -48,15 +48,15 @@
#include "q_pulse.h"
static Word16 tipos[36] = {
- 0, 1, 2, 3, /* starting point &ipos[0], 1st iter */
- 1, 2, 3, 0, /* starting point &ipos[4], 2nd iter */
- 2, 3, 0, 1, /* starting point &ipos[8], 3rd iter */
- 3, 0, 1, 2, /* starting point &ipos[12], 4th iter */
- 0, 1, 2, 3,
- 1, 2, 3, 0,
- 2, 3, 0, 1,
- 3, 0, 1, 2,
- 0, 1, 2, 3}; /* end point for 24 pulses &ipos[35], 4th iter */
+ 0, 1, 2, 3, /* starting point &ipos[0], 1st iter */
+ 1, 2, 3, 0, /* starting point &ipos[4], 2nd iter */
+ 2, 3, 0, 1, /* starting point &ipos[8], 3rd iter */
+ 3, 0, 1, 2, /* starting point &ipos[12], 4th iter */
+ 0, 1, 2, 3,
+ 1, 2, 3, 0,
+ 2, 3, 0, 1,
+ 3, 0, 1, 2,
+ 0, 1, 2, 3}; /* end point for 24 pulses &ipos[35], 4th iter */
#define NB_PULSE_MAX 24
@@ -70,751 +70,759 @@ static Word16 tipos[36] = {
/* Private functions */
void cor_h_vec_012(
- Word16 h[], /* (i) scaled impulse response */
- Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
- Word16 track, /* (i) track to use */
- Word16 sign[], /* (i) sign vector */
- Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
- Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
- Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
- );
+ Word16 h[], /* (i) scaled impulse response */
+ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
+ Word16 track, /* (i) track to use */
+ Word16 sign[], /* (i) sign vector */
+ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
+ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
+ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
+ );
void cor_h_vec_012_asm(
- Word16 h[], /* (i) scaled impulse response */
- Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
- Word16 track, /* (i) track to use */
- Word16 sign[], /* (i) sign vector */
- Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
- Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
- Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
- );
+ Word16 h[], /* (i) scaled impulse response */
+ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
+ Word16 track, /* (i) track to use */
+ Word16 sign[], /* (i) sign vector */
+ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
+ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
+ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
+ );
void cor_h_vec_30(
- Word16 h[], /* (i) scaled impulse response */
- Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
- Word16 track, /* (i) track to use */
- Word16 sign[], /* (i) sign vector */
- Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
- Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
- Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
- );
+ Word16 h[], /* (i) scaled impulse response */
+ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
+ Word16 track, /* (i) track to use */
+ Word16 sign[], /* (i) sign vector */
+ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
+ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
+ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
+ );
void search_ixiy(
- Word16 nb_pos_ix, /* (i) nb of pos for pulse 1 (1..8) */
- Word16 track_x, /* (i) track of pulse 1 */
- Word16 track_y, /* (i) track of pulse 2 */
- Word16 * ps, /* (i/o) correlation of all fixed pulses */
- Word16 * alp, /* (i/o) energy of all fixed pulses */
- Word16 * ix, /* (o) position of pulse 1 */
- Word16 * iy, /* (o) position of pulse 2 */
- Word16 dn[], /* (i) corr. between target and h[] */
- Word16 dn2[], /* (i) vector of selected positions */
- Word16 cor_x[], /* (i) corr. of pulse 1 with fixed pulses */
- Word16 cor_y[], /* (i) corr. of pulse 2 with fixed pulses */
- Word16 rrixiy[][MSIZE] /* (i) corr. of pulse 1 with pulse 2 */
- );
+ Word16 nb_pos_ix, /* (i) nb of pos for pulse 1 (1..8) */
+ Word16 track_x, /* (i) track of pulse 1 */
+ Word16 track_y, /* (i) track of pulse 2 */
+ Word16 * ps, /* (i/o) correlation of all fixed pulses */
+ Word16 * alp, /* (i/o) energy of all fixed pulses */
+ Word16 * ix, /* (o) position of pulse 1 */
+ Word16 * iy, /* (o) position of pulse 2 */
+ Word16 dn[], /* (i) corr. between target and h[] */
+ Word16 dn2[], /* (i) vector of selected positions */
+ Word16 cor_x[], /* (i) corr. of pulse 1 with fixed pulses */
+ Word16 cor_y[], /* (i) corr. of pulse 2 with fixed pulses */
+ Word16 rrixiy[][MSIZE] /* (i) corr. of pulse 1 with pulse 2 */
+ );
void ACELP_4t64_fx(
- Word16 dn[], /* (i) <12b : correlation between target x[] and H[] */
- Word16 cn[], /* (i) <12b : residual after long term prediction */
- Word16 H[], /* (i) Q12: impulse response of weighted synthesis filter */
- Word16 code[], /* (o) Q9 : algebraic (fixed) codebook excitation */
- Word16 y[], /* (o) Q9 : filtered fixed codebook excitation */
- Word16 nbbits, /* (i) : 20, 36, 44, 52, 64, 72 or 88 bits */
- Word16 ser_size, /* (i) : bit rate */
- Word16 _index[] /* (o) : index (20): 5+5+5+5 = 20 bits. */
- /* (o) : index (36): 9+9+9+9 = 36 bits. */
- /* (o) : index (44): 13+9+13+9 = 44 bits. */
- /* (o) : index (52): 13+13+13+13 = 52 bits. */
- /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits. */
- /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits. */
- /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits. */
- )
+ Word16 dn[], /* (i) <12b : correlation between target x[] and H[] */
+ Word16 cn[], /* (i) <12b : residual after long term prediction */
+ Word16 H[], /* (i) Q12: impulse response of weighted synthesis filter */
+ Word16 code[], /* (o) Q9 : algebraic (fixed) codebook excitation */
+ Word16 y[], /* (o) Q9 : filtered fixed codebook excitation */
+ Word16 nbbits, /* (i) : 20, 36, 44, 52, 64, 72 or 88 bits */
+ Word16 ser_size, /* (i) : bit rate */
+ Word16 _index[] /* (o) : index (20): 5+5+5+5 = 20 bits. */
+ /* (o) : index (36): 9+9+9+9 = 36 bits. */
+ /* (o) : index (44): 13+9+13+9 = 44 bits. */
+ /* (o) : index (52): 13+13+13+13 = 52 bits. */
+ /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits. */
+ /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits. */
+ /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits. */
+ )
{
- Word32 i, j, k;
- Word16 st, ix, iy, pos, index, track, nb_pulse, nbiter, j_temp;
- Word16 psk, ps, alpk, alp, val, k_cn, k_dn, exp;
- Word16 *p0, *p1, *p2, *p3, *psign;
- Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf, h_shift;
- Word32 s, cor, L_tmp, L_index;
- Word16 dn2[L_SUBFR], sign[L_SUBFR], vec[L_SUBFR];
- Word16 ind[NPMAXPT * NB_TRACK];
- Word16 codvec[NB_PULSE_MAX], nbpos[10];
- Word16 cor_x[NB_POS], cor_y[NB_POS], pos_max[NB_TRACK];
- Word16 h_buf[4 * L_SUBFR];
- Word16 rrixix[NB_TRACK][NB_POS], rrixiy[NB_TRACK][MSIZE];
- Word16 ipos[NB_PULSE_MAX];
-
- switch (nbbits)
- {
- case 20: /* 20 bits, 4 pulses, 4 tracks */
- nbiter = 4; /* 4x16x16=1024 loop */
- alp = 8192; /* alp = 2.0 (Q12) */
- nb_pulse = 4;
- nbpos[0] = 4;
- nbpos[1] = 8;
- break;
- case 36: /* 36 bits, 8 pulses, 4 tracks */
- nbiter = 4; /* 4x20x16=1280 loop */
- alp = 4096; /* alp = 1.0 (Q12) */
- nb_pulse = 8;
- nbpos[0] = 4;
- nbpos[1] = 8;
- nbpos[2] = 8;
- break;
- case 44: /* 44 bits, 10 pulses, 4 tracks */
- nbiter = 4; /* 4x26x16=1664 loop */
- alp = 4096; /* alp = 1.0 (Q12) */
- nb_pulse = 10;
- nbpos[0] = 4;
- nbpos[1] = 6;
- nbpos[2] = 8;
- nbpos[3] = 8;
- break;
- case 52: /* 52 bits, 12 pulses, 4 tracks */
- nbiter = 4; /* 4x26x16=1664 loop */
- alp = 4096; /* alp = 1.0 (Q12) */
- nb_pulse = 12;
- nbpos[0] = 4;
- nbpos[1] = 6;
- nbpos[2] = 8;
- nbpos[3] = 8;
- break;
- case 64: /* 64 bits, 16 pulses, 4 tracks */
- nbiter = 3; /* 3x36x16=1728 loop */
- alp = 3277; /* alp = 0.8 (Q12) */
- nb_pulse = 16;
- nbpos[0] = 4;
- nbpos[1] = 4;
- nbpos[2] = 6;
- nbpos[3] = 6;
- nbpos[4] = 8;
- nbpos[5] = 8;
- break;
- case 72: /* 72 bits, 18 pulses, 4 tracks */
- nbiter = 3; /* 3x35x16=1680 loop */
- alp = 3072; /* alp = 0.75 (Q12) */
- nb_pulse = 18;
- nbpos[0] = 2;
- nbpos[1] = 3;
- nbpos[2] = 4;
- nbpos[3] = 5;
- nbpos[4] = 6;
- nbpos[5] = 7;
- nbpos[6] = 8;
- break;
- case 88: /* 88 bits, 24 pulses, 4 tracks */
- if(ser_size > 462)
- nbiter = 1;
- else
- nbiter = 2; /* 2x53x16=1696 loop */
-
- alp = 2048; /* alp = 0.5 (Q12) */
- nb_pulse = 24;
- nbpos[0] = 2;
- nbpos[1] = 2;
- nbpos[2] = 3;
- nbpos[3] = 4;
- nbpos[4] = 5;
- nbpos[5] = 6;
- nbpos[6] = 7;
- nbpos[7] = 8;
- nbpos[8] = 8;
- nbpos[9] = 8;
- break;
- default:
- nbiter = 0;
- alp = 0;
- nb_pulse = 0;
- }
-
- for (i = 0; i < nb_pulse; i++)
- {
- codvec[i] = i;
- }
-
- /*----------------------------------------------------------------*
- * Find sign for each pulse position. *
- *----------------------------------------------------------------*/
- /* calculate energy for normalization of cn[] and dn[] */
- /* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
+ Word32 i, j, k;
+ Word16 st, ix, iy, pos, index, track, nb_pulse, nbiter, j_temp;
+ Word16 psk, ps, alpk, alp, val, k_cn, k_dn, exp;
+ Word16 *p0, *p1, *p2, *p3, *psign;
+ Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf, h_shift;
+ Word32 s, cor, L_tmp, L_index;
+ Word16 dn2[L_SUBFR], sign[L_SUBFR], vec[L_SUBFR];
+ Word16 ind[NPMAXPT * NB_TRACK];
+ Word16 codvec[NB_PULSE_MAX], nbpos[10];
+ Word16 cor_x[NB_POS], cor_y[NB_POS], pos_max[NB_TRACK];
+ Word16 h_buf[4 * L_SUBFR];
+ Word16 rrixix[NB_TRACK][NB_POS], rrixiy[NB_TRACK][MSIZE];
+ Word16 ipos[NB_PULSE_MAX];
+
+ switch (nbbits)
+ {
+ case 20: /* 20 bits, 4 pulses, 4 tracks */
+ nbiter = 4; /* 4x16x16=1024 loop */
+ alp = 8192; /* alp = 2.0 (Q12) */
+ nb_pulse = 4;
+ nbpos[0] = 4;
+ nbpos[1] = 8;
+ break;
+ case 36: /* 36 bits, 8 pulses, 4 tracks */
+ nbiter = 4; /* 4x20x16=1280 loop */
+ alp = 4096; /* alp = 1.0 (Q12) */
+ nb_pulse = 8;
+ nbpos[0] = 4;
+ nbpos[1] = 8;
+ nbpos[2] = 8;
+ break;
+ case 44: /* 44 bits, 10 pulses, 4 tracks */
+ nbiter = 4; /* 4x26x16=1664 loop */
+ alp = 4096; /* alp = 1.0 (Q12) */
+ nb_pulse = 10;
+ nbpos[0] = 4;
+ nbpos[1] = 6;
+ nbpos[2] = 8;
+ nbpos[3] = 8;
+ break;
+ case 52: /* 52 bits, 12 pulses, 4 tracks */
+ nbiter = 4; /* 4x26x16=1664 loop */
+ alp = 4096; /* alp = 1.0 (Q12) */
+ nb_pulse = 12;
+ nbpos[0] = 4;
+ nbpos[1] = 6;
+ nbpos[2] = 8;
+ nbpos[3] = 8;
+ break;
+ case 64: /* 64 bits, 16 pulses, 4 tracks */
+ nbiter = 3; /* 3x36x16=1728 loop */
+ alp = 3277; /* alp = 0.8 (Q12) */
+ nb_pulse = 16;
+ nbpos[0] = 4;
+ nbpos[1] = 4;
+ nbpos[2] = 6;
+ nbpos[3] = 6;
+ nbpos[4] = 8;
+ nbpos[5] = 8;
+ break;
+ case 72: /* 72 bits, 18 pulses, 4 tracks */
+ nbiter = 3; /* 3x35x16=1680 loop */
+ alp = 3072; /* alp = 0.75 (Q12) */
+ nb_pulse = 18;
+ nbpos[0] = 2;
+ nbpos[1] = 3;
+ nbpos[2] = 4;
+ nbpos[3] = 5;
+ nbpos[4] = 6;
+ nbpos[5] = 7;
+ nbpos[6] = 8;
+ break;
+ case 88: /* 88 bits, 24 pulses, 4 tracks */
+ if(ser_size > 462)
+ nbiter = 1;
+ else
+ nbiter = 2; /* 2x53x16=1696 loop */
+
+ alp = 2048; /* alp = 0.5 (Q12) */
+ nb_pulse = 24;
+ nbpos[0] = 2;
+ nbpos[1] = 2;
+ nbpos[2] = 3;
+ nbpos[3] = 4;
+ nbpos[4] = 5;
+ nbpos[5] = 6;
+ nbpos[6] = 7;
+ nbpos[7] = 8;
+ nbpos[8] = 8;
+ nbpos[9] = 8;
+ break;
+ default:
+ nbiter = 0;
+ alp = 0;
+ nb_pulse = 0;
+ }
+
+ for (i = 0; i < nb_pulse; i++)
+ {
+ codvec[i] = i;
+ }
+
+ /*----------------------------------------------------------------*
+ * Find sign for each pulse position. *
+ *----------------------------------------------------------------*/
+ /* calculate energy for normalization of cn[] and dn[] */
+ /* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
#ifdef ASM_OPT /* asm optimization branch */
- s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
+ s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
#else
- s = Dot_product12(cn, cn, L_SUBFR, &exp);
+ s = Dot_product12(cn, cn, L_SUBFR, &exp);
#endif
- Isqrt_n(&s, &exp);
- s = L_shl(s, (exp + 5));
- k_cn = extract_h(L_add(s, 0x8000));
+ Isqrt_n(&s, &exp);
+ s = L_shl(s, (exp + 5));
+ k_cn = extract_h(L_add(s, 0x8000));
- /* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
+ /* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
#ifdef ASM_OPT /* asm optimization branch */
- s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
+ s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
#else
- s = Dot_product12(dn, dn, L_SUBFR, &exp);
+ s = Dot_product12(dn, dn, L_SUBFR, &exp);
#endif
- Isqrt_n(&s, &exp);
- k_dn = (L_shl(s, (exp + 5 + 3)) + 0x8000) >> 16; /* k_dn = 256..4096 */
- k_dn = vo_mult_r(alp, k_dn); /* alp in Q12 */
-
- /* mix normalized cn[] and dn[] */
- p0 = cn;
- p1 = dn;
- p2 = dn2;
-
- for (i = 0; i < L_SUBFR/4; i++)
- {
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
- *p2++ = s >> 7;
- }
-
- /* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[] */
- for(i = 0; i < L_SUBFR; i++)
- {
- val = dn[i];
- ps = dn2[i];
- if (ps >= 0)
- {
- sign[i] = 32767; /* sign = +1 (Q12) */
- vec[i] = -32768;
- } else
- {
- sign[i] = -32768; /* sign = -1 (Q12) */
- vec[i] = 32767;
- dn[i] = -val;
- dn2[i] = -ps;
- }
- }
- /*----------------------------------------------------------------*
- * Select NB_MAX position per track according to max of dn2[]. *
- *----------------------------------------------------------------*/
- pos = 0;
- for (i = 0; i < NB_TRACK; i++)
- {
- for (k = 0; k < NB_MAX; k++)
- {
- ps = -1;
- for (j = i; j < L_SUBFR; j += STEP)
- {
- if(dn2[j] > ps)
- {
- ps = dn2[j];
- pos = j;
- }
- }
- dn2[pos] = (k - NB_MAX); /* dn2 < 0 when position is selected */
- if (k == 0)
- {
- pos_max[i] = pos;
- }
- }
- }
-
- /*--------------------------------------------------------------*
- * Scale h[] to avoid overflow and to get maximum of precision *
- * on correlation. *
- * *
- * Maximum of h[] (h[0]) is fixed to 2048 (MAX16 / 16). *
- * ==> This allow addition of 16 pulses without saturation. *
- * *
- * Energy worst case (on resonant impulse response), *
- * - energy of h[] is approximately MAX/16. *
- * - During search, the energy is divided by 8 to avoid *
- * overflow on "alp". (energy of h[] = MAX/128). *
- * ==> "alp" worst case detected is 22854 on sinusoidal wave. *
- *--------------------------------------------------------------*/
-
- /* impulse response buffer for fast computation */
-
- h = h_buf;
- h_inv = h_buf + (2 * L_SUBFR);
- L_tmp = 0;
- for (i = 0; i < L_SUBFR; i++)
- {
- *h++ = 0;
- *h_inv++ = 0;
- L_tmp += (H[i] * H[i]) << 1;
- }
- /* scale h[] down (/2) when energy of h[] is high with many pulses used */
- val = extract_h(L_tmp);
- h_shift = 0;
-
- if ((nb_pulse >= 12) && (val > 1024))
- {
- h_shift = 1;
- }
- p0 = H;
- p1 = h;
- p2 = h_inv;
-
- for (i = 0; i < L_SUBFR/4; i++)
- {
- *p1 = *p0++ >> h_shift;
- *p2++ = -(*p1++);
- *p1 = *p0++ >> h_shift;
- *p2++ = -(*p1++);
- *p1 = *p0++ >> h_shift;
- *p2++ = -(*p1++);
- *p1 = *p0++ >> h_shift;
- *p2++ = -(*p1++);
- }
-
- /*------------------------------------------------------------*
- * Compute rrixix[][] needed for the codebook search. *
- * This algorithm compute impulse response energy of all *
- * positions (16) in each track (4). Total = 4x16 = 64. *
- *------------------------------------------------------------*/
-
- /* storage order --> i3i3, i2i2, i1i1, i0i0 */
-
- /* Init pointers to last position of rrixix[] */
- p0 = &rrixix[0][NB_POS - 1];
- p1 = &rrixix[1][NB_POS - 1];
- p2 = &rrixix[2][NB_POS - 1];
- p3 = &rrixix[3][NB_POS - 1];
-
- ptr_h1 = h;
- cor = 0x00008000L; /* for rounding */
- for (i = 0; i < NB_POS; i++)
- {
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
- ptr_h1++;
- *p3-- = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
- ptr_h1++;
- *p2-- = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
- ptr_h1++;
- *p1-- = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
- ptr_h1++;
- *p0-- = extract_h(cor);
- }
-
- /*------------------------------------------------------------*
- * Compute rrixiy[][] needed for the codebook search. *
- * This algorithm compute correlation between 2 pulses *
- * (2 impulses responses) in 4 possible adjacents tracks. *
- * (track 0-1, 1-2, 2-3 and 3-0). Total = 4x16x16 = 1024. *
- *------------------------------------------------------------*/
-
- /* storage order --> i2i3, i1i2, i0i1, i3i0 */
-
- pos = MSIZE - 1;
- ptr_hf = h + 1;
-
- for (k = 0; k < NB_POS; k++)
- {
- p3 = &rrixiy[2][pos];
- p2 = &rrixiy[1][pos];
- p1 = &rrixiy[0][pos];
- p0 = &rrixiy[3][pos - NB_POS];
-
- cor = 0x00008000L; /* for rounding */
- ptr_h1 = h;
- ptr_h2 = ptr_hf;
-
- for (i = k + 1; i < NB_POS; i++)
- {
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p3 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p2 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p1 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p0 = extract_h(cor);
-
- p3 -= (NB_POS + 1);
- p2 -= (NB_POS + 1);
- p1 -= (NB_POS + 1);
- p0 -= (NB_POS + 1);
- }
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p3 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p2 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p1 = extract_h(cor);
-
- pos -= NB_POS;
- ptr_hf += STEP;
- }
-
- /* storage order --> i3i0, i2i3, i1i2, i0i1 */
-
- pos = MSIZE - 1;
- ptr_hf = h + 3;
-
- for (k = 0; k < NB_POS; k++)
- {
- p3 = &rrixiy[3][pos];
- p2 = &rrixiy[2][pos - 1];
- p1 = &rrixiy[1][pos - 1];
- p0 = &rrixiy[0][pos - 1];
-
- cor = 0x00008000L; /* for rounding */
- ptr_h1 = h;
- ptr_h2 = ptr_hf;
-
- for (i = k + 1; i < NB_POS; i++)
- {
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p3 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p2 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p1 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p0 = extract_h(cor);
-
- p3 -= (NB_POS + 1);
- p2 -= (NB_POS + 1);
- p1 -= (NB_POS + 1);
- p0 -= (NB_POS + 1);
- }
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
- ptr_h1++;
- ptr_h2++;
- *p3 = extract_h(cor);
-
- pos--;
- ptr_hf += STEP;
- }
-
- /*------------------------------------------------------------*
- * Modification of rrixiy[][] to take signs into account. *
- *------------------------------------------------------------*/
-
- p0 = &rrixiy[0][0];
-
- for (k = 0; k < NB_TRACK; k++)
- {
- j_temp = (k + 1)&0x03;
- for (i = k; i < L_SUBFR; i += STEP)
- {
- psign = sign;
- if (psign[i] < 0)
- {
- psign = vec;
- }
- j = j_temp;
- for (; j < L_SUBFR; j += STEP)
- {
- *p0 = vo_mult(*p0, psign[j]);
- p0++;
- }
- }
- }
-
- /*-------------------------------------------------------------------*
- * Deep first search *
- *-------------------------------------------------------------------*/
-
- psk = -1;
- alpk = 1;
-
- for (k = 0; k < nbiter; k++)
- {
- j_temp = k<<2;
- for (i = 0; i < nb_pulse; i++)
- ipos[i] = tipos[j_temp + i];
-
- if(nbbits == 20)
- {
- pos = 0;
- ps = 0;
- alp = 0;
- for (i = 0; i < L_SUBFR; i++)
- {
- vec[i] = 0;
- }
- } else if ((nbbits == 36) || (nbbits == 44))
- {
- /* first stage: fix 2 pulses */
- pos = 2;
-
- ix = ind[0] = pos_max[ipos[0]];
- iy = ind[1] = pos_max[ipos[1]];
- ps = dn[ix] + dn[iy];
- i = ix >> 2; /* ix / STEP */
- j = iy >> 2; /* iy / STEP */
- s = rrixix[ipos[0]][i] << 13;
- s += rrixix[ipos[1]][j] << 13;
- i = (i << 4) + j; /* (ix/STEP)*NB_POS + (iy/STEP) */
- s += rrixiy[ipos[0]][i] << 14;
- alp = (s + 0x8000) >> 16;
- if (sign[ix] < 0)
- p0 = h_inv - ix;
- else
- p0 = h - ix;
- if (sign[iy] < 0)
- p1 = h_inv - iy;
- else
- p1 = h - iy;
-
- for (i = 0; i < L_SUBFR; i++)
- {
- vec[i] = (*p0++) + (*p1++);
- }
-
- if(nbbits == 44)
- {
- ipos[8] = 0;
- ipos[9] = 1;
- }
- } else
- {
- /* first stage: fix 4 pulses */
- pos = 4;
-
- ix = ind[0] = pos_max[ipos[0]];
- iy = ind[1] = pos_max[ipos[1]];
- i = ind[2] = pos_max[ipos[2]];
- j = ind[3] = pos_max[ipos[3]];
- ps = add1(add1(add1(dn[ix], dn[iy]), dn[i]), dn[j]);
-
- if (sign[ix] < 0)
- p0 = h_inv - ix;
- else
- p0 = h - ix;
-
- if (sign[iy] < 0)
- p1 = h_inv - iy;
- else
- p1 = h - iy;
-
- if (sign[i] < 0)
- p2 = h_inv - i;
- else
- p2 = h - i;
-
- if (sign[j] < 0)
- p3 = h_inv - j;
- else
- p3 = h - j;
-
- L_tmp = 0L;
- for(i = 0; i < L_SUBFR; i++)
- {
- vec[i] = add1(add1(add1(*p0++, *p1++), *p2++), *p3++);
- L_tmp += (vec[i] * vec[i]) << 1;
- }
-
- alp = ((L_tmp >> 3) + 0x8000) >> 16;
-
- if(nbbits == 72)
- {
- ipos[16] = 0;
- ipos[17] = 1;
- }
- }
-
- /* other stages of 2 pulses */
-
- for (j = pos, st = 0; j < nb_pulse; j += 2, st++)
- {
- /*--------------------------------------------------*
- * Calculate correlation of all possible positions *
- * of the next 2 pulses with previous fixed pulses. *
- * Each pulse can have 16 possible positions. *
- *--------------------------------------------------*/
- if(ipos[j] == 3)
- {
- cor_h_vec_30(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
- }
- else
- {
+ Isqrt_n(&s, &exp);
+ k_dn = voround(L_shl(s, (exp + 5 + 3))); /* k_dn = 256..4096 */
+ k_dn = vo_mult_r(alp, k_dn); /* alp in Q12 */
+
+ /* mix normalized cn[] and dn[] */
+ p0 = cn;
+ p1 = dn;
+ p2 = dn2;
+
+ for (i = 0; i < L_SUBFR/4; i++)
+ {
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ *p2++ = s >> 7;
+ }
+
+ /* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[] */
+ for(i = 0; i < L_SUBFR; i++)
+ {
+ val = dn[i];
+ ps = dn2[i];
+ if (ps >= 0)
+ {
+ sign[i] = 32767; /* sign = +1 (Q12) */
+ vec[i] = -32768;
+ } else
+ {
+ sign[i] = -32768; /* sign = -1 (Q12) */
+ vec[i] = 32767;
+ dn[i] = -val;
+ dn2[i] = -ps;
+ }
+ }
+ /*----------------------------------------------------------------*
+ * Select NB_MAX position per track according to max of dn2[]. *
+ *----------------------------------------------------------------*/
+ pos = 0;
+ for (i = 0; i < NB_TRACK; i++)
+ {
+ for (k = 0; k < NB_MAX; k++)
+ {
+ ps = -1;
+ for (j = i; j < L_SUBFR; j += STEP)
+ {
+ if(dn2[j] > ps)
+ {
+ ps = dn2[j];
+ pos = j;
+ }
+ }
+ dn2[pos] = (k - NB_MAX); /* dn2 < 0 when position is selected */
+ if (k == 0)
+ {
+ pos_max[i] = pos;
+ }
+ }
+ }
+
+ /*--------------------------------------------------------------*
+ * Scale h[] to avoid overflow and to get maximum of precision *
+ * on correlation. *
+ * *
+ * Maximum of h[] (h[0]) is fixed to 2048 (MAX16 / 16). *
+    * ==> This allows addition of 16 pulses without saturation.    *
+ * *
+ * Energy worst case (on resonant impulse response), *
+ * - energy of h[] is approximately MAX/16. *
+ * - During search, the energy is divided by 8 to avoid *
+ * overflow on "alp". (energy of h[] = MAX/128). *
+ * ==> "alp" worst case detected is 22854 on sinusoidal wave. *
+ *--------------------------------------------------------------*/
+
+ /* impulse response buffer for fast computation */
+
+ h = h_buf;
+ h_inv = h_buf + (2 * L_SUBFR);
+ L_tmp = 0;
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ *h++ = 0;
+ *h_inv++ = 0;
+ L_tmp += (H[i] * H[i]) << 1;
+ }
+ /* scale h[] down (/2) when energy of h[] is high with many pulses used */
+ val = extract_h(L_tmp);
+ h_shift = 0;
+
+ if ((nb_pulse >= 12) && (val > 1024))
+ {
+ h_shift = 1;
+ }
+ p0 = H;
+ p1 = h;
+ p2 = h_inv;
+
+ for (i = 0; i < L_SUBFR/4; i++)
+ {
+ *p1 = *p0++ >> h_shift;
+ *p2++ = -(*p1++);
+ *p1 = *p0++ >> h_shift;
+ *p2++ = -(*p1++);
+ *p1 = *p0++ >> h_shift;
+ *p2++ = -(*p1++);
+ *p1 = *p0++ >> h_shift;
+ *p2++ = -(*p1++);
+ }
+
+ /*------------------------------------------------------------*
+ * Compute rrixix[][] needed for the codebook search. *
+    * This algorithm computes the impulse response energy of all  *
+ * positions (16) in each track (4). Total = 4x16 = 64. *
+ *------------------------------------------------------------*/
+
+ /* storage order --> i3i3, i2i2, i1i1, i0i0 */
+
+ /* Init pointers to last position of rrixix[] */
+ p0 = &rrixix[0][NB_POS - 1];
+ p1 = &rrixix[1][NB_POS - 1];
+ p2 = &rrixix[2][NB_POS - 1];
+ p3 = &rrixix[3][NB_POS - 1];
+
+ ptr_h1 = h;
+ cor = 0x00008000L; /* for rounding */
+ for (i = 0; i < NB_POS; i++)
+ {
+ cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ ptr_h1++;
+ *p3-- = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ ptr_h1++;
+ *p2-- = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ ptr_h1++;
+ *p1-- = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ ptr_h1++;
+ *p0-- = extract_h(cor);
+ }
+
+ /*------------------------------------------------------------*
+ * Compute rrixiy[][] needed for the codebook search. *
+    * This algorithm computes the correlation between 2 pulses    *
+    * (2 impulse responses) in 4 possible adjacent tracks.        *
+ * (track 0-1, 1-2, 2-3 and 3-0). Total = 4x16x16 = 1024. *
+ *------------------------------------------------------------*/
+
+ /* storage order --> i2i3, i1i2, i0i1, i3i0 */
+
+ pos = MSIZE - 1;
+ ptr_hf = h + 1;
+
+ for (k = 0; k < NB_POS; k++)
+ {
+ p3 = &rrixiy[2][pos];
+ p2 = &rrixiy[1][pos];
+ p1 = &rrixiy[0][pos];
+ p0 = &rrixiy[3][pos - NB_POS];
+
+ cor = 0x00008000L; /* for rounding */
+ ptr_h1 = h;
+ ptr_h2 = ptr_hf;
+
+ for (i = k + 1; i < NB_POS; i++)
+ {
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p3 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p2 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p1 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p0 = extract_h(cor);
+
+ p3 -= (NB_POS + 1);
+ p2 -= (NB_POS + 1);
+ p1 -= (NB_POS + 1);
+ p0 -= (NB_POS + 1);
+ }
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p3 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p2 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p1 = extract_h(cor);
+
+ pos -= NB_POS;
+ ptr_hf += STEP;
+ }
+
+ /* storage order --> i3i0, i2i3, i1i2, i0i1 */
+
+ pos = MSIZE - 1;
+ ptr_hf = h + 3;
+
+ for (k = 0; k < NB_POS; k++)
+ {
+ p3 = &rrixiy[3][pos];
+ p2 = &rrixiy[2][pos - 1];
+ p1 = &rrixiy[1][pos - 1];
+ p0 = &rrixiy[0][pos - 1];
+
+ cor = 0x00008000L; /* for rounding */
+ ptr_h1 = h;
+ ptr_h2 = ptr_hf;
+
+ for (i = k + 1; i < NB_POS; i++)
+ {
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p3 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p2 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p1 = extract_h(cor);
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p0 = extract_h(cor);
+
+ p3 -= (NB_POS + 1);
+ p2 -= (NB_POS + 1);
+ p1 -= (NB_POS + 1);
+ p0 -= (NB_POS + 1);
+ }
+ cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ ptr_h1++;
+ ptr_h2++;
+ *p3 = extract_h(cor);
+
+ pos--;
+ ptr_hf += STEP;
+ }
+
+ /*------------------------------------------------------------*
+ * Modification of rrixiy[][] to take signs into account. *
+ *------------------------------------------------------------*/
+
+ p0 = &rrixiy[0][0];
+
+ for (k = 0; k < NB_TRACK; k++)
+ {
+ j_temp = (k + 1)&0x03;
+ for (i = k; i < L_SUBFR; i += STEP)
+ {
+ psign = sign;
+ if (psign[i] < 0)
+ {
+ psign = vec;
+ }
+ j = j_temp;
+ for (; j < L_SUBFR; j += STEP)
+ {
+ *p0 = vo_mult(*p0, psign[j]);
+ p0++;
+ }
+ }
+ }
+
+ /*-------------------------------------------------------------------*
+    * Depth-first search                                                 *
+ *-------------------------------------------------------------------*/
+
+ psk = -1;
+ alpk = 1;
+
+ for (k = 0; k < nbiter; k++)
+ {
+ j_temp = k<<2;
+ for (i = 0; i < nb_pulse; i++)
+ ipos[i] = tipos[j_temp + i];
+
+ if(nbbits == 20)
+ {
+ pos = 0;
+ ps = 0;
+ alp = 0;
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ vec[i] = 0;
+ }
+ } else if ((nbbits == 36) || (nbbits == 44))
+ {
+ /* first stage: fix 2 pulses */
+ pos = 2;
+
+ ix = ind[0] = pos_max[ipos[0]];
+ iy = ind[1] = pos_max[ipos[1]];
+ ps = dn[ix] + dn[iy];
+ i = ix >> 2; /* ix / STEP */
+ j = iy >> 2; /* iy / STEP */
+ s = rrixix[ipos[0]][i] << 13;
+ s += rrixix[ipos[1]][j] << 13;
+ i = (i << 4) + j; /* (ix/STEP)*NB_POS + (iy/STEP) */
+ s += rrixiy[ipos[0]][i] << 14;
+ alp = (s + 0x8000) >> 16;
+ if (sign[ix] < 0)
+ p0 = h_inv - ix;
+ else
+ p0 = h - ix;
+ if (sign[iy] < 0)
+ p1 = h_inv - iy;
+ else
+ p1 = h - iy;
+
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ vec[i] = (*p0++) + (*p1++);
+ }
+
+ if(nbbits == 44)
+ {
+ ipos[8] = 0;
+ ipos[9] = 1;
+ }
+ } else
+ {
+ /* first stage: fix 4 pulses */
+ pos = 4;
+
+ ix = ind[0] = pos_max[ipos[0]];
+ iy = ind[1] = pos_max[ipos[1]];
+ i = ind[2] = pos_max[ipos[2]];
+ j = ind[3] = pos_max[ipos[3]];
+ ps = add1(add1(add1(dn[ix], dn[iy]), dn[i]), dn[j]);
+
+ if (sign[ix] < 0)
+ p0 = h_inv - ix;
+ else
+ p0 = h - ix;
+
+ if (sign[iy] < 0)
+ p1 = h_inv - iy;
+ else
+ p1 = h - iy;
+
+ if (sign[i] < 0)
+ p2 = h_inv - i;
+ else
+ p2 = h - i;
+
+ if (sign[j] < 0)
+ p3 = h_inv - j;
+ else
+ p3 = h - j;
+
+ L_tmp = 0L;
+ for(i = 0; i < L_SUBFR; i++)
+ {
+ Word32 vecSq2;
+ vec[i] = add1(add1(add1(*p0++, *p1++), *p2++), *p3++);
+ vecSq2 = (vec[i] * vec[i]) << 1;
+ if (vecSq2 > 0 && L_tmp > INT_MAX - vecSq2) {
+ L_tmp = INT_MAX;
+ } else if (vecSq2 < 0 && L_tmp < INT_MIN - vecSq2) {
+ L_tmp = INT_MIN;
+ } else {
+ L_tmp += vecSq2;
+ }
+ }
+
+ alp = ((L_tmp >> 3) + 0x8000) >> 16;
+
+ if(nbbits == 72)
+ {
+ ipos[16] = 0;
+ ipos[17] = 1;
+ }
+ }
+
+ /* other stages of 2 pulses */
+
+ for (j = pos, st = 0; j < nb_pulse; j += 2, st++)
+ {
+ /*--------------------------------------------------*
+ * Calculate correlation of all possible positions *
+ * of the next 2 pulses with previous fixed pulses. *
+ * Each pulse can have 16 possible positions. *
+ *--------------------------------------------------*/
+ if(ipos[j] == 3)
+ {
+ cor_h_vec_30(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
+ }
+ else
+ {
#ifdef ASM_OPT /* asm optimization branch */
- cor_h_vec_012_asm(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
+ cor_h_vec_012_asm(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
#else
- cor_h_vec_012(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
+ cor_h_vec_012(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
#endif
- }
- /*--------------------------------------------------*
- * Find best positions of 2 pulses. *
- *--------------------------------------------------*/
- search_ixiy(nbpos[st], ipos[j], ipos[j + 1], &ps, &alp,
- &ix, &iy, dn, dn2, cor_x, cor_y, rrixiy);
-
- ind[j] = ix;
- ind[j + 1] = iy;
-
- if (sign[ix] < 0)
- p0 = h_inv - ix;
- else
- p0 = h - ix;
- if (sign[iy] < 0)
- p1 = h_inv - iy;
- else
- p1 = h - iy;
-
- for (i = 0; i < L_SUBFR; i+=4)
- {
- vec[i] += add1((*p0++), (*p1++));
- vec[i+1] += add1((*p0++), (*p1++));
- vec[i+2] += add1((*p0++), (*p1++));
- vec[i+3] += add1((*p0++), (*p1++));
- }
- }
- /* memorise the best codevector */
- ps = vo_mult(ps, ps);
- s = vo_L_msu(vo_L_mult(alpk, ps), psk, alp);
- if (s > 0)
- {
- psk = ps;
- alpk = alp;
- for (i = 0; i < nb_pulse; i++)
- {
- codvec[i] = ind[i];
- }
- for (i = 0; i < L_SUBFR; i++)
- {
- y[i] = vec[i];
- }
- }
- }
- /*-------------------------------------------------------------------*
- * Build the codeword, the filtered codeword and index of codevector.*
- *-------------------------------------------------------------------*/
- for (i = 0; i < NPMAXPT * NB_TRACK; i++)
- {
- ind[i] = -1;
- }
- for (i = 0; i < L_SUBFR; i++)
- {
- code[i] = 0;
- y[i] = vo_shr_r(y[i], 3); /* Q12 to Q9 */
- }
- val = (512 >> h_shift); /* codeword in Q9 format */
- for (k = 0; k < nb_pulse; k++)
- {
- i = codvec[k]; /* read pulse position */
- j = sign[i]; /* read sign */
- index = i >> 2; /* index = pos of pulse (0..15) */
- track = (Word16) (i & 0x03); /* track = i % NB_TRACK (0..3) */
-
- if (j > 0)
- {
- code[i] += val;
- codvec[k] += 128;
- } else
- {
- code[i] -= val;
- index += NB_POS;
- }
-
- i = (Word16)((vo_L_mult(track, NPMAXPT) >> 1));
-
- while (ind[i] >= 0)
- {
- i += 1;
- }
- ind[i] = index;
- }
-
- k = 0;
- /* Build index of codevector */
- if(nbbits == 20)
- {
- for (track = 0; track < NB_TRACK; track++)
- {
- _index[track] = (Word16)(quant_1p_N1(ind[k], 4));
- k += NPMAXPT;
- }
- } else if(nbbits == 36)
- {
- for (track = 0; track < NB_TRACK; track++)
- {
- _index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
- k += NPMAXPT;
- }
- } else if(nbbits == 44)
- {
- for (track = 0; track < NB_TRACK - 2; track++)
- {
- _index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
- k += NPMAXPT;
- }
- for (track = 2; track < NB_TRACK; track++)
- {
- _index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
- k += NPMAXPT;
- }
- } else if(nbbits == 52)
- {
- for (track = 0; track < NB_TRACK; track++)
- {
- _index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
- k += NPMAXPT;
- }
- } else if(nbbits == 64)
- {
- for (track = 0; track < NB_TRACK; track++)
- {
- L_index = quant_4p_4N(&ind[k], 4);
- _index[track] = (Word16)((L_index >> 14) & 3);
- _index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
- k += NPMAXPT;
- }
- } else if(nbbits == 72)
- {
- for (track = 0; track < NB_TRACK - 2; track++)
- {
- L_index = quant_5p_5N(&ind[k], 4);
- _index[track] = (Word16)((L_index >> 10) & 0x03FF);
- _index[track + NB_TRACK] = (Word16)(L_index & 0x03FF);
- k += NPMAXPT;
- }
- for (track = 2; track < NB_TRACK; track++)
- {
- L_index = quant_4p_4N(&ind[k], 4);
- _index[track] = (Word16)((L_index >> 14) & 3);
- _index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
- k += NPMAXPT;
- }
- } else if(nbbits == 88)
- {
- for (track = 0; track < NB_TRACK; track++)
- {
- L_index = quant_6p_6N_2(&ind[k], 4);
- _index[track] = (Word16)((L_index >> 11) & 0x07FF);
- _index[track + NB_TRACK] = (Word16)(L_index & 0x07FF);
- k += NPMAXPT;
- }
- }
- return;
+ }
+ /*--------------------------------------------------*
+ * Find best positions of 2 pulses. *
+ *--------------------------------------------------*/
+ search_ixiy(nbpos[st], ipos[j], ipos[j + 1], &ps, &alp,
+ &ix, &iy, dn, dn2, cor_x, cor_y, rrixiy);
+
+ ind[j] = ix;
+ ind[j + 1] = iy;
+
+ if (sign[ix] < 0)
+ p0 = h_inv - ix;
+ else
+ p0 = h - ix;
+ if (sign[iy] < 0)
+ p1 = h_inv - iy;
+ else
+ p1 = h - iy;
+
+ for (i = 0; i < L_SUBFR; i+=4)
+ {
+ vec[i] += add1((*p0++), (*p1++));
+ vec[i+1] += add1((*p0++), (*p1++));
+ vec[i+2] += add1((*p0++), (*p1++));
+ vec[i+3] += add1((*p0++), (*p1++));
+ }
+ }
+ /* memorise the best codevector */
+ ps = vo_mult(ps, ps);
+ s = vo_L_msu(vo_L_mult(alpk, ps), psk, alp);
+ if (s > 0)
+ {
+ psk = ps;
+ alpk = alp;
+ for (i = 0; i < nb_pulse; i++)
+ {
+ codvec[i] = ind[i];
+ }
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ y[i] = vec[i];
+ }
+ }
+ }
+ /*-------------------------------------------------------------------*
+ * Build the codeword, the filtered codeword and index of codevector.*
+ *-------------------------------------------------------------------*/
+ for (i = 0; i < NPMAXPT * NB_TRACK; i++)
+ {
+ ind[i] = -1;
+ }
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ code[i] = 0;
+ y[i] = vo_shr_r(y[i], 3); /* Q12 to Q9 */
+ }
+ val = (512 >> h_shift); /* codeword in Q9 format */
+ for (k = 0; k < nb_pulse; k++)
+ {
+ i = codvec[k]; /* read pulse position */
+ j = sign[i]; /* read sign */
+ index = i >> 2; /* index = pos of pulse (0..15) */
+ track = (Word16) (i & 0x03); /* track = i % NB_TRACK (0..3) */
+
+ if (j > 0)
+ {
+ code[i] += val;
+ codvec[k] += 128;
+ } else
+ {
+ code[i] -= val;
+ index += NB_POS;
+ }
+
+ i = (Word16)((vo_L_mult(track, NPMAXPT) >> 1));
+
+ while (ind[i] >= 0)
+ {
+ i += 1;
+ }
+ ind[i] = index;
+ }
+
+ k = 0;
+ /* Build index of codevector */
+ if(nbbits == 20)
+ {
+ for (track = 0; track < NB_TRACK; track++)
+ {
+ _index[track] = (Word16)(quant_1p_N1(ind[k], 4));
+ k += NPMAXPT;
+ }
+ } else if(nbbits == 36)
+ {
+ for (track = 0; track < NB_TRACK; track++)
+ {
+ _index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
+ k += NPMAXPT;
+ }
+ } else if(nbbits == 44)
+ {
+ for (track = 0; track < NB_TRACK - 2; track++)
+ {
+ _index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
+ k += NPMAXPT;
+ }
+ for (track = 2; track < NB_TRACK; track++)
+ {
+ _index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
+ k += NPMAXPT;
+ }
+ } else if(nbbits == 52)
+ {
+ for (track = 0; track < NB_TRACK; track++)
+ {
+ _index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
+ k += NPMAXPT;
+ }
+ } else if(nbbits == 64)
+ {
+ for (track = 0; track < NB_TRACK; track++)
+ {
+ L_index = quant_4p_4N(&ind[k], 4);
+ _index[track] = (Word16)((L_index >> 14) & 3);
+ _index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
+ k += NPMAXPT;
+ }
+ } else if(nbbits == 72)
+ {
+ for (track = 0; track < NB_TRACK - 2; track++)
+ {
+ L_index = quant_5p_5N(&ind[k], 4);
+ _index[track] = (Word16)((L_index >> 10) & 0x03FF);
+ _index[track + NB_TRACK] = (Word16)(L_index & 0x03FF);
+ k += NPMAXPT;
+ }
+ for (track = 2; track < NB_TRACK; track++)
+ {
+ L_index = quant_4p_4N(&ind[k], 4);
+ _index[track] = (Word16)((L_index >> 14) & 3);
+ _index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
+ k += NPMAXPT;
+ }
+ } else if(nbbits == 88)
+ {
+ for (track = 0; track < NB_TRACK; track++)
+ {
+ L_index = quant_6p_6N_2(&ind[k], 4);
+ _index[track] = (Word16)((L_index >> 11) & 0x07FF);
+ _index[track + NB_TRACK] = (Word16)(L_index & 0x07FF);
+ k += NPMAXPT;
+ }
+ }
+ return;
}
@@ -824,135 +832,135 @@ void ACELP_4t64_fx(
* Compute correlations of h[] with vec[] for the specified track. *
*-------------------------------------------------------------------*/
void cor_h_vec_30(
- Word16 h[], /* (i) scaled impulse response */
- Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
- Word16 track, /* (i) track to use */
- Word16 sign[], /* (i) sign vector */
- Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
- Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
- Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
- )
+ Word16 h[], /* (i) scaled impulse response */
+ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
+ Word16 track, /* (i) track to use */
+ Word16 sign[], /* (i) sign vector */
+ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
+ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
+ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
+ )
{
- Word32 i, j, pos, corr;
- Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
- Word32 L_sum1,L_sum2;
- cor_x = cor_1;
- cor_y = cor_2;
- p0 = rrixix[track];
- p3 = rrixix[0];
- pos = track;
-
- for (i = 0; i < NB_POS; i+=2)
- {
- L_sum1 = L_sum2 = 0L;
- p1 = h;
- p2 = &vec[pos];
- for (j=pos;j < L_SUBFR; j++)
- {
- L_sum1 += *p1 * *p2;
- p2-=3;
- L_sum2 += *p1++ * *p2;
- p2+=4;
- }
- p2-=3;
- L_sum2 += *p1++ * *p2++;
- L_sum2 += *p1++ * *p2++;
- L_sum2 += *p1++ * *p2++;
-
- L_sum1 = (L_sum1 << 2);
- L_sum2 = (L_sum2 << 2);
-
- corr = vo_round(L_sum1);
- *cor_x++ = vo_mult(corr, sign[pos]) + (*p0++);
- corr = vo_round(L_sum2);
- *cor_y++ = vo_mult(corr, sign[pos-3]) + (*p3++);
- pos += STEP;
-
- L_sum1 = L_sum2 = 0L;
- p1 = h;
- p2 = &vec[pos];
- for (j=pos;j < L_SUBFR; j++)
- {
- L_sum1 += *p1 * *p2;
- p2-=3;
- L_sum2 += *p1++ * *p2;
- p2+=4;
- }
- p2-=3;
- L_sum2 += *p1++ * *p2++;
- L_sum2 += *p1++ * *p2++;
- L_sum2 += *p1++ * *p2++;
-
- L_sum1 = (L_sum1 << 2);
- L_sum2 = (L_sum2 << 2);
-
- corr = vo_round(L_sum1);
- *cor_x++ = vo_mult(corr, sign[pos]) + (*p0++);
- corr = vo_round(L_sum2);
- *cor_y++ = vo_mult(corr, sign[pos-3]) + (*p3++);
- pos += STEP;
- }
- return;
+ Word32 i, j, pos, corr;
+ Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
+ Word32 L_sum1,L_sum2;
+ cor_x = cor_1;
+ cor_y = cor_2;
+ p0 = rrixix[track];
+ p3 = rrixix[0];
+ pos = track;
+
+ for (i = 0; i < NB_POS; i+=2)
+ {
+ L_sum1 = L_sum2 = 0L;
+ p1 = h;
+ p2 = &vec[pos];
+ for (j=pos;j < L_SUBFR; j++)
+ {
+ L_sum1 += *p1 * *p2;
+ p2-=3;
+ L_sum2 += *p1++ * *p2;
+ p2+=4;
+ }
+ p2-=3;
+ L_sum2 += *p1++ * *p2++;
+ L_sum2 += *p1++ * *p2++;
+ L_sum2 += *p1++ * *p2++;
+
+ L_sum1 = (L_sum1 << 2);
+ L_sum2 = (L_sum2 << 2);
+
+ corr = vo_round(L_sum1);
+ *cor_x++ = vo_mult(corr, sign[pos]) + (*p0++);
+ corr = vo_round(L_sum2);
+ *cor_y++ = vo_mult(corr, sign[pos-3]) + (*p3++);
+ pos += STEP;
+
+ L_sum1 = L_sum2 = 0L;
+ p1 = h;
+ p2 = &vec[pos];
+ for (j=pos;j < L_SUBFR; j++)
+ {
+ L_sum1 += *p1 * *p2;
+ p2-=3;
+ L_sum2 += *p1++ * *p2;
+ p2+=4;
+ }
+ p2-=3;
+ L_sum2 += *p1++ * *p2++;
+ L_sum2 += *p1++ * *p2++;
+ L_sum2 += *p1++ * *p2++;
+
+ L_sum1 = (L_sum1 << 2);
+ L_sum2 = (L_sum2 << 2);
+
+ corr = vo_round(L_sum1);
+ *cor_x++ = vo_mult(corr, sign[pos]) + (*p0++);
+ corr = vo_round(L_sum2);
+ *cor_y++ = vo_mult(corr, sign[pos-3]) + (*p3++);
+ pos += STEP;
+ }
+ return;
}
void cor_h_vec_012(
- Word16 h[], /* (i) scaled impulse response */
- Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
- Word16 track, /* (i) track to use */
- Word16 sign[], /* (i) sign vector */
- Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
- Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
- Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
- )
+ Word16 h[], /* (i) scaled impulse response */
+ Word16 vec[], /* (i) scaled vector (/8) to correlate with h[] */
+ Word16 track, /* (i) track to use */
+ Word16 sign[], /* (i) sign vector */
+ Word16 rrixix[][NB_POS], /* (i) correlation of h[x] with h[x] */
+ Word16 cor_1[], /* (o) result of correlation (NB_POS elements) */
+ Word16 cor_2[] /* (o) result of correlation (NB_POS elements) */
+ )
{
- Word32 i, j, pos, corr;
- Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
- Word32 L_sum1,L_sum2;
- cor_x = cor_1;
- cor_y = cor_2;
- p0 = rrixix[track];
- p3 = rrixix[track+1];
- pos = track;
-
- for (i = 0; i < NB_POS; i+=2)
- {
- L_sum1 = L_sum2 = 0L;
- p1 = h;
- p2 = &vec[pos];
- for (j=62-pos ;j >= 0; j--)
- {
- L_sum1 += *p1 * *p2++;
- L_sum2 += *p1++ * *p2;
- }
- L_sum1 += *p1 * *p2;
- L_sum1 = (L_sum1 << 2);
- L_sum2 = (L_sum2 << 2);
-
- corr = (L_sum1 + 0x8000) >> 16;
- cor_x[i] = vo_mult(corr, sign[pos]) + (*p0++);
- corr = (L_sum2 + 0x8000) >> 16;
- cor_y[i] = vo_mult(corr, sign[pos + 1]) + (*p3++);
- pos += STEP;
-
- L_sum1 = L_sum2 = 0L;
- p1 = h;
- p2 = &vec[pos];
- for (j= 62-pos;j >= 0; j--)
- {
- L_sum1 += *p1 * *p2++;
- L_sum2 += *p1++ * *p2;
- }
- L_sum1 += *p1 * *p2;
- L_sum1 = (L_sum1 << 2);
- L_sum2 = (L_sum2 << 2);
-
- corr = (L_sum1 + 0x8000) >> 16;
- cor_x[i+1] = vo_mult(corr, sign[pos]) + (*p0++);
- corr = (L_sum2 + 0x8000) >> 16;
- cor_y[i+1] = vo_mult(corr, sign[pos + 1]) + (*p3++);
- pos += STEP;
- }
- return;
+ Word32 i, j, pos, corr;
+ Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
+ Word32 L_sum1,L_sum2;
+ cor_x = cor_1;
+ cor_y = cor_2;
+ p0 = rrixix[track];
+ p3 = rrixix[track+1];
+ pos = track;
+
+ for (i = 0; i < NB_POS; i+=2)
+ {
+ L_sum1 = L_sum2 = 0L;
+ p1 = h;
+ p2 = &vec[pos];
+ for (j=62-pos ;j >= 0; j--)
+ {
+ L_sum1 += *p1 * *p2++;
+ L_sum2 += *p1++ * *p2;
+ }
+ L_sum1 += *p1 * *p2;
+ L_sum1 = (L_sum1 << 2);
+ L_sum2 = (L_sum2 << 2);
+
+ corr = (L_sum1 + 0x8000) >> 16;
+ cor_x[i] = vo_mult(corr, sign[pos]) + (*p0++);
+ corr = (L_sum2 + 0x8000) >> 16;
+ cor_y[i] = vo_mult(corr, sign[pos + 1]) + (*p3++);
+ pos += STEP;
+
+ L_sum1 = L_sum2 = 0L;
+ p1 = h;
+ p2 = &vec[pos];
+ for (j= 62-pos;j >= 0; j--)
+ {
+ L_sum1 += *p1 * *p2++;
+ L_sum2 += *p1++ * *p2;
+ }
+ L_sum1 += *p1 * *p2;
+ L_sum1 = (L_sum1 << 2);
+ L_sum2 = (L_sum2 << 2);
+
+ corr = (L_sum1 + 0x8000) >> 16;
+ cor_x[i+1] = vo_mult(corr, sign[pos]) + (*p0++);
+ corr = (L_sum2 + 0x8000) >> 16;
+ cor_y[i+1] = vo_mult(corr, sign[pos + 1]) + (*p3++);
+ pos += STEP;
+ }
+ return;
}
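For readability, here is a plain reference for what the heavily unrolled cor_h_vec_* routines above compute for one output array. This is a sketch, not the codec code: fixed-point saturation is omitted, and the per-position indexing of rrixix[] is simplified.

    #include <stdint.h>

    /* For each of the 16 candidate positions on a track, correlate the
     * impulse response h[] with vec[] starting at that position, round to
     * 16 bits, apply the pulse sign and add the precomputed pulse energy. */
    static void cor_h_vec_ref(const int16_t h[], const int16_t vec[],
                              int track, const int16_t sign[],
                              const int16_t rrixix[], int16_t cor[])
    {
        for (int k = 0; k < 16; k++) {          /* 16 positions per track   */
            int pos = track + 4 * k;            /* track, track+4, ...      */
            int64_t sum = 0;
            for (int j = pos; j < 64; j++)      /* 64-sample subframe       */
                sum += (int32_t)h[j - pos] * vec[j];
            int16_t corr = (int16_t)(((sum << 2) + 0x8000) >> 16);
            cor[k] = (int16_t)(((int32_t)corr * sign[pos]) >> 15) + rrixix[k];
        }
    }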
/*-------------------------------------------------------------------*
@@ -962,80 +970,80 @@ void cor_h_vec_012(
*-------------------------------------------------------------------*/
void search_ixiy(
- Word16 nb_pos_ix, /* (i) nb of pos for pulse 1 (1..8) */
- Word16 track_x, /* (i) track of pulse 1 */
- Word16 track_y, /* (i) track of pulse 2 */
- Word16 * ps, /* (i/o) correlation of all fixed pulses */
- Word16 * alp, /* (i/o) energy of all fixed pulses */
- Word16 * ix, /* (o) position of pulse 1 */
- Word16 * iy, /* (o) position of pulse 2 */
- Word16 dn[], /* (i) corr. between target and h[] */
- Word16 dn2[], /* (i) vector of selected positions */
- Word16 cor_x[], /* (i) corr. of pulse 1 with fixed pulses */
- Word16 cor_y[], /* (i) corr. of pulse 2 with fixed pulses */
- Word16 rrixiy[][MSIZE] /* (i) corr. of pulse 1 with pulse 2 */
- )
+ Word16 nb_pos_ix, /* (i) nb of pos for pulse 1 (1..8) */
+ Word16 track_x, /* (i) track of pulse 1 */
+ Word16 track_y, /* (i) track of pulse 2 */
+ Word16 * ps, /* (i/o) correlation of all fixed pulses */
+ Word16 * alp, /* (i/o) energy of all fixed pulses */
+ Word16 * ix, /* (o) position of pulse 1 */
+ Word16 * iy, /* (o) position of pulse 2 */
+ Word16 dn[], /* (i) corr. between target and h[] */
+ Word16 dn2[], /* (i) vector of selected positions */
+ Word16 cor_x[], /* (i) corr. of pulse 1 with fixed pulses */
+ Word16 cor_y[], /* (i) corr. of pulse 2 with fixed pulses */
+ Word16 rrixiy[][MSIZE] /* (i) corr. of pulse 1 with pulse 2 */
+ )
{
- Word32 x, y, pos, thres_ix;
- Word16 ps1, ps2, sq, sqk;
- Word16 alp_16, alpk;
- Word16 *p0, *p1, *p2;
- Word32 s, alp0, alp1, alp2;
-
- p0 = cor_x;
- p1 = cor_y;
- p2 = rrixiy[track_x];
-
- thres_ix = nb_pos_ix - NB_MAX;
-
- alp0 = L_deposit_h(*alp);
- alp0 = (alp0 + 0x00008000L); /* for rounding */
-
- sqk = -1;
- alpk = 1;
-
- for (x = track_x; x < L_SUBFR; x += STEP)
- {
- ps1 = *ps + dn[x];
- alp1 = alp0 + ((*p0++)<<13);
-
- if (dn2[x] < thres_ix)
- {
- pos = -1;
- for (y = track_y; y < L_SUBFR; y += STEP)
- {
- ps2 = add1(ps1, dn[y]);
-
- alp2 = alp1 + ((*p1++)<<13);
- alp2 = alp2 + ((*p2++)<<14);
- alp_16 = extract_h(alp2);
- sq = vo_mult(ps2, ps2);
- s = vo_L_mult(alpk, sq) - ((sqk * alp_16)<<1);
-
- if (s > 0)
- {
- sqk = sq;
- alpk = alp_16;
- pos = y;
- }
- }
- p1 -= NB_POS;
-
- if (pos >= 0)
- {
- *ix = x;
- *iy = pos;
- }
- } else
- {
- p2 += NB_POS;
- }
- }
-
- *ps = add1(*ps, add1(dn[*ix], dn[*iy]));
- *alp = alpk;
-
- return;
+ Word32 x, y, pos, thres_ix;
+ Word16 ps1, ps2, sq, sqk;
+ Word16 alp_16, alpk;
+ Word16 *p0, *p1, *p2;
+ Word32 s, alp0, alp1, alp2;
+
+ p0 = cor_x;
+ p1 = cor_y;
+ p2 = rrixiy[track_x];
+
+ thres_ix = nb_pos_ix - NB_MAX;
+
+ alp0 = L_deposit_h(*alp);
+ alp0 = (alp0 + 0x00008000L); /* for rounding */
+
+ sqk = -1;
+ alpk = 1;
+
+ for (x = track_x; x < L_SUBFR; x += STEP)
+ {
+ ps1 = *ps + dn[x];
+ alp1 = L_add(alp0, ((*p0++)<<13));
+
+ if (dn2[x] < thres_ix)
+ {
+ pos = -1;
+ for (y = track_y; y < L_SUBFR; y += STEP)
+ {
+ ps2 = add1(ps1, dn[y]);
+
+ alp2 = alp1 + ((*p1++)<<13);
+ alp2 = alp2 + ((*p2++)<<14);
+ alp_16 = extract_h(alp2);
+ sq = vo_mult(ps2, ps2);
+ s = L_sub(vo_L_mult(alpk, sq), L_mult(sqk, alp_16));
+
+ if (s > 0)
+ {
+ sqk = sq;
+ alpk = alp_16;
+ pos = y;
+ }
+ }
+ p1 -= NB_POS;
+
+ if (pos >= 0)
+ {
+ *ix = x;
+ *iy = pos;
+ }
+ } else
+ {
+ p2 += NB_POS;
+ }
+ }
+
+ *ps = add1(*ps, add1(dn[*ix], dn[*iy]));
+ *alp = alpk;
+
+ return;
}
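The substantive change in this hunk is that bare 32-bit additions and multiply-accumulates in search_ixiy() are replaced by the basic operators L_add, L_mult and L_sub, which saturate instead of wrapping. The following is a minimal sketch of that kind of saturating helper; it is not the codec's basic_op.h implementation, only an illustration of why the substitution removes undefined signed-overflow behaviour.

    #include <stdint.h>
    #include <stdio.h>

    /* Saturating 32-bit add/sub: widen to 64 bits, then clamp. */
    static int32_t sat_add32(int32_t a, int32_t b)
    {
        int64_t s = (int64_t)a + (int64_t)b;
        if (s > INT32_MAX) return INT32_MAX;
        if (s < INT32_MIN) return INT32_MIN;
        return (int32_t)s;
    }

    static int32_t sat_sub32(int32_t a, int32_t b)
    {
        int64_t d = (int64_t)a - (int64_t)b;
        if (d > INT32_MAX) return INT32_MAX;
        if (d < INT32_MIN) return INT32_MIN;
        return (int32_t)d;
    }

    int main(void)
    {
        /* 0x7FFF0000 + 0x00020000 would overflow a plain int32_t add;
         * the saturating version clamps to INT32_MAX instead. */
        printf("%ld\n", (long)sat_add32(0x7FFF0000, 0x00020000)); /* 2147483647  */
        printf("%ld\n", (long)sat_sub32(INT32_MIN, 1));           /* -2147483648 */
        return 0;
    }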
diff --git a/media/libstagefright/codecs/amrwbenc/src/convolve.c b/media/libstagefright/codecs/amrwbenc/src/convolve.c
index 4c1f7d4..9b8b3aa 100644
--- a/media/libstagefright/codecs/amrwbenc/src/convolve.c
+++ b/media/libstagefright/codecs/amrwbenc/src/convolve.c
@@ -17,8 +17,8 @@
/***********************************************************************
File: convolve.c
- Description:Perform the convolution between two vectors x[] and h[]
- and write the result in the vector y[]
+ Description:Perform the convolution between two vectors x[] and h[]
+ and write the result in the vector y[]
************************************************************************/
@@ -28,85 +28,85 @@
#define UNUSED(x) (void)(x)
void Convolve (
- Word16 x[], /* (i) : input vector */
- Word16 h[], /* (i) : impulse response */
- Word16 y[], /* (o) : output vector */
- Word16 L /* (i) : vector size */
- )
+ Word16 x[], /* (i) : input vector */
+ Word16 h[], /* (i) : impulse response */
+ Word16 y[], /* (o) : output vector */
+ Word16 L /* (i) : vector size */
+ )
{
- Word32 i, n;
- Word16 *tmpH,*tmpX;
- Word32 s;
+ Word32 i, n;
+ Word16 *tmpH,*tmpX;
+ Word32 s;
UNUSED(L);
- for (n = 0; n < 64;)
- {
- tmpH = h+n;
- tmpX = x;
- i=n+1;
- s = vo_mult32((*tmpX++), (*tmpH--));i--;
- while(i>0)
- {
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- i -= 4;
- }
- y[n] = ((s<<1) + 0x8000)>>16;
- n++;
+ for (n = 0; n < 64;)
+ {
+ tmpH = h+n;
+ tmpX = x;
+ i=n+1;
+ s = vo_mult32((*tmpX++), (*tmpH--));i--;
+ while(i>0)
+ {
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ i -= 4;
+ }
+ y[n] = ((s<<1) + 0x8000)>>16;
+ n++;
- tmpH = h+n;
- tmpX = x;
- i=n+1;
- s = vo_mult32((*tmpX++), (*tmpH--));i--;
- s += vo_mult32((*tmpX++), (*tmpH--));i--;
+ tmpH = h+n;
+ tmpX = x;
+ i=n+1;
+ s = vo_mult32((*tmpX++), (*tmpH--));i--;
+ s += vo_mult32((*tmpX++), (*tmpH--));i--;
- while(i>0)
- {
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- i -= 4;
- }
- y[n] = ((s<<1) + 0x8000)>>16;
- n++;
+ while(i>0)
+ {
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ i -= 4;
+ }
+ y[n] = ((s<<1) + 0x8000)>>16;
+ n++;
- tmpH = h+n;
- tmpX = x;
- i=n+1;
- s = vo_mult32((*tmpX++), (*tmpH--));i--;
- s += vo_mult32((*tmpX++), (*tmpH--));i--;
- s += vo_mult32((*tmpX++), (*tmpH--));i--;
+ tmpH = h+n;
+ tmpX = x;
+ i=n+1;
+ s = vo_mult32((*tmpX++), (*tmpH--));i--;
+ s += vo_mult32((*tmpX++), (*tmpH--));i--;
+ s += vo_mult32((*tmpX++), (*tmpH--));i--;
- while(i>0)
- {
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- i -= 4;
- }
- y[n] = ((s<<1) + 0x8000)>>16;
- n++;
+ while(i>0)
+ {
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ i -= 4;
+ }
+ y[n] = ((s<<1) + 0x8000)>>16;
+ n++;
- s = 0;
- tmpH = h+n;
- tmpX = x;
- i=n+1;
- while(i>0)
- {
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- i -= 4;
- }
- y[n] = ((s<<1) + 0x8000)>>16;
- n++;
- }
- return;
+ s = 0;
+ tmpH = h+n;
+ tmpX = x;
+ i=n+1;
+ while(i>0)
+ {
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ s += vo_mult32((*tmpX++), (*tmpH--));
+ i -= 4;
+ }
+ y[n] = ((s<<1) + 0x8000)>>16;
+ n++;
+ }
+ return;
}
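As a reference for the unrolled loops above: Convolve() computes, for each n in [0, 64), the partial convolution y[n] = sum over i of x[i]*h[n-i], doubles the product sum and rounds it back to 16 bits. A compact (non-optimized) sketch of the same arithmetic, with a wide accumulator and without saturation:

    #include <stdint.h>

    #define L_SUBFR_SKETCH 64   /* subframe length assumed from the source */

    static void convolve_ref(const int16_t x[], const int16_t h[], int16_t y[])
    {
        for (int n = 0; n < L_SUBFR_SKETCH; n++) {
            int64_t s = 0;                   /* wide accumulator, no overflow */
            for (int i = 0; i <= n; i++)
                s += (int32_t)x[i] * h[n - i];
            s = (s << 1) + 0x8000;           /* matches ((s << 1) + 0x8000)    */
            y[n] = (int16_t)(s >> 16);       /* >> 16; saturation omitted here */
        }
    }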
diff --git a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
index d9245ed..e834396 100644
--- a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
+++ b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
@@ -17,10 +17,10 @@
/***********************************************************************
* File: cor_h_x.c *
* *
-* Description:Compute correlation between target "x[]" and "h[]" *
-* Designed for codebook search (24 pulses, 4 tracks, *
-* 4 pulses per track, 16 positions in each track) to *
-* avoid saturation. *
+* Description:Compute correlation between target "x[]" and "h[]" *
+* Designed for codebook search (24 pulses, 4 tracks, *
+* 4 pulses per track, 16 positions in each track) to *
+* avoid saturation. *
* *
************************************************************************/
@@ -33,94 +33,100 @@
#define STEP 4
void cor_h_x(
- Word16 h[], /* (i) Q12 : impulse response of weighted synthesis filter */
- Word16 x[], /* (i) Q0 : target vector */
- Word16 dn[] /* (o) <12bit : correlation between target and h[] */
- )
+ Word16 h[], /* (i) Q12 : impulse response of weighted synthesis filter */
+ Word16 x[], /* (i) Q0 : target vector */
+ Word16 dn[] /* (o) <12bit : correlation between target and h[] */
+ )
{
- Word32 i, j;
- Word32 L_tmp, y32[L_SUBFR], L_tot;
- Word16 *p1, *p2;
- Word32 *p3;
- Word32 L_max, L_max1, L_max2, L_max3;
- /* first keep the result on 32 bits and find absolute maximum */
- L_tot = 1;
- L_max = 0;
- L_max1 = 0;
- L_max2 = 0;
- L_max3 = 0;
- for (i = 0; i < L_SUBFR; i += STEP)
- {
- L_tmp = 1; /* 1 -> to avoid null dn[] */
- p1 = &x[i];
- p2 = &h[0];
- for (j = i; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ Word32 i, j;
+ Word32 L_tmp, y32[L_SUBFR], L_tot;
+ Word16 *p1, *p2;
+ Word32 *p3;
+ Word32 L_max, L_max1, L_max2, L_max3;
+ /* first keep the result on 32 bits and find absolute maximum */
+ L_tot = 1;
+ L_max = 0;
+ L_max1 = 0;
+ L_max2 = 0;
+ L_max3 = 0;
+ for (i = 0; i < L_SUBFR; i += STEP)
+ {
+ L_tmp = 1; /* 1 -> to avoid null dn[] */
+ p1 = &x[i];
+ p2 = &h[0];
+ for (j = i; j < L_SUBFR; j++)
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
- y32[i] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
- if(L_tmp > L_max)
- {
- L_max = L_tmp;
- }
+ y32[i] = L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+ if(L_tmp > L_max)
+ {
+ L_max = L_tmp;
+ }
- L_tmp = 1L;
- p1 = &x[i+1];
- p2 = &h[0];
- for (j = i+1; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = 1L;
+ p1 = &x[i+1];
+ p2 = &h[0];
+ for (j = i+1; j < L_SUBFR; j++)
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
- y32[i+1] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
- if(L_tmp > L_max1)
- {
- L_max1 = L_tmp;
- }
+ y32[i+1] = L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+ if(L_tmp > L_max1)
+ {
+ L_max1 = L_tmp;
+ }
- L_tmp = 1;
- p1 = &x[i+2];
- p2 = &h[0];
- for (j = i+2; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = 1;
+ p1 = &x[i+2];
+ p2 = &h[0];
+ for (j = i+2; j < L_SUBFR; j++)
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
- y32[i+2] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
- if(L_tmp > L_max2)
- {
- L_max2 = L_tmp;
- }
+ y32[i+2] = L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+ if(L_tmp > L_max2)
+ {
+ L_max2 = L_tmp;
+ }
- L_tmp = 1;
- p1 = &x[i+3];
- p2 = &h[0];
- for (j = i+3; j < L_SUBFR; j++)
- L_tmp += vo_L_mult(*p1++, *p2++);
+ L_tmp = 1;
+ p1 = &x[i+3];
+ p2 = &h[0];
+ for (j = i+3; j < L_SUBFR; j++)
+ L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
- y32[i+3] = L_tmp;
- L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
- if(L_tmp > L_max3)
- {
- L_max3 = L_tmp;
- }
- }
- /* tot += 3*max / 8 */
- L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
- L_tot = vo_L_add(L_tot, L_max); /* +max/4 */
- L_tot = vo_L_add(L_tot, (L_max >> 1)); /* +max/8 */
+ y32[i+3] = L_tmp;
+ L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+ if(L_tmp > L_max3)
+ {
+ L_max3 = L_tmp;
+ }
+ }
+ /* tot += 3*max / 8 */
+ if (L_max > INT_MAX - L_max1 ||
+ L_max + L_max1 > INT_MAX - L_max2 ||
+ L_max + L_max1 + L_max2 > INT_MAX - L_max3) {
+ L_max = INT_MAX >> 2;
+ } else {
+ L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
+ }
+ L_tot = vo_L_add(L_tot, L_max); /* +max/4 */
+ L_tot = vo_L_add(L_tot, (L_max >> 1)); /* +max/8 */
- /* Find the number of right shifts to do on y32[] so that */
- /* 6.0 x sumation of max of dn[] in each track not saturate. */
- j = norm_l(L_tot) - 4; /* 4 -> 16 x tot */
- p1 = dn;
- p3 = y32;
- for (i = 0; i < L_SUBFR; i+=4)
- {
- *p1++ = vo_round(L_shl(*p3++, j));
- *p1++ = vo_round(L_shl(*p3++, j));
- *p1++ = vo_round(L_shl(*p3++, j));
- *p1++ = vo_round(L_shl(*p3++, j));
- }
- return;
+ /* Find the number of right shifts to do on y32[] so that */
+ /* 6.0 x sumation of max of dn[] in each track not saturate. */
+ j = norm_l(L_tot) - 4; /* 4 -> 16 x tot */
+ p1 = dn;
+ p3 = y32;
+ for (i = 0; i < L_SUBFR; i+=4)
+ {
+ *p1++ = vo_round(L_shl(*p3++, j));
+ *p1++ = vo_round(L_shl(*p3++, j));
+ *p1++ = vo_round(L_shl(*p3++, j));
+ *p1++ = vo_round(L_shl(*p3++, j));
+ }
+ return;
}
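This hunk introduces two guards: taking the absolute value of INT_MIN and summing the four track maxima can both overflow 32-bit arithmetic, so the patched cor_h_x() clamps those cases. A small sketch of the same two guards (names are illustrative, not the codec's):

    #include <stdint.h>

    /* |v| with a clamp: -INT32_MIN does not fit in 32 bits. */
    static int32_t abs32_sat(int32_t v)
    {
        if (v == INT32_MIN)
            return INT32_MAX;
        return (v < 0) ? -v : v;
    }

    /* (a + b + c + d) >> 2 for non-negative maxima, falling back to
     * INT32_MAX >> 2 when the running sum would wrap, mirroring the
     * patched code. */
    static int32_t sum4_avg_sat(int32_t a, int32_t b, int32_t c, int32_t d)
    {
        if (a > INT32_MAX - b ||
            a + b > INT32_MAX - c ||
            a + b + c > INT32_MAX - d)
            return INT32_MAX >> 2;
        return (a + b + c + d) >> 2;
    }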
diff --git a/media/libstagefright/codecs/amrwbenc/src/decim54.c b/media/libstagefright/codecs/amrwbenc/src/decim54.c
index 3b88514..e4c7940 100644
--- a/media/libstagefright/codecs/amrwbenc/src/decim54.c
+++ b/media/libstagefright/codecs/amrwbenc/src/decim54.c
@@ -17,7 +17,7 @@
/***********************************************************************
* File: decim54.c *
* *
-* Description:Decimation of 16kHz signal to 12.8kHz *
+* Description:Decimation of 16kHz signal to 12.8kHz *
* *
************************************************************************/
@@ -33,114 +33,114 @@
/* Local functions */
static void Down_samp(
- Word16 * sig, /* input: signal to downsampling */
- Word16 * sig_d, /* output: downsampled signal */
- Word16 L_frame_d /* input: length of output */
- );
+ Word16 * sig, /* input: signal to downsampling */
+ Word16 * sig_d, /* output: downsampled signal */
+ Word16 L_frame_d /* input: length of output */
+ );
/* 1/5 resolution interpolation filter (in Q14) */
/* -1.5dB @ 6kHz, -6dB @ 6.4kHz, -10dB @ 6.6kHz, -20dB @ 6.9kHz, -25dB @ 7kHz, -55dB @ 8kHz */
static Word16 fir_down1[4][30] =
{
- {-5, 24, -50, 54, 0, -128, 294, -408, 344, 0, -647, 1505, -2379, 3034, 13107, 3034, -2379, 1505, -647, 0, 344, -408,
- 294, -128, 0, 54, -50, 24, -5, 0},
+ {-5, 24, -50, 54, 0, -128, 294, -408, 344, 0, -647, 1505, -2379, 3034, 13107, 3034, -2379, 1505, -647, 0, 344, -408,
+ 294, -128, 0, 54, -50, 24, -5, 0},
- {-6, 19, -26, 0, 77, -188, 270, -233, 0, 434, -964, 1366, -1293, 0, 12254, 6575, -2746, 1030, 0, -507, 601, -441,
- 198, 0, -95, 99, -58, 18, 0, -1},
+ {-6, 19, -26, 0, 77, -188, 270, -233, 0, 434, -964, 1366, -1293, 0, 12254, 6575, -2746, 1030, 0, -507, 601, -441,
+ 198, 0, -95, 99, -58, 18, 0, -1},
- {-3, 9, 0, -41, 111, -170, 153, 0, -295, 649, -888, 770, 0, -1997, 9894, 9894, -1997, 0, 770, -888, 649, -295, 0,
- 153, -170, 111, -41, 0, 9, -3},
+ {-3, 9, 0, -41, 111, -170, 153, 0, -295, 649, -888, 770, 0, -1997, 9894, 9894, -1997, 0, 770, -888, 649, -295, 0,
+ 153, -170, 111, -41, 0, 9, -3},
- {-1, 0, 18, -58, 99, -95, 0, 198, -441, 601, -507, 0, 1030, -2746, 6575, 12254, 0, -1293, 1366, -964, 434, 0,
- -233, 270, -188, 77, 0, -26, 19, -6}
+ {-1, 0, 18, -58, 99, -95, 0, 198, -441, 601, -507, 0, 1030, -2746, 6575, 12254, 0, -1293, 1366, -964, 434, 0,
+ -233, 270, -188, 77, 0, -26, 19, -6}
};
void Init_Decim_12k8(
- Word16 mem[] /* output: memory (2*NB_COEF_DOWN) set to zeros */
- )
+ Word16 mem[] /* output: memory (2*NB_COEF_DOWN) set to zeros */
+ )
{
- Set_zero(mem, 2 * NB_COEF_DOWN);
- return;
+ Set_zero(mem, 2 * NB_COEF_DOWN);
+ return;
}
void Decim_12k8(
- Word16 sig16k[], /* input: signal to downsampling */
- Word16 lg, /* input: length of input */
- Word16 sig12k8[], /* output: decimated signal */
- Word16 mem[] /* in/out: memory (2*NB_COEF_DOWN) */
- )
+ Word16 sig16k[], /* input: signal to downsampling */
+ Word16 lg, /* input: length of input */
+ Word16 sig12k8[], /* output: decimated signal */
+ Word16 mem[] /* in/out: memory (2*NB_COEF_DOWN) */
+ )
{
- Word16 lg_down;
- Word16 signal[L_FRAME16k + (2 * NB_COEF_DOWN)];
+ Word16 lg_down;
+ Word16 signal[L_FRAME16k + (2 * NB_COEF_DOWN)];
- Copy(mem, signal, 2 * NB_COEF_DOWN);
+ Copy(mem, signal, 2 * NB_COEF_DOWN);
- Copy(sig16k, signal + (2 * NB_COEF_DOWN), lg);
+ Copy(sig16k, signal + (2 * NB_COEF_DOWN), lg);
- lg_down = (lg * DOWN_FAC)>>15;
+ lg_down = (lg * DOWN_FAC)>>15;
- Down_samp(signal + NB_COEF_DOWN, sig12k8, lg_down);
+ Down_samp(signal + NB_COEF_DOWN, sig12k8, lg_down);
- Copy(signal + lg, mem, 2 * NB_COEF_DOWN);
+ Copy(signal + lg, mem, 2 * NB_COEF_DOWN);
- return;
+ return;
}
static void Down_samp(
- Word16 * sig, /* input: signal to downsampling */
- Word16 * sig_d, /* output: downsampled signal */
- Word16 L_frame_d /* input: length of output */
- )
+ Word16 * sig, /* input: signal to downsampling */
+ Word16 * sig_d, /* output: downsampled signal */
+ Word16 L_frame_d /* input: length of output */
+ )
{
- Word32 i, j, frac, pos;
- Word16 *x, *y;
- Word32 L_sum;
-
- pos = 0; /* position is in Q2 -> 1/4 resolution */
- for (j = 0; j < L_frame_d; j++)
- {
- i = (pos >> 2); /* integer part */
- frac = pos & 3; /* fractional part */
- x = sig + i - NB_COEF_DOWN + 1;
- y = (Word16 *)(fir_down1 + frac);
-
- L_sum = vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x++),(*y++));
- L_sum += vo_mult32((*x),(*y));
-
- L_sum = L_shl2(L_sum, 2);
- sig_d[j] = extract_h(L_add(L_sum, 0x8000));
- pos += FAC5; /* pos + 5/4 */
- }
- return;
+ Word32 i, j, frac, pos;
+ Word16 *x, *y;
+ Word32 L_sum;
+
+ pos = 0; /* position is in Q2 -> 1/4 resolution */
+ for (j = 0; j < L_frame_d; j++)
+ {
+ i = (pos >> 2); /* integer part */
+ frac = pos & 3; /* fractional part */
+ x = sig + i - NB_COEF_DOWN + 1;
+ y = (Word16 *)(fir_down1 + frac);
+
+ L_sum = vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x++),(*y++));
+ L_sum += vo_mult32((*x),(*y));
+
+ L_sum = L_shl2(L_sum, 2);
+ sig_d[j] = extract_h(L_add(L_sum, 0x8000));
+ pos += FAC5; /* pos + 5/4 */
+ }
+ return;
}
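Down_samp() realises the 16 kHz to 12.8 kHz decimation (ratio 4/5) by stepping through the input in 5/4-sample increments (pos advances by FAC5 in Q2) and selecting one of four FIR phases per output sample. The sketch below shows that polyphase pattern only; the tap count, tap values and the symmetric filter alignment of the real routine are placeholders.

    #include <stdint.h>

    #define NTAPS_SKETCH 8                   /* placeholder tap count          */

    static int16_t phases[4][NTAPS_SKETCH];  /* one Q14 FIR per 1/4-sample phase */

    static void down_samp_sketch(const int16_t *sig, int16_t *out, int n_out)
    {
        int32_t pos = 0;                     /* position in Q2 (1/4 sample)    */
        for (int j = 0; j < n_out; j++) {
            int i    = pos >> 2;             /* integer sample index           */
            int frac = pos & 3;              /* fractional phase 0..3          */
            int64_t acc = 0;
            for (int k = 0; k < NTAPS_SKETCH; k++)   /* caller provides margin */
                acc += (int32_t)sig[i + k] * phases[frac][k];
            out[j] = (int16_t)((acc + (1 << 13)) >> 14);  /* Q14 taps -> Q0    */
            pos += 5;                        /* advance by 5/4 input sample    */
        }
    }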
diff --git a/media/libstagefright/codecs/amrwbenc/src/deemph.c b/media/libstagefright/codecs/amrwbenc/src/deemph.c
index 0c49d6b..cc27f6e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/deemph.c
+++ b/media/libstagefright/codecs/amrwbenc/src/deemph.c
@@ -17,9 +17,9 @@
/***********************************************************************
* File: deemph.c *
* *
-* Description:filtering through 1/(1-mu z^ -1) *
-* Deemph2 --> signal is divided by 2 *
-* Deemph_32 --> for 32 bits signal. *
+* Description:filtering through 1/(1-mu z^ -1) *
+* Deemph2 --> signal is divided by 2 *
+* Deemph_32 --> for 32 bits signal. *
* *
************************************************************************/
@@ -28,89 +28,92 @@
#include "math_op.h"
void Deemph(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : deemphasis factor */
- Word16 L, /* (i) : vector size */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- )
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : deemphasis factor */
+ Word16 L, /* (i) : vector size */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ )
{
- Word32 i;
- Word32 L_tmp;
+ Word32 i;
+ Word32 L_tmp;
- L_tmp = L_deposit_h(x[0]);
- L_tmp = L_mac(L_tmp, *mem, mu);
- x[0] = vo_round(L_tmp);
+ L_tmp = L_deposit_h(x[0]);
+ L_tmp = L_mac(L_tmp, *mem, mu);
+ x[0] = vo_round(L_tmp);
- for (i = 1; i < L; i++)
- {
- L_tmp = L_deposit_h(x[i]);
- L_tmp = L_mac(L_tmp, x[i - 1], mu);
- x[i] = voround(L_tmp);
- }
+ for (i = 1; i < L; i++)
+ {
+ L_tmp = L_deposit_h(x[i]);
+ L_tmp = L_mac(L_tmp, x[i - 1], mu);
+ x[i] = voround(L_tmp);
+ }
- *mem = x[L - 1];
+ *mem = x[L - 1];
- return;
+ return;
}
void Deemph2(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : deemphasis factor */
- Word16 L, /* (i) : vector size */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- )
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : deemphasis factor */
+ Word16 L, /* (i) : vector size */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ )
{
- Word32 i;
- Word32 L_tmp;
- L_tmp = x[0] << 15;
- L_tmp += ((*mem) * mu)<<1;
- x[0] = (L_tmp + 0x8000)>>16;
- for (i = 1; i < L; i++)
- {
- L_tmp = x[i] << 15;
- L_tmp += (x[i - 1] * mu)<<1;
- x[i] = (L_tmp + 0x8000)>>16;
- }
- *mem = x[L - 1];
- return;
+ Word32 i;
+ Word32 L_tmp;
+ L_tmp = x[0] << 15;
+ i = L_mult(*mem, mu);
+ L_tmp = L_add(L_tmp, i);
+ x[0] = voround(L_tmp);
+ for (i = 1; i < L; i++)
+ {
+ Word32 tmp;
+ L_tmp = x[i] << 15;
+ tmp = (x[i - 1] * mu)<<1;
+ L_tmp = L_add(L_tmp, tmp);
+ x[i] = voround(L_tmp);
+ }
+ *mem = x[L - 1];
+ return;
}
void Deemph_32(
- Word16 x_hi[], /* (i) : input signal (bit31..16) */
- Word16 x_lo[], /* (i) : input signal (bit15..4) */
- Word16 y[], /* (o) : output signal (x16) */
- Word16 mu, /* (i) Q15 : deemphasis factor */
- Word16 L, /* (i) : vector size */
- Word16 * mem /* (i/o) : memory (y[-1]) */
- )
+ Word16 x_hi[], /* (i) : input signal (bit31..16) */
+ Word16 x_lo[], /* (i) : input signal (bit15..4) */
+ Word16 y[], /* (o) : output signal (x16) */
+ Word16 mu, /* (i) Q15 : deemphasis factor */
+ Word16 L, /* (i) : vector size */
+ Word16 * mem /* (i/o) : memory (y[-1]) */
+ )
{
- Word16 fac;
- Word32 i, L_tmp;
-
- fac = mu >> 1; /* Q15 --> Q14 */
-
- L_tmp = L_deposit_h(x_hi[0]);
- L_tmp += (x_lo[0] * 8)<<1;
- L_tmp = (L_tmp << 3);
- L_tmp += ((*mem) * fac)<<1;
- L_tmp = (L_tmp << 1);
- y[0] = (L_tmp + 0x8000)>>16;
-
- for (i = 1; i < L; i++)
- {
- L_tmp = L_deposit_h(x_hi[i]);
- L_tmp += (x_lo[i] * 8)<<1;
- L_tmp = (L_tmp << 3);
- L_tmp += (y[i - 1] * fac)<<1;
- L_tmp = (L_tmp << 1);
- y[i] = (L_tmp + 0x8000)>>16;
- }
-
- *mem = y[L - 1];
-
- return;
+ Word16 fac;
+ Word32 i, L_tmp;
+
+ fac = mu >> 1; /* Q15 --> Q14 */
+
+ L_tmp = L_deposit_h(x_hi[0]);
+ L_tmp += (x_lo[0] * 8)<<1;
+ L_tmp = (L_tmp << 3);
+ L_tmp += ((*mem) * fac)<<1;
+ L_tmp = (L_tmp << 1);
+ y[0] = (L_tmp + 0x8000)>>16;
+
+ for (i = 1; i < L; i++)
+ {
+ L_tmp = L_deposit_h(x_hi[i]);
+ L_tmp += (x_lo[i] * 8)<<1;
+ L_tmp = (L_tmp << 3);
+ L_tmp += (y[i - 1] * fac)<<1;
+ L_tmp = (L_tmp << 1);
+ y[i] = (L_tmp + 0x8000)>>16;
+ }
+
+ *mem = y[L - 1];
+
+ return;
}
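All three Deemph variants realise the one-pole de-emphasis filter y[n] = x[n] + mu*y[n-1] (i.e. filtering through 1/(1 - mu z^-1)); the change in this hunk only moves Deemph2 onto saturating adds and voround() so large inputs clip instead of wrapping. A floating-point illustration of the filter structure, not the codec's fixed-point routine:

    /* y[n] = x[n] + mu * y[n-1], overwriting x[] in place. */
    static void deemph_ref(float x[], float mu, int len, float *mem)
    {
        float prev = *mem;                  /* y[-1] carried between frames */
        for (int i = 0; i < len; i++) {
            x[i] = x[i] + mu * prev;
            prev = x[i];
        }
        *mem = prev;                        /* save y[len-1] for the next call */
    }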
diff --git a/media/libstagefright/codecs/amrwbenc/src/dtx.c b/media/libstagefright/codecs/amrwbenc/src/dtx.c
index 2cfaced..6be8683 100644
--- a/media/libstagefright/codecs/amrwbenc/src/dtx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/dtx.c
@@ -17,7 +17,7 @@
/***********************************************************************
* File: dtx.c *
* *
-* Description:DTX functions *
+* Description:DTX functions *
* *
************************************************************************/
@@ -35,33 +35,33 @@
#include "mem_align.h"
static void aver_isf_history(
- Word16 isf_old[],
- Word16 indices[],
- Word32 isf_aver[]
- );
+ Word16 isf_old[],
+ Word16 indices[],
+ Word32 isf_aver[]
+ );
static void find_frame_indices(
- Word16 isf_old_tx[],
- Word16 indices[],
- dtx_encState * st
- );
+ Word16 isf_old_tx[],
+ Word16 indices[],
+ dtx_encState * st
+ );
static Word16 dithering_control(
- dtx_encState * st
- );
+ dtx_encState * st
+ );
/* excitation energy adjustment depending on speech coder mode used, Q7 */
static Word16 en_adjust[9] =
{
- 230, /* mode0 = 7k : -5.4dB */
- 179, /* mode1 = 9k : -4.2dB */
- 141, /* mode2 = 12k : -3.3dB */
- 128, /* mode3 = 14k : -3.0dB */
- 122, /* mode4 = 16k : -2.85dB */
- 115, /* mode5 = 18k : -2.7dB */
- 115, /* mode6 = 20k : -2.7dB */
- 115, /* mode7 = 23k : -2.7dB */
- 115 /* mode8 = 24k : -2.7dB */
+ 230, /* mode0 = 7k : -5.4dB */
+ 179, /* mode1 = 9k : -4.2dB */
+ 141, /* mode2 = 12k : -3.3dB */
+ 128, /* mode3 = 14k : -3.0dB */
+ 122, /* mode4 = 16k : -2.85dB */
+ 115, /* mode5 = 18k : -2.7dB */
+ 115, /* mode6 = 20k : -2.7dB */
+ 115, /* mode7 = 23k : -2.7dB */
+ 115 /* mode8 = 24k : -2.7dB */
};
/**************************************************************************
@@ -71,24 +71,24 @@ static Word16 en_adjust[9] =
**************************************************************************/
Word16 dtx_enc_init(dtx_encState ** st, Word16 isf_init[], VO_MEM_OPERATOR *pMemOP)
{
- dtx_encState *s;
-
- if (st == (dtx_encState **) NULL)
- {
- fprintf(stderr, "dtx_enc_init: invalid parameter\n");
- return -1;
- }
- *st = NULL;
-
- /* allocate memory */
- if ((s = (dtx_encState *)mem_malloc(pMemOP, sizeof(dtx_encState), 32, VO_INDEX_ENC_AMRWB)) == NULL)
- {
- fprintf(stderr, "dtx_enc_init: can not malloc state structure\n");
- return -1;
- }
- dtx_enc_reset(s, isf_init);
- *st = s;
- return 0;
+ dtx_encState *s;
+
+ if (st == (dtx_encState **) NULL)
+ {
+ fprintf(stderr, "dtx_enc_init: invalid parameter\n");
+ return -1;
+ }
+ *st = NULL;
+
+ /* allocate memory */
+ if ((s = (dtx_encState *)mem_malloc(pMemOP, sizeof(dtx_encState), 32, VO_INDEX_ENC_AMRWB)) == NULL)
+ {
+ fprintf(stderr, "dtx_enc_init: can not malloc state structure\n");
+ return -1;
+ }
+ dtx_enc_reset(s, isf_init);
+ *st = s;
+ return 0;
}
/**************************************************************************
@@ -98,40 +98,40 @@ Word16 dtx_enc_init(dtx_encState ** st, Word16 isf_init[], VO_MEM_OPERATOR *pMem
**************************************************************************/
Word16 dtx_enc_reset(dtx_encState * st, Word16 isf_init[])
{
- Word32 i;
-
- if (st == (dtx_encState *) NULL)
- {
- fprintf(stderr, "dtx_enc_reset: invalid parameter\n");
- return -1;
- }
- st->hist_ptr = 0;
- st->log_en_index = 0;
-
- /* Init isf_hist[] */
- for (i = 0; i < DTX_HIST_SIZE; i++)
- {
- Copy(isf_init, &st->isf_hist[i * M], M);
- }
- st->cng_seed = RANDOM_INITSEED;
-
- /* Reset energy history */
- Set_zero(st->log_en_hist, DTX_HIST_SIZE);
-
- st->dtxHangoverCount = DTX_HANG_CONST;
- st->decAnaElapsedCount = 32767;
-
- for (i = 0; i < 28; i++)
- {
- st->D[i] = 0;
- }
-
- for (i = 0; i < DTX_HIST_SIZE - 1; i++)
- {
- st->sumD[i] = 0;
- }
-
- return 1;
+ Word32 i;
+
+ if (st == (dtx_encState *) NULL)
+ {
+ fprintf(stderr, "dtx_enc_reset: invalid parameter\n");
+ return -1;
+ }
+ st->hist_ptr = 0;
+ st->log_en_index = 0;
+
+ /* Init isf_hist[] */
+ for (i = 0; i < DTX_HIST_SIZE; i++)
+ {
+ Copy(isf_init, &st->isf_hist[i * M], M);
+ }
+ st->cng_seed = RANDOM_INITSEED;
+
+ /* Reset energy history */
+ Set_zero(st->log_en_hist, DTX_HIST_SIZE);
+
+ st->dtxHangoverCount = DTX_HANG_CONST;
+ st->decAnaElapsedCount = 32767;
+
+ for (i = 0; i < 28; i++)
+ {
+ st->D[i] = 0;
+ }
+
+ for (i = 0; i < DTX_HIST_SIZE - 1; i++)
+ {
+ st->sumD[i] = 0;
+ }
+
+ return 1;
}
/**************************************************************************
@@ -141,12 +141,12 @@ Word16 dtx_enc_reset(dtx_encState * st, Word16 isf_init[])
**************************************************************************/
void dtx_enc_exit(dtx_encState ** st, VO_MEM_OPERATOR *pMemOP)
{
- if (st == NULL || *st == NULL)
- return;
- /* deallocate memory */
- mem_free(pMemOP, *st, VO_INDEX_ENC_AMRWB);
- *st = NULL;
- return;
+ if (st == NULL || *st == NULL)
+ return;
+ /* deallocate memory */
+ mem_free(pMemOP, *st, VO_INDEX_ENC_AMRWB);
+ *st = NULL;
+ return;
}
@@ -156,133 +156,133 @@ void dtx_enc_exit(dtx_encState ** st, VO_MEM_OPERATOR *pMemOP)
*
**************************************************************************/
Word16 dtx_enc(
- dtx_encState * st, /* i/o : State struct */
- Word16 isf[M], /* o : CN ISF vector */
- Word16 * exc2, /* o : CN excitation */
- Word16 ** prms
- )
+ dtx_encState * st, /* i/o : State struct */
+ Word16 isf[M], /* o : CN ISF vector */
+ Word16 * exc2, /* o : CN excitation */
+ Word16 ** prms
+ )
{
- Word32 i, j;
- Word16 indice[7];
- Word16 log_en, gain, level, exp, exp0, tmp;
- Word16 log_en_int_e, log_en_int_m;
- Word32 L_isf[M], ener32, level32;
- Word16 isf_order[3];
- Word16 CN_dith;
-
- /* VOX mode computation of SID parameters */
- log_en = 0;
- for (i = 0; i < M; i++)
- {
- L_isf[i] = 0;
- }
- /* average energy and isf */
- for (i = 0; i < DTX_HIST_SIZE; i++)
- {
- /* Division by DTX_HIST_SIZE = 8 has been done in dtx_buffer. log_en is in Q10 */
- log_en = add(log_en, st->log_en_hist[i]);
-
- }
- find_frame_indices(st->isf_hist, isf_order, st);
- aver_isf_history(st->isf_hist, isf_order, L_isf);
-
- for (j = 0; j < M; j++)
- {
- isf[j] = (Word16)(L_isf[j] >> 3); /* divide by 8 */
- }
-
- /* quantize logarithmic energy to 6 bits (-6 : 66 dB) which corresponds to -2:22 in log2(E). */
- /* st->log_en_index = (short)( (log_en + 2.0) * 2.625 ); */
-
- /* increase dynamics to 7 bits (Q8) */
- log_en = (log_en >> 2);
-
- /* Add 2 in Q8 = 512 to get log2(E) between 0:24 */
- log_en = add(log_en, 512);
-
- /* Multiply by 2.625 to get full 6 bit range. 2.625 = 21504 in Q13. The result is in Q6 */
- log_en = mult(log_en, 21504);
-
- /* Quantize Energy */
- st->log_en_index = shr(log_en, 6);
-
- if(st->log_en_index > 63)
- {
- st->log_en_index = 63;
- }
- if (st->log_en_index < 0)
- {
- st->log_en_index = 0;
- }
- /* Quantize ISFs */
- Qisf_ns(isf, isf, indice);
-
-
- Parm_serial(indice[0], 6, prms);
- Parm_serial(indice[1], 6, prms);
- Parm_serial(indice[2], 6, prms);
- Parm_serial(indice[3], 5, prms);
- Parm_serial(indice[4], 5, prms);
-
- Parm_serial((st->log_en_index), 6, prms);
-
- CN_dith = dithering_control(st);
- Parm_serial(CN_dith, 1, prms);
-
- /* level = (float)( pow( 2.0f, (float)st->log_en_index / 2.625 - 2.0 ) ); */
- /* log2(E) in Q9 (log2(E) lies in between -2:22) */
- log_en = shl(st->log_en_index, 15 - 6);
-
- /* Divide by 2.625; log_en will be between 0:24 */
- log_en = mult(log_en, 12483);
- /* the result corresponds to log2(gain) in Q10 */
-
- /* Find integer part */
- log_en_int_e = (log_en >> 10);
-
- /* Find fractional part */
- log_en_int_m = (Word16) (log_en & 0x3ff);
- log_en_int_m = shl(log_en_int_m, 5);
-
- /* Subtract 2 from log_en in Q9, i.e divide the gain by 2 (energy by 4) */
- /* Add 16 in order to have the result of pow2 in Q16 */
- log_en_int_e = add(log_en_int_e, 16 - 1);
-
- level32 = Pow2(log_en_int_e, log_en_int_m); /* Q16 */
- exp0 = norm_l(level32);
- level32 = (level32 << exp0); /* level in Q31 */
- exp0 = (15 - exp0);
- level = extract_h(level32); /* level in Q15 */
-
- /* generate white noise vector */
- for (i = 0; i < L_FRAME; i++)
- {
- exc2[i] = (Random(&(st->cng_seed)) >> 4);
- }
-
- /* gain = level / sqrt(ener) * sqrt(L_FRAME) */
-
- /* energy of generated excitation */
- ener32 = Dot_product12(exc2, exc2, L_FRAME, &exp);
+ Word32 i, j;
+ Word16 indice[7];
+ Word16 log_en, gain, level, exp, exp0, tmp;
+ Word16 log_en_int_e, log_en_int_m;
+ Word32 L_isf[M], ener32, level32;
+ Word16 isf_order[3];
+ Word16 CN_dith;
+
+ /* VOX mode computation of SID parameters */
+ log_en = 0;
+ for (i = 0; i < M; i++)
+ {
+ L_isf[i] = 0;
+ }
+ /* average energy and isf */
+ for (i = 0; i < DTX_HIST_SIZE; i++)
+ {
+ /* Division by DTX_HIST_SIZE = 8 has been done in dtx_buffer. log_en is in Q10 */
+ log_en = add(log_en, st->log_en_hist[i]);
+
+ }
+ find_frame_indices(st->isf_hist, isf_order, st);
+ aver_isf_history(st->isf_hist, isf_order, L_isf);
+
+ for (j = 0; j < M; j++)
+ {
+ isf[j] = (Word16)(L_isf[j] >> 3); /* divide by 8 */
+ }
+
+ /* quantize logarithmic energy to 6 bits (-6 : 66 dB) which corresponds to -2:22 in log2(E). */
+ /* st->log_en_index = (short)( (log_en + 2.0) * 2.625 ); */
+
+ /* increase dynamics to 7 bits (Q8) */
+ log_en = (log_en >> 2);
+
+ /* Add 2 in Q8 = 512 to get log2(E) between 0:24 */
+ log_en = add(log_en, 512);
+
+ /* Multiply by 2.625 to get full 6 bit range. 2.625 = 21504 in Q13. The result is in Q6 */
+ log_en = mult(log_en, 21504);
+
+ /* Quantize Energy */
+ st->log_en_index = shr(log_en, 6);
+
+ if(st->log_en_index > 63)
+ {
+ st->log_en_index = 63;
+ }
+ if (st->log_en_index < 0)
+ {
+ st->log_en_index = 0;
+ }
+ /* Quantize ISFs */
+ Qisf_ns(isf, isf, indice);
+
+
+ Parm_serial(indice[0], 6, prms);
+ Parm_serial(indice[1], 6, prms);
+ Parm_serial(indice[2], 6, prms);
+ Parm_serial(indice[3], 5, prms);
+ Parm_serial(indice[4], 5, prms);
+
+ Parm_serial((st->log_en_index), 6, prms);
+
+ CN_dith = dithering_control(st);
+ Parm_serial(CN_dith, 1, prms);
+
+ /* level = (float)( pow( 2.0f, (float)st->log_en_index / 2.625 - 2.0 ) ); */
+ /* log2(E) in Q9 (log2(E) lies in between -2:22) */
+ log_en = shl(st->log_en_index, 15 - 6);
+
+ /* Divide by 2.625; log_en will be between 0:24 */
+ log_en = mult(log_en, 12483);
+ /* the result corresponds to log2(gain) in Q10 */
+
+ /* Find integer part */
+ log_en_int_e = (log_en >> 10);
+
+ /* Find fractional part */
+ log_en_int_m = (Word16) (log_en & 0x3ff);
+ log_en_int_m = shl(log_en_int_m, 5);
+
+ /* Subtract 2 from log_en in Q9, i.e divide the gain by 2 (energy by 4) */
+ /* Add 16 in order to have the result of pow2 in Q16 */
+ log_en_int_e = add(log_en_int_e, 16 - 1);
+
+ level32 = Pow2(log_en_int_e, log_en_int_m); /* Q16 */
+ exp0 = norm_l(level32);
+ level32 = (level32 << exp0); /* level in Q31 */
+ exp0 = (15 - exp0);
+ level = extract_h(level32); /* level in Q15 */
+
+ /* generate white noise vector */
+ for (i = 0; i < L_FRAME; i++)
+ {
+ exc2[i] = (Random(&(st->cng_seed)) >> 4);
+ }
+
+ /* gain = level / sqrt(ener) * sqrt(L_FRAME) */
+
+ /* energy of generated excitation */
+ ener32 = Dot_product12(exc2, exc2, L_FRAME, &exp);
- Isqrt_n(&ener32, &exp);
+ Isqrt_n(&ener32, &exp);
- gain = extract_h(ener32);
+ gain = extract_h(ener32);
- gain = mult(level, gain); /* gain in Q15 */
+ gain = mult(level, gain); /* gain in Q15 */
- exp = add(exp0, exp);
+ exp = add(exp0, exp);
- /* Multiply by sqrt(L_FRAME)=16, i.e. shift left by 4 */
- exp += 4;
+ /* Multiply by sqrt(L_FRAME)=16, i.e. shift left by 4 */
+ exp += 4;
- for (i = 0; i < L_FRAME; i++)
- {
- tmp = mult(exc2[i], gain); /* Q0 * Q15 */
- exc2[i] = shl(tmp, exp);
- }
+ for (i = 0; i < L_FRAME; i++)
+ {
+ tmp = mult(exc2[i], gain); /* Q0 * Q15 */
+ exc2[i] = shl(tmp, exp);
+ }
- return 0;
+ return 0;
}
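The fixed-point code in dtx_enc() implements the two formulas quoted in its comments: the 6-bit SID energy index is (log2(E) + 2) * 2.625 clamped to [0, 63], and the comfort-noise level is recovered as 2^(index/2.625 - 2). A float illustration of that pair (the codec does the same in Q formats via mult/shl and Pow2):

    #include <math.h>

    static int quant_log_energy(double log2_e)
    {
        int index = (int)((log2_e + 2.0) * 2.625);   /* 6-bit range -2..22 in log2(E) */
        if (index > 63) index = 63;
        if (index < 0)  index = 0;
        return index;
    }

    static double cn_level(int index)
    {
        return pow(2.0, (double)index / 2.625 - 2.0);  /* dequantised noise level */
    }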
/**************************************************************************
@@ -291,45 +291,45 @@ Word16 dtx_enc(
*
**************************************************************************/
Word16 dtx_buffer(
- dtx_encState * st, /* i/o : State struct */
- Word16 isf_new[], /* i : isf vector */
- Word32 enr, /* i : residual energy (in L_FRAME) */
- Word16 codec_mode
- )
+ dtx_encState * st, /* i/o : State struct */
+ Word16 isf_new[], /* i : isf vector */
+ Word32 enr, /* i : residual energy (in L_FRAME) */
+ Word16 codec_mode
+ )
{
- Word16 log_en;
-
- Word16 log_en_e;
- Word16 log_en_m;
- st->hist_ptr = add(st->hist_ptr, 1);
- if(st->hist_ptr == DTX_HIST_SIZE)
- {
- st->hist_ptr = 0;
- }
- /* copy lsp vector into buffer */
- Copy(isf_new, &st->isf_hist[st->hist_ptr * M], M);
-
- /* log_en = (float)log10(enr*0.0059322)/(float)log10(2.0f); */
- Log2(enr, &log_en_e, &log_en_m);
-
- /* convert exponent and mantissa to Word16 Q7. Q7 is used to simplify averaging in dtx_enc */
- log_en = shl(log_en_e, 7); /* Q7 */
- log_en = add(log_en, shr(log_en_m, 15 - 7));
-
- /* Find energy per sample by multiplying with 0.0059322, i.e subtract log2(1/0.0059322) = 7.39722 The
- * constant 0.0059322 takes into account windowings and analysis length from autocorrelation
- * computations; 7.39722 in Q7 = 947 */
- /* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
- /* log_en = sub( log_en, 947 + en_adjust[codec_mode] ); */
-
- /* Find energy per sample (divide by L_FRAME=256), i.e subtract log2(256) = 8.0 (1024 in Q7) */
- /* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
-
- log_en = sub(log_en, add(1024, en_adjust[codec_mode]));
-
- /* Insert into the buffer */
- st->log_en_hist[st->hist_ptr] = log_en;
- return 0;
+ Word16 log_en;
+
+ Word16 log_en_e;
+ Word16 log_en_m;
+ st->hist_ptr = add(st->hist_ptr, 1);
+ if(st->hist_ptr == DTX_HIST_SIZE)
+ {
+ st->hist_ptr = 0;
+ }
+ /* copy lsp vector into buffer */
+ Copy(isf_new, &st->isf_hist[st->hist_ptr * M], M);
+
+ /* log_en = (float)log10(enr*0.0059322)/(float)log10(2.0f); */
+ Log2(enr, &log_en_e, &log_en_m);
+
+ /* convert exponent and mantissa to Word16 Q7. Q7 is used to simplify averaging in dtx_enc */
+ log_en = shl(log_en_e, 7); /* Q7 */
+ log_en = add(log_en, shr(log_en_m, 15 - 7));
+
+ /* Find energy per sample by multiplying with 0.0059322, i.e subtract log2(1/0.0059322) = 7.39722 The
+ * constant 0.0059322 takes into account windowings and analysis length from autocorrelation
+ * computations; 7.39722 in Q7 = 947 */
+ /* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
+ /* log_en = sub( log_en, 947 + en_adjust[codec_mode] ); */
+
+ /* Find energy per sample (divide by L_FRAME=256), i.e subtract log2(256) = 8.0 (1024 in Q7) */
+ /* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
+
+ log_en = sub(log_en, add(1024, en_adjust[codec_mode]));
+
+ /* Insert into the buffer */
+ st->log_en_hist[st->hist_ptr] = log_en;
+ return 0;
}
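A float sketch of the per-frame bookkeeping dtx_buffer() does in Q7: convert the residual energy to log2, normalise it per sample (subtract log2(256) = 8 for L_FRAME), apply the mode-dependent adjustment, and push the value into the 8-entry circular history that dtx_enc() later averages. The pre-division by the history size is an interpretation of the "division ... has been done in dtx_buffer" comment above and is only illustrative.

    #include <math.h>

    #define DTX_HIST 8

    static void dtx_buffer_sketch(double enr, double mode_adjust_log2,
                                  double log_en_hist[DTX_HIST], int *hist_ptr)
    {
        *hist_ptr = (*hist_ptr + 1) % DTX_HIST;              /* circular history   */
        double log_en = log2(enr) - 8.0 - mode_adjust_log2;  /* per-sample energy  */
        log_en_hist[*hist_ptr] = log_en / DTX_HIST;          /* pre-scaled for avg */
    }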
/**************************************************************************
@@ -339,267 +339,267 @@ Word16 dtx_buffer(
* the decoding side.
**************************************************************************/
void tx_dtx_handler(dtx_encState * st, /* i/o : State struct */
- Word16 vad_flag, /* i : vad decision */
- Word16 * usedMode /* i/o : mode changed or not */
- )
+ Word16 vad_flag, /* i : vad decision */
+ Word16 * usedMode /* i/o : mode changed or not */
+ )
{
- /* this state machine is in synch with the GSMEFR txDtx machine */
- st->decAnaElapsedCount = add(st->decAnaElapsedCount, 1);
-
- if (vad_flag != 0)
- {
- st->dtxHangoverCount = DTX_HANG_CONST;
- } else
- { /* non-speech */
- if (st->dtxHangoverCount == 0)
- { /* out of decoder analysis hangover */
- st->decAnaElapsedCount = 0;
- *usedMode = MRDTX;
- } else
- { /* in possible analysis hangover */
- st->dtxHangoverCount = sub(st->dtxHangoverCount, 1);
-
- /* decAnaElapsedCount + dtxHangoverCount < DTX_ELAPSED_FRAMES_THRESH */
- if (sub(add(st->decAnaElapsedCount, st->dtxHangoverCount),
- DTX_ELAPSED_FRAMES_THRESH) < 0)
- {
- *usedMode = MRDTX;
- /* if short time since decoder update, do not add extra HO */
- }
- /* else override VAD and stay in speech mode *usedMode and add extra hangover */
- }
- }
-
- return;
+ /* this state machine is in synch with the GSMEFR txDtx machine */
+ st->decAnaElapsedCount = add(st->decAnaElapsedCount, 1);
+
+ if (vad_flag != 0)
+ {
+ st->dtxHangoverCount = DTX_HANG_CONST;
+ } else
+ { /* non-speech */
+ if (st->dtxHangoverCount == 0)
+ { /* out of decoder analysis hangover */
+ st->decAnaElapsedCount = 0;
+ *usedMode = MRDTX;
+ } else
+ { /* in possible analysis hangover */
+ st->dtxHangoverCount = sub(st->dtxHangoverCount, 1);
+
+ /* decAnaElapsedCount + dtxHangoverCount < DTX_ELAPSED_FRAMES_THRESH */
+ if (sub(add(st->decAnaElapsedCount, st->dtxHangoverCount),
+ DTX_ELAPSED_FRAMES_THRESH) < 0)
+ {
+ *usedMode = MRDTX;
+ /* if short time since decoder update, do not add extra HO */
+ }
+ /* else override VAD and stay in speech mode *usedMode and add extra hangover */
+ }
+ }
+
+ return;
}
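Condensed sketch of the tx_dtx_handler() decision above (the constant values here are illustrative, not the codec's): speech rearms the hangover counter; comfort-noise mode is selected once the hangover has been spent, or early while still in hangover if the decoder state was refreshed recently enough, otherwise the current speech mode is kept.

    typedef struct {
        int dtxHangoverCount;
        int decAnaElapsedCount;
    } dtx_sketch_t;

    enum { HANG_CONST = 7, ELAPSED_THRESH = 24, MODE_KEEP = 0, MODE_DTX = 1 };

    static int tx_dtx_decision(dtx_sketch_t *st, int vad_flag)
    {
        st->decAnaElapsedCount++;
        if (vad_flag) {
            st->dtxHangoverCount = HANG_CONST;      /* speech: rearm hangover      */
            return MODE_KEEP;
        }
        if (st->dtxHangoverCount == 0) {            /* hangover spent              */
            st->decAnaElapsedCount = 0;
            return MODE_DTX;
        }
        st->dtxHangoverCount--;                     /* still in hangover           */
        if (st->decAnaElapsedCount + st->dtxHangoverCount < ELAPSED_THRESH)
            return MODE_DTX;                        /* decoder updated recently    */
        return MODE_KEEP;                           /* keep coding as speech       */
    }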
static void aver_isf_history(
- Word16 isf_old[],
- Word16 indices[],
- Word32 isf_aver[]
- )
+ Word16 isf_old[],
+ Word16 indices[],
+ Word32 isf_aver[]
+ )
{
- Word32 i, j, k;
- Word16 isf_tmp[2 * M];
- Word32 L_tmp;
-
- /* Memorize in isf_tmp[][] the ISF vectors to be replaced by */
- /* the median ISF vector prior to the averaging */
- for (k = 0; k < 2; k++)
- {
- if ((indices[k] + 1) != 0)
- {
- for (i = 0; i < M; i++)
- {
- isf_tmp[k * M + i] = isf_old[indices[k] * M + i];
- isf_old[indices[k] * M + i] = isf_old[indices[2] * M + i];
- }
- }
- }
-
- /* Perform the ISF averaging */
- for (j = 0; j < M; j++)
- {
- L_tmp = 0;
-
- for (i = 0; i < DTX_HIST_SIZE; i++)
- {
- L_tmp = L_add(L_tmp, L_deposit_l(isf_old[i * M + j]));
- }
- isf_aver[j] = L_tmp;
- }
-
- /* Retrieve from isf_tmp[][] the ISF vectors saved prior to averaging */
- for (k = 0; k < 2; k++)
- {
- if ((indices[k] + 1) != 0)
- {
- for (i = 0; i < M; i++)
- {
- isf_old[indices[k] * M + i] = isf_tmp[k * M + i];
- }
- }
- }
-
- return;
+ Word32 i, j, k;
+ Word16 isf_tmp[2 * M];
+ Word32 L_tmp;
+
+ /* Memorize in isf_tmp[][] the ISF vectors to be replaced by */
+ /* the median ISF vector prior to the averaging */
+ for (k = 0; k < 2; k++)
+ {
+ if ((indices[k] + 1) != 0)
+ {
+ for (i = 0; i < M; i++)
+ {
+ isf_tmp[k * M + i] = isf_old[indices[k] * M + i];
+ isf_old[indices[k] * M + i] = isf_old[indices[2] * M + i];
+ }
+ }
+ }
+
+ /* Perform the ISF averaging */
+ for (j = 0; j < M; j++)
+ {
+ L_tmp = 0;
+
+ for (i = 0; i < DTX_HIST_SIZE; i++)
+ {
+ L_tmp = L_add(L_tmp, L_deposit_l(isf_old[i * M + j]));
+ }
+ isf_aver[j] = L_tmp;
+ }
+
+ /* Retrieve from isf_tmp[][] the ISF vectors saved prior to averaging */
+ for (k = 0; k < 2; k++)
+ {
+ if ((indices[k] + 1) != 0)
+ {
+ for (i = 0; i < M; i++)
+ {
+ isf_old[indices[k] * M + i] = isf_tmp[k * M + i];
+ }
+ }
+ }
+
+ return;
}
static void find_frame_indices(
- Word16 isf_old_tx[],
- Word16 indices[],
- dtx_encState * st
- )
+ Word16 isf_old_tx[],
+ Word16 indices[],
+ dtx_encState * st
+ )
{
- Word32 L_tmp, summin, summax, summax2nd;
- Word16 i, j, tmp;
- Word16 ptr;
-
- /* Remove the effect of the oldest frame from the column */
- /* sum sumD[0..DTX_HIST_SIZE-1]. sumD[DTX_HIST_SIZE] is */
- /* not updated since it will be removed later. */
-
- tmp = DTX_HIST_SIZE_MIN_ONE;
- j = -1;
- for (i = 0; i < DTX_HIST_SIZE_MIN_ONE; i++)
- {
- j = add(j, tmp);
- st->sumD[i] = L_sub(st->sumD[i], st->D[j]);
- tmp = sub(tmp, 1);
- }
-
- /* Shift the column sum sumD. The element sumD[DTX_HIST_SIZE-1] */
- /* corresponding to the oldest frame is removed. The sum of */
- /* the distances between the latest isf and other isfs, */
- /* i.e. the element sumD[0], will be computed during this call. */
- /* Hence this element is initialized to zero. */
-
- for (i = DTX_HIST_SIZE_MIN_ONE; i > 0; i--)
- {
- st->sumD[i] = st->sumD[i - 1];
- }
- st->sumD[0] = 0;
-
- /* Remove the oldest frame from the distance matrix. */
- /* Note that the distance matrix is replaced by a one- */
- /* dimensional array to save static memory. */
-
- tmp = 0;
- for (i = 27; i >= 12; i = (Word16) (i - tmp))
- {
- tmp = add(tmp, 1);
- for (j = tmp; j > 0; j--)
- {
- st->D[i - j + 1] = st->D[i - j - tmp];
- }
- }
-
- /* Compute the first column of the distance matrix D */
- /* (squared Euclidean distances from isf1[] to isf_old_tx[][]). */
-
- ptr = st->hist_ptr;
- for (i = 1; i < DTX_HIST_SIZE; i++)
- {
- /* Compute the distance between the latest isf and the other isfs. */
- ptr = sub(ptr, 1);
- if (ptr < 0)
- {
- ptr = DTX_HIST_SIZE_MIN_ONE;
- }
- L_tmp = 0;
- for (j = 0; j < M; j++)
- {
- tmp = sub(isf_old_tx[st->hist_ptr * M + j], isf_old_tx[ptr * M + j]);
- L_tmp = L_mac(L_tmp, tmp, tmp);
- }
- st->D[i - 1] = L_tmp;
-
- /* Update also the column sums. */
- st->sumD[0] = L_add(st->sumD[0], st->D[i - 1]);
- st->sumD[i] = L_add(st->sumD[i], st->D[i - 1]);
- }
-
- /* Find the minimum and maximum distances */
- summax = st->sumD[0];
- summin = st->sumD[0];
- indices[0] = 0;
- indices[2] = 0;
- for (i = 1; i < DTX_HIST_SIZE; i++)
- {
- if (L_sub(st->sumD[i], summax) > 0)
- {
- indices[0] = i;
- summax = st->sumD[i];
- }
- if (L_sub(st->sumD[i], summin) < 0)
- {
- indices[2] = i;
- summin = st->sumD[i];
- }
- }
-
- /* Find the second largest distance */
- summax2nd = -2147483647L;
- indices[1] = -1;
- for (i = 0; i < DTX_HIST_SIZE; i++)
- {
- if ((L_sub(st->sumD[i], summax2nd) > 0) && (sub(i, indices[0]) != 0))
- {
- indices[1] = i;
- summax2nd = st->sumD[i];
- }
- }
-
- for (i = 0; i < 3; i++)
- {
- indices[i] = sub(st->hist_ptr, indices[i]);
- if (indices[i] < 0)
- {
- indices[i] = add(indices[i], DTX_HIST_SIZE);
- }
- }
-
- /* If maximum distance/MED_THRESH is smaller than minimum distance */
- /* then the median ISF vector replacement is not performed */
- tmp = norm_l(summax);
- summax = (summax << tmp);
- summin = (summin << tmp);
- L_tmp = L_mult(voround(summax), INV_MED_THRESH);
- if(L_tmp <= summin)
- {
- indices[0] = -1;
- }
- /* If second largest distance/MED_THRESH is smaller than */
- /* minimum distance then the median ISF vector replacement is */
- /* not performed */
- summax2nd = L_shl(summax2nd, tmp);
- L_tmp = L_mult(voround(summax2nd), INV_MED_THRESH);
- if(L_tmp <= summin)
- {
- indices[1] = -1;
- }
- return;
+ Word32 L_tmp, summin, summax, summax2nd;
+ Word16 i, j, tmp;
+ Word16 ptr;
+
+ /* Remove the effect of the oldest frame from the column */
+ /* sum sumD[0..DTX_HIST_SIZE-1]. sumD[DTX_HIST_SIZE] is */
+ /* not updated since it will be removed later. */
+
+ tmp = DTX_HIST_SIZE_MIN_ONE;
+ j = -1;
+ for (i = 0; i < DTX_HIST_SIZE_MIN_ONE; i++)
+ {
+ j = add(j, tmp);
+ st->sumD[i] = L_sub(st->sumD[i], st->D[j]);
+ tmp = sub(tmp, 1);
+ }
+
+ /* Shift the column sum sumD. The element sumD[DTX_HIST_SIZE-1] */
+ /* corresponding to the oldest frame is removed. The sum of */
+ /* the distances between the latest isf and other isfs, */
+ /* i.e. the element sumD[0], will be computed during this call. */
+ /* Hence this element is initialized to zero. */
+
+ for (i = DTX_HIST_SIZE_MIN_ONE; i > 0; i--)
+ {
+ st->sumD[i] = st->sumD[i - 1];
+ }
+ st->sumD[0] = 0;
+
+ /* Remove the oldest frame from the distance matrix. */
+ /* Note that the distance matrix is replaced by a one- */
+ /* dimensional array to save static memory. */
+
+ tmp = 0;
+ for (i = 27; i >= 12; i = (Word16) (i - tmp))
+ {
+ tmp = add(tmp, 1);
+ for (j = tmp; j > 0; j--)
+ {
+ st->D[i - j + 1] = st->D[i - j - tmp];
+ }
+ }
+
+ /* Compute the first column of the distance matrix D */
+ /* (squared Euclidean distances from isf1[] to isf_old_tx[][]). */
+
+ ptr = st->hist_ptr;
+ for (i = 1; i < DTX_HIST_SIZE; i++)
+ {
+ /* Compute the distance between the latest isf and the other isfs. */
+ ptr = sub(ptr, 1);
+ if (ptr < 0)
+ {
+ ptr = DTX_HIST_SIZE_MIN_ONE;
+ }
+ L_tmp = 0;
+ for (j = 0; j < M; j++)
+ {
+ tmp = sub(isf_old_tx[st->hist_ptr * M + j], isf_old_tx[ptr * M + j]);
+ L_tmp = L_mac(L_tmp, tmp, tmp);
+ }
+ st->D[i - 1] = L_tmp;
+
+ /* Update also the column sums. */
+ st->sumD[0] = L_add(st->sumD[0], st->D[i - 1]);
+ st->sumD[i] = L_add(st->sumD[i], st->D[i - 1]);
+ }
+
+ /* Find the minimum and maximum distances */
+ summax = st->sumD[0];
+ summin = st->sumD[0];
+ indices[0] = 0;
+ indices[2] = 0;
+ for (i = 1; i < DTX_HIST_SIZE; i++)
+ {
+ if (L_sub(st->sumD[i], summax) > 0)
+ {
+ indices[0] = i;
+ summax = st->sumD[i];
+ }
+ if (L_sub(st->sumD[i], summin) < 0)
+ {
+ indices[2] = i;
+ summin = st->sumD[i];
+ }
+ }
+
+ /* Find the second largest distance */
+ summax2nd = -2147483647L;
+ indices[1] = -1;
+ for (i = 0; i < DTX_HIST_SIZE; i++)
+ {
+ if ((L_sub(st->sumD[i], summax2nd) > 0) && (sub(i, indices[0]) != 0))
+ {
+ indices[1] = i;
+ summax2nd = st->sumD[i];
+ }
+ }
+
+ for (i = 0; i < 3; i++)
+ {
+ indices[i] = sub(st->hist_ptr, indices[i]);
+ if (indices[i] < 0)
+ {
+ indices[i] = add(indices[i], DTX_HIST_SIZE);
+ }
+ }
+
+ /* If maximum distance/MED_THRESH is smaller than minimum distance */
+ /* then the median ISF vector replacement is not performed */
+ tmp = norm_l(summax);
+ summax = (summax << tmp);
+ summin = (summin << tmp);
+ L_tmp = L_mult(voround(summax), INV_MED_THRESH);
+ if(L_tmp <= summin)
+ {
+ indices[0] = -1;
+ }
+ /* If second largest distance/MED_THRESH is smaller than */
+ /* minimum distance then the median ISF vector replacement is */
+ /* not performed */
+ summax2nd = L_shl(summax2nd, tmp);
+ L_tmp = L_mult(voround(summax2nd), INV_MED_THRESH);
+ if(L_tmp <= summin)
+ {
+ indices[1] = -1;
+ }
+ return;
}
static Word16 dithering_control(
- dtx_encState * st
- )
+ dtx_encState * st
+ )
{
- Word16 tmp, mean, CN_dith, gain_diff;
- Word32 i, ISF_diff;
-
- /* determine how stationary the spectrum of background noise is */
- ISF_diff = 0;
- for (i = 0; i < 8; i++)
- {
- ISF_diff = L_add(ISF_diff, st->sumD[i]);
- }
- if ((ISF_diff >> 26) > 0)
- {
- CN_dith = 1;
- } else
- {
- CN_dith = 0;
- }
-
- /* determine how stationary the energy of background noise is */
- mean = 0;
- for (i = 0; i < DTX_HIST_SIZE; i++)
- {
- mean = add(mean, st->log_en_hist[i]);
- }
- mean = (mean >> 3);
- gain_diff = 0;
- for (i = 0; i < DTX_HIST_SIZE; i++)
- {
- tmp = abs_s(sub(st->log_en_hist[i], mean));
- gain_diff = add(gain_diff, tmp);
- }
- if (gain_diff > GAIN_THR)
- {
- CN_dith = 1;
- }
- return CN_dith;
+ Word16 tmp, mean, CN_dith, gain_diff;
+ Word32 i, ISF_diff;
+
+ /* determine how stationary the spectrum of background noise is */
+ ISF_diff = 0;
+ for (i = 0; i < 8; i++)
+ {
+ ISF_diff = L_add(ISF_diff, st->sumD[i]);
+ }
+ if ((ISF_diff >> 26) > 0)
+ {
+ CN_dith = 1;
+ } else
+ {
+ CN_dith = 0;
+ }
+
+ /* determine how stationary the energy of background noise is */
+ mean = 0;
+ for (i = 0; i < DTX_HIST_SIZE; i++)
+ {
+ mean = add(mean, st->log_en_hist[i]);
+ }
+ mean = (mean >> 3);
+ gain_diff = 0;
+ for (i = 0; i < DTX_HIST_SIZE; i++)
+ {
+ tmp = abs_s(sub(st->log_en_hist[i], mean));
+ gain_diff = add(gain_diff, tmp);
+ }
+ if (gain_diff > GAIN_THR)
+ {
+ CN_dith = 1;
+ }
+ return CN_dith;
}
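
The two routines above work on an 8-frame circular history (the sum over sumD[0..7] and the mean >> 3 above imply DTX_HIST_SIZE == 8): find_frame_indices picks the history entries whose column sums of pairwise squared ISF distances are the largest, second largest and smallest, then converts those relative offsets back to buffer positions; dithering_control flags noise that is not stationary enough in spectrum or energy. A minimal plain-C sketch of the two building blocks, assuming an LP order of 16 and ignoring the saturating basic ops:

    #include <stdint.h>

    /* Squared Euclidean distance between two ISF vectors (the quantity kept in D[]). */
    static int64_t isf_sqdist(const int16_t *isf_a, const int16_t *isf_b)
    {
        int64_t d = 0;
        for (int j = 0; j < 16; j++) {
            int32_t diff = (int32_t)isf_a[j] - (int32_t)isf_b[j];
            d += (int64_t)diff * diff;
        }
        return d;
    }

    /* indices[] hold offsets counted back from hist_ptr; the last loop above maps
     * them to absolute positions inside the circular 8-frame history. */
    static int rel_to_hist_pos(int hist_ptr, int rel)
    {
        int pos = hist_ptr - rel;
        return (pos < 0) ? pos + 8 : pos;
    }
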
diff --git a/media/libstagefright/codecs/amrwbenc/src/g_pitch.c b/media/libstagefright/codecs/amrwbenc/src/g_pitch.c
index d681f2e..98ee87e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/g_pitch.c
+++ b/media/libstagefright/codecs/amrwbenc/src/g_pitch.c
@@ -17,9 +17,9 @@
/***********************************************************************
* File: g_pitch.c *
* *
-* Description:Compute the gain of pitch. Result in Q12 *
-* if(gain < 0) gain = 0 *
-* if(gain > 1.2) gain = 1.2 *
+* Description:Compute the gain of pitch. Result in Q12 *
+* if(gain < 0) gain = 0 *
+* if(gain > 1.2) gain = 1.2 *
************************************************************************/
#include "typedef.h"
@@ -27,52 +27,52 @@
#include "math_op.h"
Word16 G_pitch( /* (o) Q14 : Gain of pitch lag saturated to 1.2 */
- Word16 xn[], /* (i) : Pitch target. */
- Word16 y1[], /* (i) : filtered adaptive codebook. */
- Word16 g_coeff[], /* : Correlations need for gain quantization. */
- Word16 L_subfr /* : Length of subframe. */
- )
+ Word16 xn[], /* (i) : Pitch target. */
+ Word16 y1[], /* (i) : filtered adaptive codebook. */
+ Word16 g_coeff[], /* : Correlations need for gain quantization. */
+ Word16 L_subfr /* : Length of subframe. */
+ )
{
- Word32 i;
- Word16 xy, yy, exp_xy, exp_yy, gain;
- /* Compute scalar product <y1[],y1[]> */
+ Word32 i;
+ Word16 xy, yy, exp_xy, exp_yy, gain;
+ /* Compute scalar product <y1[],y1[]> */
#ifdef ASM_OPT /* asm optimization branch */
- /* Compute scalar product <xn[],y1[]> */
- xy = extract_h(Dot_product12_asm(xn, y1, L_subfr, &exp_xy));
- yy = extract_h(Dot_product12_asm(y1, y1, L_subfr, &exp_yy));
+ /* Compute scalar product <xn[],y1[]> */
+ xy = extract_h(Dot_product12_asm(xn, y1, L_subfr, &exp_xy));
+ yy = extract_h(Dot_product12_asm(y1, y1, L_subfr, &exp_yy));
#else
- /* Compute scalar product <xn[],y1[]> */
- xy = extract_h(Dot_product12(xn, y1, L_subfr, &exp_xy));
- yy = extract_h(Dot_product12(y1, y1, L_subfr, &exp_yy));
+ /* Compute scalar product <xn[],y1[]> */
+ xy = extract_h(Dot_product12(xn, y1, L_subfr, &exp_xy));
+ yy = extract_h(Dot_product12(y1, y1, L_subfr, &exp_yy));
#endif
- g_coeff[0] = yy;
- g_coeff[1] = exp_yy;
- g_coeff[2] = xy;
- g_coeff[3] = exp_xy;
+ g_coeff[0] = yy;
+ g_coeff[1] = exp_yy;
+ g_coeff[2] = xy;
+ g_coeff[3] = exp_xy;
- /* If (xy < 0) gain = 0 */
- if (xy < 0)
- return ((Word16) 0);
+ /* If (xy < 0) gain = 0 */
+ if (xy < 0)
+ return ((Word16) 0);
- /* compute gain = xy/yy */
+ /* compute gain = xy/yy */
- xy >>= 1; /* Be sure xy < yy */
- gain = div_s(xy, yy);
+ xy >>= 1; /* Be sure xy < yy */
+ gain = div_s(xy, yy);
- i = exp_xy;
- i -= exp_yy;
+ i = exp_xy;
+ i -= exp_yy;
- gain = shl(gain, i);
+ gain = shl(gain, i);
- /* if (gain > 1.2) gain = 1.2 in Q14 */
- if(gain > 19661)
- {
- gain = 19661;
- }
- return (gain);
+ /* if (gain > 1.2) gain = 1.2 in Q14 */
+ if(gain > 19661)
+ {
+ gain = 19661;
+ }
+ return (gain);
}
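
In floating point, G_pitch above boils down to the ratio of the two correlations it also stores in g_coeff[], clamped to [0, 1.2]. A reference sketch (the fixed-point path instead carries separate mantissas and exponents from Dot_product12):

    /* Floating-point reference of G_pitch (sketch, not the Q14 code path above). */
    static float g_pitch_ref(const float *xn, const float *y1, int n)
    {
        float xy = 0.0f, yy = 0.0f;
        for (int i = 0; i < n; i++) {
            xy += xn[i] * y1[i];              /* <xn, y1> */
            yy += y1[i] * y1[i];              /* <y1, y1> */
        }
        if (xy < 0.0f || yy <= 0.0f)
            return 0.0f;                      /* if (gain < 0) gain = 0     */
        float g = xy / yy;
        return (g > 1.2f) ? 1.2f : g;         /* if (gain > 1.2) gain = 1.2 */
    }
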
diff --git a/media/libstagefright/codecs/amrwbenc/src/gpclip.c b/media/libstagefright/codecs/amrwbenc/src/gpclip.c
index 800b3f9..4ce3daa 100644
--- a/media/libstagefright/codecs/amrwbenc/src/gpclip.c
+++ b/media/libstagefright/codecs/amrwbenc/src/gpclip.c
@@ -35,75 +35,75 @@
void Init_gp_clip(
- Word16 mem[] /* (o) : memory of gain of pitch clipping algorithm */
- )
+ Word16 mem[] /* (o) : memory of gain of pitch clipping algorithm */
+ )
{
- mem[0] = DIST_ISF_MAX;
- mem[1] = GAIN_PIT_MIN;
+ mem[0] = DIST_ISF_MAX;
+ mem[1] = GAIN_PIT_MIN;
}
Word16 Gp_clip(
- Word16 mem[] /* (i/o) : memory of gain of pitch clipping algorithm */
- )
+ Word16 mem[] /* (i/o) : memory of gain of pitch clipping algorithm */
+ )
{
- Word16 clip = 0;
- if ((mem[0] < DIST_ISF_THRES) && (mem[1] > GAIN_PIT_THRES))
- clip = 1;
+ Word16 clip = 0;
+ if ((mem[0] < DIST_ISF_THRES) && (mem[1] > GAIN_PIT_THRES))
+ clip = 1;
- return (clip);
+ return (clip);
}
void Gp_clip_test_isf(
- Word16 isf[], /* (i) : isf values (in frequency domain) */
- Word16 mem[] /* (i/o) : memory of gain of pitch clipping algorithm */
- )
+ Word16 isf[], /* (i) : isf values (in frequency domain) */
+ Word16 mem[] /* (i/o) : memory of gain of pitch clipping algorithm */
+ )
{
- Word16 dist, dist_min;
- Word32 i;
+ Word16 dist, dist_min;
+ Word32 i;
- dist_min = vo_sub(isf[1], isf[0]);
+ dist_min = vo_sub(isf[1], isf[0]);
- for (i = 2; i < M - 1; i++)
- {
- dist = vo_sub(isf[i], isf[i - 1]);
- if(dist < dist_min)
- {
- dist_min = dist;
- }
- }
+ for (i = 2; i < M - 1; i++)
+ {
+ dist = vo_sub(isf[i], isf[i - 1]);
+ if(dist < dist_min)
+ {
+ dist_min = dist;
+ }
+ }
- dist = extract_h(L_mac(vo_L_mult(26214, mem[0]), 6554, dist_min));
+ dist = extract_h(L_mac(vo_L_mult(26214, mem[0]), 6554, dist_min));
- if (dist > DIST_ISF_MAX)
- {
- dist = DIST_ISF_MAX;
- }
- mem[0] = dist;
+ if (dist > DIST_ISF_MAX)
+ {
+ dist = DIST_ISF_MAX;
+ }
+ mem[0] = dist;
- return;
+ return;
}
void Gp_clip_test_gain_pit(
- Word16 gain_pit, /* (i) Q14 : gain of quantized pitch */
- Word16 mem[] /* (i/o) : memory of gain of pitch clipping algorithm */
- )
+ Word16 gain_pit, /* (i) Q14 : gain of quantized pitch */
+ Word16 mem[] /* (i/o) : memory of gain of pitch clipping algorithm */
+ )
{
- Word16 gain;
- Word32 L_tmp;
- L_tmp = (29491 * mem[1])<<1;
- L_tmp += (3277 * gain_pit)<<1;
-
- gain = extract_h(L_tmp);
-
- if(gain < GAIN_PIT_MIN)
- {
- gain = GAIN_PIT_MIN;
- }
- mem[1] = gain;
- return;
+ Word16 gain;
+ Word32 L_tmp;
+ L_tmp = (29491 * mem[1])<<1;
+ L_tmp += (3277 * gain_pit)<<1;
+
+ gain = extract_h(L_tmp);
+
+ if(gain < GAIN_PIT_MIN)
+ {
+ gain = GAIN_PIT_MIN;
+ }
+ mem[1] = gain;
+ return;
}
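
The test functions above keep two smoothed statistics in mem[]: the minimum ISF spacing and the quantized pitch gain, each updated with fixed Q15 weights (26214/32768 ~ 0.8, 6554/32768 ~ 0.2, 29491/32768 ~ 0.9, 3277/32768 ~ 0.1) and then compared against thresholds in Gp_clip. A float sketch of just the smoothing, with the DIST_ISF_MAX / GAIN_PIT_MIN clamping left out:

    /* One-pole smoothers behind Gp_clip_test_isf / Gp_clip_test_gain_pit (sketch). */
    static void gp_clip_update_ref(float mem[2], float dist_min, float gain_pit)
    {
        mem[0] = 0.8f * mem[0] + 0.2f * dist_min;   /* smoothed minimum ISF distance */
        mem[1] = 0.9f * mem[1] + 0.1f * gain_pit;   /* smoothed pitch gain           */
    }
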
diff --git a/media/libstagefright/codecs/amrwbenc/src/homing.c b/media/libstagefright/codecs/amrwbenc/src/homing.c
index 565040f..a96e7db 100644
--- a/media/libstagefright/codecs/amrwbenc/src/homing.c
+++ b/media/libstagefright/codecs/amrwbenc/src/homing.c
@@ -29,18 +29,18 @@
Word16 encoder_homing_frame_test(Word16 input_frame[])
{
- Word32 i;
- Word16 j = 0;
+ Word32 i;
+ Word16 j = 0;
- /* check 320 input samples for matching EHF_MASK: defined in e_homing.h */
- for (i = 0; i < L_FRAME16k; i++)
- {
- j = (Word16) (input_frame[i] ^ EHF_MASK);
+ /* check 320 input samples for matching EHF_MASK: defined in e_homing.h */
+ for (i = 0; i < L_FRAME16k; i++)
+ {
+ j = (Word16) (input_frame[i] ^ EHF_MASK);
- if (j)
- break;
- }
+ if (j)
+ break;
+ }
- return (Word16) (!j);
+ return (Word16) (!j);
}
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp400.c b/media/libstagefright/codecs/amrwbenc/src/hp400.c
index a6f9701..c658a92 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp400.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp400.c
@@ -50,56 +50,56 @@ static Word16 a[3] = {16384, 29280, -14160}; /* Q12 (x4) */
void Init_HP400_12k8(Word16 mem[])
{
- Set_zero(mem, 6);
+ Set_zero(mem, 6);
}
void HP400_12k8(
- Word16 signal[], /* input signal / output is divided by 16 */
- Word16 lg, /* lenght of signal */
- Word16 mem[] /* filter memory [6] */
- )
+ Word16 signal[], /* input signal / output is divided by 16 */
+ Word16 lg, /* lenght of signal */
+ Word16 mem[] /* filter memory [6] */
+ )
{
- Word16 x2;
- Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
- Word32 L_tmp;
- Word32 num;
- y2_hi = *mem++;
- y2_lo = *mem++;
- y1_hi = *mem++;
- y1_lo = *mem++;
- x0 = *mem++;
- x1 = *mem;
- num = (Word32)lg;
- do
- {
- x2 = x1;
- x1 = x0;
- x0 = *signal;
- /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] */
- /* + a[1]*y[i-1] + a[2] * y[i-2]; */
- L_tmp = 8192L; /* rounding to maximise precision */
- L_tmp += y1_lo * a[1];
- L_tmp += y2_lo * a[2];
- L_tmp = L_tmp >> 14;
- L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2)* b[0] + x1 * b[1]) << 1;
- L_tmp <<= 1; /* coeff Q12 --> Q13 */
- y2_hi = y1_hi;
- y2_lo = y1_lo;
- y1_hi = (Word16)(L_tmp>>16);
- y1_lo = (Word16)((L_tmp & 0xffff)>>1);
+ Word16 x2;
+ Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
+ Word32 L_tmp;
+ Word32 num;
+ y2_hi = *mem++;
+ y2_lo = *mem++;
+ y1_hi = *mem++;
+ y1_lo = *mem++;
+ x0 = *mem++;
+ x1 = *mem;
+ num = (Word32)lg;
+ do
+ {
+ x2 = x1;
+ x1 = x0;
+ x0 = *signal;
+ /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] */
+ /* + a[1]*y[i-1] + a[2] * y[i-2]; */
+ L_tmp = 8192L; /* rounding to maximise precision */
+ L_tmp += y1_lo * a[1];
+ L_tmp += y2_lo * a[2];
+ L_tmp = L_tmp >> 14;
+ L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2)* b[0] + x1 * b[1]) << 1;
+ L_tmp <<= 1; /* coeff Q12 --> Q13 */
+ y2_hi = y1_hi;
+ y2_lo = y1_lo;
+ y1_hi = (Word16)(L_tmp>>16);
+ y1_lo = (Word16)((L_tmp & 0xffff)>>1);
- /* signal is divided by 16 to avoid overflow in energy computation */
- *signal++ = (L_tmp + 0x8000) >> 16;
- }while(--num !=0);
+ /* signal is divided by 16 to avoid overflow in energy computation */
+ *signal++ = (L_tmp + 0x8000) >> 16;
+ }while(--num !=0);
- *mem-- = x1;
- *mem-- = x0;
- *mem-- = y1_lo;
- *mem-- = y1_hi;
- *mem-- = y2_lo;
- *mem = y2_hi;
- return;
+ *mem-- = x1;
+ *mem-- = x0;
+ *mem-- = y1_lo;
+ *mem-- = y1_hi;
+ *mem-- = y2_lo;
+ *mem = y2_hi;
+ return;
}
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp50.c b/media/libstagefright/codecs/amrwbenc/src/hp50.c
index c1c7b83..807d672 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp50.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp50.c
@@ -17,7 +17,7 @@
/***********************************************************************
* File: hp50.c *
* *
-* Description: *
+* Description: *
* 2nd order high pass filter with cut off frequency at 31 Hz. *
* Designed with cheby2 function in MATLAB. *
* Optimized for fixed-point to get the following frequency response: *
@@ -51,56 +51,56 @@ static Word16 a[3] = {8192, 16211, -8021}; /* Q12 (x2) */
void Init_HP50_12k8(Word16 mem[])
{
- Set_zero(mem, 6);
+ Set_zero(mem, 6);
}
void HP50_12k8(
- Word16 signal[], /* input/output signal */
- Word16 lg, /* lenght of signal */
- Word16 mem[] /* filter memory [6] */
- )
+ Word16 signal[], /* input/output signal */
+ Word16 lg, /* lenght of signal */
+ Word16 mem[] /* filter memory [6] */
+ )
{
- Word16 x2;
- Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
- Word32 L_tmp;
- Word32 num;
+ Word16 x2;
+ Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
+ Word32 L_tmp;
+ Word32 num;
- y2_hi = *mem++;
- y2_lo = *mem++;
- y1_hi = *mem++;
- y1_lo = *mem++;
- x0 = *mem++;
- x1 = *mem;
- num = (Word32)lg;
- do
- {
- x2 = x1;
- x1 = x0;
- x0 = *signal;
- /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] */
- /* + a[1]*y[i-1] + a[2] * y[i-2]; */
- L_tmp = 8192 ; /* rounding to maximise precision */
- L_tmp += y1_lo * a[1];
- L_tmp += y2_lo * a[2];
- L_tmp = L_tmp >> 14;
- L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2) * b[0] + x1 * b[1]) << 1;
- L_tmp <<= 2; /* coeff Q12 --> Q13 */
- y2_hi = y1_hi;
- y2_lo = y1_lo;
- y1_hi = (Word16)(L_tmp>>16);
- y1_lo = (Word16)((L_tmp & 0xffff)>>1);
- *signal++ = extract_h((L_add((L_tmp<<1), 0x8000)));
- }while(--num !=0);
+ y2_hi = *mem++;
+ y2_lo = *mem++;
+ y1_hi = *mem++;
+ y1_lo = *mem++;
+ x0 = *mem++;
+ x1 = *mem;
+ num = (Word32)lg;
+ do
+ {
+ x2 = x1;
+ x1 = x0;
+ x0 = *signal;
+ /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] */
+ /* + a[1]*y[i-1] + a[2] * y[i-2]; */
+ L_tmp = 8192 ; /* rounding to maximise precision */
+ L_tmp += y1_lo * a[1];
+ L_tmp += y2_lo * a[2];
+ L_tmp = L_tmp >> 14;
+ L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2) * b[0] + x1 * b[1]) << 1;
+ L_tmp <<= 2; /* coeff Q12 --> Q13 */
+ y2_hi = y1_hi;
+ y2_lo = y1_lo;
+ y1_hi = (Word16)(L_tmp>>16);
+ y1_lo = (Word16)((L_tmp & 0xffff)>>1);
+ *signal++ = extract_h((L_add((L_tmp<<1), 0x8000)));
+ }while(--num !=0);
- *mem-- = x1;
- *mem-- = x0;
- *mem-- = y1_lo;
- *mem-- = y1_hi;
- *mem-- = y2_lo;
- *mem-- = y2_hi;
+ *mem-- = x1;
+ *mem-- = x0;
+ *mem-- = y1_lo;
+ *mem-- = y1_hi;
+ *mem-- = y2_lo;
+ *mem-- = y2_hi;
- return;
+ return;
}
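
HP50_12k8 above (like HP400_12k8 in the previous file) is a 2nd-order high-pass IIR evaluated in direct form I; the code pairs x0 and x2 on the same coefficient b[0], and the y history is kept as hi/lo 16-bit pairs so the feedback path keeps 32-bit precision. A float sketch of the same recursion, leaving out the per-file Q-format and output scaling:

    /* Float direct-form I equivalent of the Q12 recursion above (sketch). */
    static void hp_biquad_ref(float *signal, int lg,
                              const float b[2], const float a[3],
                              float state[4] /* x1, x2, y1, y2 */)
    {
        for (int i = 0; i < lg; i++) {
            float x0 = signal[i];
            float y0 = b[0] * (x0 + state[1]) + b[1] * state[0]
                     + a[1] * state[2] + a[2] * state[3];
            state[1] = state[0];  state[0] = x0;    /* shift x history */
            state[3] = state[2];  state[2] = y0;    /* shift y history */
            signal[i] = y0;
        }
    }
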
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp6k.c b/media/libstagefright/codecs/amrwbenc/src/hp6k.c
index 8e66eb0..0baf612 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp6k.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp6k.c
@@ -17,10 +17,10 @@
/***********************************************************************
* File: hp6k.c *
* *
-* Description:15th order band pass 6kHz to 7kHz FIR filter *
+* Description:15th order band pass 6kHz to 7kHz FIR filter *
* frequency: 4kHz 5kHz 5.5kHz 6kHz 6.5kHz 7kHz 7.5kHz 8kHz *
-* dB loss: -60dB -45dB -13dB -3dB 0dB -3dB -13dB -45dB *
-* *
+* dB loss: -60dB -45dB -13dB -3dB 0dB -3dB -13dB -45dB *
+* *
************************************************************************/
#include "typedef.h"
@@ -34,58 +34,58 @@
Word16 fir_6k_7k[L_FIR] =
{
- -32, 47, 32, -27, -369,
- 1122, -1421, 0, 3798, -8880,
- 12349, -10984, 3548, 7766, -18001,
- 22118, -18001, 7766, 3548, -10984,
- 12349, -8880, 3798, 0, -1421,
- 1122, -369, -27, 32, 47,
- -32
+ -32, 47, 32, -27, -369,
+ 1122, -1421, 0, 3798, -8880,
+ 12349, -10984, 3548, 7766, -18001,
+ 22118, -18001, 7766, 3548, -10984,
+ 12349, -8880, 3798, 0, -1421,
+ 1122, -369, -27, 32, 47,
+ -32
};
void Init_Filt_6k_7k(Word16 mem[]) /* mem[30] */
{
- Set_zero(mem, L_FIR - 1);
- return;
+ Set_zero(mem, L_FIR - 1);
+ return;
}
void Filt_6k_7k(
- Word16 signal[], /* input: signal */
- Word16 lg, /* input: length of input */
- Word16 mem[] /* in/out: memory (size=30) */
- )
+ Word16 signal[], /* input: signal */
+ Word16 lg, /* input: length of input */
+ Word16 mem[] /* in/out: memory (size=30) */
+ )
{
- Word16 x[L_SUBFR16k + (L_FIR - 1)];
- Word32 i, L_tmp;
+ Word16 x[L_SUBFR16k + (L_FIR - 1)];
+ Word32 i, L_tmp;
- Copy(mem, x, L_FIR - 1);
- for (i = lg - 1; i >= 0; i--)
- {
- x[i + L_FIR - 1] = signal[i] >> 2; /* gain of filter = 4 */
- }
- for (i = 0; i < lg; i++)
- {
- L_tmp = (x[i] + x[i+ 30]) * fir_6k_7k[0];
- L_tmp += (x[i+1] + x[i + 29]) * fir_6k_7k[1];
- L_tmp += (x[i+2] + x[i + 28]) * fir_6k_7k[2];
- L_tmp += (x[i+3] + x[i + 27]) * fir_6k_7k[3];
- L_tmp += (x[i+4] + x[i + 26]) * fir_6k_7k[4];
- L_tmp += (x[i+5] + x[i + 25]) * fir_6k_7k[5];
- L_tmp += (x[i+6] + x[i + 24]) * fir_6k_7k[6];
- L_tmp += (x[i+7] + x[i + 23]) * fir_6k_7k[7];
- L_tmp += (x[i+8] + x[i + 22]) * fir_6k_7k[8];
- L_tmp += (x[i+9] + x[i + 21]) * fir_6k_7k[9];
- L_tmp += (x[i+10] + x[i + 20]) * fir_6k_7k[10];
- L_tmp += (x[i+11] + x[i + 19]) * fir_6k_7k[11];
- L_tmp += (x[i+12] + x[i + 18]) * fir_6k_7k[12];
- L_tmp += (x[i+13] + x[i + 17]) * fir_6k_7k[13];
- L_tmp += (x[i+14] + x[i + 16]) * fir_6k_7k[14];
- L_tmp += (x[i+15]) * fir_6k_7k[15];
- signal[i] = (L_tmp + 0x4000) >> 15;
- }
+ Copy(mem, x, L_FIR - 1);
+ for (i = lg - 1; i >= 0; i--)
+ {
+ x[i + L_FIR - 1] = signal[i] >> 2; /* gain of filter = 4 */
+ }
+ for (i = 0; i < lg; i++)
+ {
+ L_tmp = (x[i] + x[i+ 30]) * fir_6k_7k[0];
+ L_tmp += (x[i+1] + x[i + 29]) * fir_6k_7k[1];
+ L_tmp += (x[i+2] + x[i + 28]) * fir_6k_7k[2];
+ L_tmp += (x[i+3] + x[i + 27]) * fir_6k_7k[3];
+ L_tmp += (x[i+4] + x[i + 26]) * fir_6k_7k[4];
+ L_tmp += (x[i+5] + x[i + 25]) * fir_6k_7k[5];
+ L_tmp += (x[i+6] + x[i + 24]) * fir_6k_7k[6];
+ L_tmp += (x[i+7] + x[i + 23]) * fir_6k_7k[7];
+ L_tmp += (x[i+8] + x[i + 22]) * fir_6k_7k[8];
+ L_tmp += (x[i+9] + x[i + 21]) * fir_6k_7k[9];
+ L_tmp += (x[i+10] + x[i + 20]) * fir_6k_7k[10];
+ L_tmp += (x[i+11] + x[i + 19]) * fir_6k_7k[11];
+ L_tmp += (x[i+12] + x[i + 18]) * fir_6k_7k[12];
+ L_tmp += (x[i+13] + x[i + 17]) * fir_6k_7k[13];
+ L_tmp += (x[i+14] + x[i + 16]) * fir_6k_7k[14];
+ L_tmp += (x[i+15]) * fir_6k_7k[15];
+ signal[i] = (L_tmp + 0x4000) >> 15;
+ }
- Copy(x + lg, mem, L_FIR - 1);
+ Copy(x + lg, mem, L_FIR - 1);
}
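
Filt_6k_7k above is an unrolled 31-tap linear-phase band-pass FIR: fir_6k_7k is symmetric, so taps k and 30-k share a coefficient, and the input is pre-shifted right by 2 because the filter gain is 4. The same evaluation written as a loop:

    #include <stdint.h>

    /* One output sample of a symmetric odd-length FIR, as unrolled above (sketch). */
    static int16_t sym_fir_sample(const int16_t *x /* x[0..ntaps-1] */,
                                  const int16_t *h, int ntaps /* 31 here */)
    {
        int32_t acc = 0;
        int mid = ntaps / 2;                              /* 15 */
        for (int k = 0; k < mid; k++)
            acc += (int32_t)(x[k] + x[ntaps - 1 - k]) * h[k];
        acc += (int32_t)x[mid] * h[mid];
        return (int16_t)((acc + 0x4000) >> 15);           /* Q15 rounding */
    }
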
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c b/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c
index bc1ec49..f0347cb 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c
@@ -48,101 +48,101 @@ static Word16 b[4] = {-3432, +10280, -10280, +3432};
/* Initialization of static values */
void Init_Hp_wsp(Word16 mem[])
{
- Set_zero(mem, 9);
+ Set_zero(mem, 9);
- return;
+ return;
}
void scale_mem_Hp_wsp(Word16 mem[], Word16 exp)
{
- Word32 i;
- Word32 L_tmp;
-
- for (i = 0; i < 6; i += 2)
- {
- L_tmp = ((mem[i] << 16) + (mem[i + 1]<<1));
- L_tmp = L_shl(L_tmp, exp);
- mem[i] = L_tmp >> 16;
- mem[i + 1] = (L_tmp & 0xffff)>>1;
- }
-
- for (i = 6; i < 9; i++)
- {
- L_tmp = L_deposit_h(mem[i]); /* x[i] */
- L_tmp = L_shl(L_tmp, exp);
- mem[i] = vo_round(L_tmp);
- }
-
- return;
+ Word32 i;
+ Word32 L_tmp;
+
+ for (i = 0; i < 6; i += 2)
+ {
+ L_tmp = ((mem[i] << 16) + (mem[i + 1]<<1));
+ L_tmp = L_shl(L_tmp, exp);
+ mem[i] = L_tmp >> 16;
+ mem[i + 1] = (L_tmp & 0xffff)>>1;
+ }
+
+ for (i = 6; i < 9; i++)
+ {
+ L_tmp = L_deposit_h(mem[i]); /* x[i] */
+ L_tmp = L_shl(L_tmp, exp);
+ mem[i] = vo_round(L_tmp);
+ }
+
+ return;
}
void Hp_wsp(
- Word16 wsp[], /* i : wsp[] signal */
- Word16 hp_wsp[], /* o : hypass wsp[] */
- Word16 lg, /* i : lenght of signal */
- Word16 mem[] /* i/o : filter memory [9] */
- )
+ Word16 wsp[], /* i : wsp[] signal */
+ Word16 hp_wsp[], /* o : hypass wsp[] */
+ Word16 lg, /* i : lenght of signal */
+ Word16 mem[] /* i/o : filter memory [9] */
+ )
{
- Word16 x0, x1, x2, x3;
- Word16 y3_hi, y3_lo, y2_hi, y2_lo, y1_hi, y1_lo;
- Word32 i, L_tmp;
-
- y3_hi = mem[0];
- y3_lo = mem[1];
- y2_hi = mem[2];
- y2_lo = mem[3];
- y1_hi = mem[4];
- y1_lo = mem[5];
- x0 = mem[6];
- x1 = mem[7];
- x2 = mem[8];
-
- for (i = 0; i < lg; i++)
- {
- x3 = x2;
- x2 = x1;
- x1 = x0;
- x0 = wsp[i];
- /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] + b[3]*x[i-3] */
- /* + a[1]*y[i-1] + a[2] * y[i-2] + a[3]*y[i-3] */
-
- L_tmp = 16384L; /* rounding to maximise precision */
- L_tmp += (y1_lo * a[1])<<1;
- L_tmp += (y2_lo * a[2])<<1;
- L_tmp += (y3_lo * a[3])<<1;
- L_tmp = L_tmp >> 15;
- L_tmp += (y1_hi * a[1])<<1;
- L_tmp += (y2_hi * a[2])<<1;
- L_tmp += (y3_hi * a[3])<<1;
- L_tmp += (x0 * b[0])<<1;
- L_tmp += (x1 * b[1])<<1;
- L_tmp += (x2 * b[2])<<1;
- L_tmp += (x3 * b[3])<<1;
-
- L_tmp = L_tmp << 2;
-
- y3_hi = y2_hi;
- y3_lo = y2_lo;
- y2_hi = y1_hi;
- y2_lo = y1_lo;
- y1_hi = L_tmp >> 16;
- y1_lo = (L_tmp & 0xffff) >>1;
-
- hp_wsp[i] = (L_tmp + 0x4000)>>15;
- }
-
- mem[0] = y3_hi;
- mem[1] = y3_lo;
- mem[2] = y2_hi;
- mem[3] = y2_lo;
- mem[4] = y1_hi;
- mem[5] = y1_lo;
- mem[6] = x0;
- mem[7] = x1;
- mem[8] = x2;
-
- return;
+ Word16 x0, x1, x2, x3;
+ Word16 y3_hi, y3_lo, y2_hi, y2_lo, y1_hi, y1_lo;
+ Word32 i, L_tmp;
+
+ y3_hi = mem[0];
+ y3_lo = mem[1];
+ y2_hi = mem[2];
+ y2_lo = mem[3];
+ y1_hi = mem[4];
+ y1_lo = mem[5];
+ x0 = mem[6];
+ x1 = mem[7];
+ x2 = mem[8];
+
+ for (i = 0; i < lg; i++)
+ {
+ x3 = x2;
+ x2 = x1;
+ x1 = x0;
+ x0 = wsp[i];
+ /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] + b[3]*x[i-3] */
+ /* + a[1]*y[i-1] + a[2] * y[i-2] + a[3]*y[i-3] */
+
+ L_tmp = 16384L; /* rounding to maximise precision */
+ L_tmp += (y1_lo * a[1])<<1;
+ L_tmp += (y2_lo * a[2])<<1;
+ L_tmp += (y3_lo * a[3])<<1;
+ L_tmp = L_tmp >> 15;
+ L_tmp += (y1_hi * a[1])<<1;
+ L_tmp += (y2_hi * a[2])<<1;
+ L_tmp += (y3_hi * a[3])<<1;
+ L_tmp += (x0 * b[0])<<1;
+ L_tmp += (x1 * b[1])<<1;
+ L_tmp += (x2 * b[2])<<1;
+ L_tmp += (x3 * b[3])<<1;
+
+ L_tmp = L_tmp << 2;
+
+ y3_hi = y2_hi;
+ y3_lo = y2_lo;
+ y2_hi = y1_hi;
+ y2_lo = y1_lo;
+ y1_hi = L_tmp >> 16;
+ y1_lo = (L_tmp & 0xffff) >>1;
+
+ hp_wsp[i] = (L_tmp + 0x4000)>>15;
+ }
+
+ mem[0] = y3_hi;
+ mem[1] = y3_lo;
+ mem[2] = y2_hi;
+ mem[3] = y2_lo;
+ mem[4] = y1_hi;
+ mem[5] = y1_lo;
+ mem[6] = x0;
+ mem[7] = x1;
+ mem[8] = x2;
+
+ return;
}
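
Hp_wsp above is the same idea one order higher: a 3rd-order IIR whose y state is stored as hi/lo 16-bit pairs. The convention used throughout these filters (and unpacked by scale_mem_Hp_wsp) is value = (hi << 16) + (lo << 1), i.e. hi holds the top 16 bits and lo the next 15. A small sketch of that pack/unpack:

    #include <stdint.h>

    /* Split a 32-bit filter state into the hi/lo pair used above (sketch). */
    static void split32(int32_t v, int16_t *hi, int16_t *lo)
    {
        *hi = (int16_t)(v >> 16);
        *lo = (int16_t)((v & 0xffff) >> 1);
    }

    /* Rebuild the 32-bit value, as scale_mem_Hp_wsp does before rescaling;
     * written with multiplies to stay well-defined for negative hi. */
    static int32_t join32(int16_t hi, int16_t lo)
    {
        return (int32_t)hi * 65536 + (int32_t)lo * 2;     /* (hi << 16) + (lo << 1) */
    }
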
diff --git a/media/libstagefright/codecs/amrwbenc/src/int_lpc.c b/media/libstagefright/codecs/amrwbenc/src/int_lpc.c
index 1119bc7..3d8b8cb 100644
--- a/media/libstagefright/codecs/amrwbenc/src/int_lpc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/int_lpc.c
@@ -30,36 +30,36 @@
void Int_isp(
- Word16 isp_old[], /* input : isps from past frame */
- Word16 isp_new[], /* input : isps from present frame */
- Word16 frac[], /* input : fraction for 3 first subfr (Q15) */
- Word16 Az[] /* output: LP coefficients in 4 subframes */
- )
+ Word16 isp_old[], /* input : isps from past frame */
+ Word16 isp_new[], /* input : isps from present frame */
+ Word16 frac[], /* input : fraction for 3 first subfr (Q15) */
+ Word16 Az[] /* output: LP coefficients in 4 subframes */
+ )
{
- Word32 i, k;
- Word16 fac_old, fac_new;
- Word16 isp[M];
- Word32 L_tmp;
+ Word32 i, k;
+ Word16 fac_old, fac_new;
+ Word16 isp[M];
+ Word32 L_tmp;
- for (k = 0; k < 3; k++)
- {
- fac_new = frac[k];
- fac_old = (32767 - fac_new) + 1; /* 1.0 - fac_new */
+ for (k = 0; k < 3; k++)
+ {
+ fac_new = frac[k];
+ fac_old = (32767 - fac_new) + 1; /* 1.0 - fac_new */
- for (i = 0; i < M; i++)
- {
- L_tmp = (isp_old[i] * fac_old)<<1;
- L_tmp += (isp_new[i] * fac_new)<<1;
- isp[i] = (L_tmp + 0x8000)>>16;
- }
- Isp_Az(isp, Az, M, 0);
- Az += MP1;
- }
+ for (i = 0; i < M; i++)
+ {
+ L_tmp = (isp_old[i] * fac_old)<<1;
+ L_tmp += (isp_new[i] * fac_new)<<1;
+ isp[i] = (L_tmp + 0x8000)>>16;
+ }
+ Isp_Az(isp, Az, M, 0);
+ Az += MP1;
+ }
- /* 4th subframe: isp_new (frac=1.0) */
- Isp_Az(isp_new, Az, M, 0);
+ /* 4th subframe: isp_new (frac=1.0) */
+ Isp_Az(isp_new, Az, M, 0);
- return;
+ return;
}
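
Int_isp above blends last frame's and the current frame's ISPs for the first three subframes (weights frac[k] and 1 - frac[k] in Q15), converts each blend to LP coefficients with Isp_Az, and uses the new ISPs unchanged for the fourth subframe. The interpolation itself, in float:

    /* Float view of the per-subframe ISP interpolation above (sketch); m is the
     * LP order (16 in this encoder). */
    static void int_isp_ref(const float *isp_old, const float *isp_new,
                            float frac_k, float *isp, int m)
    {
        for (int i = 0; i < m; i++)
            isp[i] = (1.0f - frac_k) * isp_old[i] + frac_k * isp_new[i];
    }
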
diff --git a/media/libstagefright/codecs/amrwbenc/src/isp_az.c b/media/libstagefright/codecs/amrwbenc/src/isp_az.c
index 30a8bbd..62e29e7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/isp_az.c
+++ b/media/libstagefright/codecs/amrwbenc/src/isp_az.c
@@ -35,132 +35,132 @@ static void Get_isp_pol(Word16 * isp, Word32 * f, Word16 n);
static void Get_isp_pol_16kHz(Word16 * isp, Word32 * f, Word16 n);
void Isp_Az(
- Word16 isp[], /* (i) Q15 : Immittance spectral pairs */
- Word16 a[], /* (o) Q12 : predictor coefficients (order = M) */
- Word16 m,
- Word16 adaptive_scaling /* (i) 0 : adaptive scaling disabled */
- /* 1 : adaptive scaling enabled */
- )
+ Word16 isp[], /* (i) Q15 : Immittance spectral pairs */
+ Word16 a[], /* (o) Q12 : predictor coefficients (order = M) */
+ Word16 m,
+ Word16 adaptive_scaling /* (i) 0 : adaptive scaling disabled */
+ /* 1 : adaptive scaling enabled */
+ )
{
- Word32 i, j;
- Word16 hi, lo;
- Word32 f1[NC16k + 1], f2[NC16k];
- Word16 nc;
- Word32 t0;
- Word16 q, q_sug;
- Word32 tmax;
-
- nc = (m >> 1);
- if(nc > 8)
- {
- Get_isp_pol_16kHz(&isp[0], f1, nc);
- for (i = 0; i <= nc; i++)
- {
- f1[i] = f1[i] << 2;
- }
- } else
- Get_isp_pol(&isp[0], f1, nc);
-
- if (nc > 8)
- {
- Get_isp_pol_16kHz(&isp[1], f2, (nc - 1));
- for (i = 0; i <= nc - 1; i++)
- {
- f2[i] = f2[i] << 2;
- }
- } else
- Get_isp_pol(&isp[1], f2, (nc - 1));
-
- /*-----------------------------------------------------*
- * Multiply F2(z) by (1 - z^-2) *
- *-----------------------------------------------------*/
-
- for (i = (nc - 1); i > 1; i--)
- {
- f2[i] = vo_L_sub(f2[i], f2[i - 2]); /* f2[i] -= f2[i-2]; */
- }
-
- /*----------------------------------------------------------*
- * Scale F1(z) by (1+isp[m-1]) and F2(z) by (1-isp[m-1]) *
- *----------------------------------------------------------*/
-
- for (i = 0; i < nc; i++)
- {
- /* f1[i] *= (1.0 + isp[M-1]); */
-
- hi = f1[i] >> 16;
- lo = (f1[i] & 0xffff)>>1;
-
- t0 = Mpy_32_16(hi, lo, isp[m - 1]);
- f1[i] = vo_L_add(f1[i], t0);
-
- /* f2[i] *= (1.0 - isp[M-1]); */
-
- hi = f2[i] >> 16;
- lo = (f2[i] & 0xffff)>>1;
- t0 = Mpy_32_16(hi, lo, isp[m - 1]);
- f2[i] = vo_L_sub(f2[i], t0);
- }
-
- /*-----------------------------------------------------*
- * A(z) = (F1(z)+F2(z))/2 *
- * F1(z) is symmetric and F2(z) is antisymmetric *
- *-----------------------------------------------------*/
-
- /* a[0] = 1.0; */
- a[0] = 4096;
- tmax = 1;
- for (i = 1, j = m - 1; i < nc; i++, j--)
- {
- /* a[i] = 0.5*(f1[i] + f2[i]); */
-
- t0 = vo_L_add(f1[i], f2[i]); /* f1[i] + f2[i] */
- tmax |= L_abs(t0);
- a[i] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
-
- /* a[j] = 0.5*(f1[i] - f2[i]); */
-
- t0 = vo_L_sub(f1[i], f2[i]); /* f1[i] - f2[i] */
- tmax |= L_abs(t0);
- a[j] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
- }
-
- /* rescale data if overflow has occured and reprocess the loop */
- if(adaptive_scaling == 1)
- q = 4 - norm_l(tmax); /* adaptive scaling enabled */
- else
- q = 0; /* adaptive scaling disabled */
-
- if (q > 0)
- {
- q_sug = (12 + q);
- for (i = 1, j = m - 1; i < nc; i++, j--)
- {
- /* a[i] = 0.5*(f1[i] + f2[i]); */
- t0 = vo_L_add(f1[i], f2[i]); /* f1[i] + f2[i] */
- a[i] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
-
- /* a[j] = 0.5*(f1[i] - f2[i]); */
- t0 = vo_L_sub(f1[i], f2[i]); /* f1[i] - f2[i] */
- a[j] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
- }
- a[0] = shr(a[0], q);
- }
- else
- {
- q_sug = 12;
- q = 0;
- }
- /* a[NC] = 0.5*f1[NC]*(1.0 + isp[M-1]); */
- hi = f1[nc] >> 16;
- lo = (f1[nc] & 0xffff)>>1;
- t0 = Mpy_32_16(hi, lo, isp[m - 1]);
- t0 = vo_L_add(f1[nc], t0);
- a[nc] = (Word16)(L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
- /* a[m] = isp[m-1]; */
-
- a[m] = vo_shr_r(isp[m - 1], (3 + q)); /* from Q15 to Q12 */
- return;
+ Word32 i, j;
+ Word16 hi, lo;
+ Word32 f1[NC16k + 1], f2[NC16k];
+ Word16 nc;
+ Word32 t0;
+ Word16 q, q_sug;
+ Word32 tmax;
+
+ nc = (m >> 1);
+ if(nc > 8)
+ {
+ Get_isp_pol_16kHz(&isp[0], f1, nc);
+ for (i = 0; i <= nc; i++)
+ {
+ f1[i] = f1[i] << 2;
+ }
+ } else
+ Get_isp_pol(&isp[0], f1, nc);
+
+ if (nc > 8)
+ {
+ Get_isp_pol_16kHz(&isp[1], f2, (nc - 1));
+ for (i = 0; i <= nc - 1; i++)
+ {
+ f2[i] = f2[i] << 2;
+ }
+ } else
+ Get_isp_pol(&isp[1], f2, (nc - 1));
+
+ /*-----------------------------------------------------*
+ * Multiply F2(z) by (1 - z^-2) *
+ *-----------------------------------------------------*/
+
+ for (i = (nc - 1); i > 1; i--)
+ {
+ f2[i] = vo_L_sub(f2[i], f2[i - 2]); /* f2[i] -= f2[i-2]; */
+ }
+
+ /*----------------------------------------------------------*
+ * Scale F1(z) by (1+isp[m-1]) and F2(z) by (1-isp[m-1]) *
+ *----------------------------------------------------------*/
+
+ for (i = 0; i < nc; i++)
+ {
+ /* f1[i] *= (1.0 + isp[M-1]); */
+
+ hi = f1[i] >> 16;
+ lo = (f1[i] & 0xffff)>>1;
+
+ t0 = Mpy_32_16(hi, lo, isp[m - 1]);
+ f1[i] = vo_L_add(f1[i], t0);
+
+ /* f2[i] *= (1.0 - isp[M-1]); */
+
+ hi = f2[i] >> 16;
+ lo = (f2[i] & 0xffff)>>1;
+ t0 = Mpy_32_16(hi, lo, isp[m - 1]);
+ f2[i] = vo_L_sub(f2[i], t0);
+ }
+
+ /*-----------------------------------------------------*
+ * A(z) = (F1(z)+F2(z))/2 *
+ * F1(z) is symmetric and F2(z) is antisymmetric *
+ *-----------------------------------------------------*/
+
+ /* a[0] = 1.0; */
+ a[0] = 4096;
+ tmax = 1;
+ for (i = 1, j = m - 1; i < nc; i++, j--)
+ {
+ /* a[i] = 0.5*(f1[i] + f2[i]); */
+
+ t0 = vo_L_add(f1[i], f2[i]); /* f1[i] + f2[i] */
+ tmax |= L_abs(t0);
+ a[i] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
+
+ /* a[j] = 0.5*(f1[i] - f2[i]); */
+
+ t0 = vo_L_sub(f1[i], f2[i]); /* f1[i] - f2[i] */
+ tmax |= L_abs(t0);
+ a[j] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
+ }
+
+    /* rescale data if overflow has occurred and reprocess the loop */
+ if(adaptive_scaling == 1)
+ q = 4 - norm_l(tmax); /* adaptive scaling enabled */
+ else
+ q = 0; /* adaptive scaling disabled */
+
+ if (q > 0)
+ {
+ q_sug = (12 + q);
+ for (i = 1, j = m - 1; i < nc; i++, j--)
+ {
+ /* a[i] = 0.5*(f1[i] + f2[i]); */
+ t0 = vo_L_add(f1[i], f2[i]); /* f1[i] + f2[i] */
+ a[i] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
+
+ /* a[j] = 0.5*(f1[i] - f2[i]); */
+ t0 = vo_L_sub(f1[i], f2[i]); /* f1[i] - f2[i] */
+ a[j] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
+ }
+ a[0] = shr(a[0], q);
+ }
+ else
+ {
+ q_sug = 12;
+ q = 0;
+ }
+ /* a[NC] = 0.5*f1[NC]*(1.0 + isp[M-1]); */
+ hi = f1[nc] >> 16;
+ lo = (f1[nc] & 0xffff)>>1;
+ t0 = Mpy_32_16(hi, lo, isp[m - 1]);
+ t0 = vo_L_add(f1[nc], t0);
+ a[nc] = (Word16)(L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
+ /* a[m] = isp[m-1]; */
+
+ a[m] = vo_shr_r(isp[m - 1], (3 + q)); /* from Q15 to Q12 */
+ return;
}
/*-----------------------------------------------------------*
@@ -185,63 +185,63 @@ void Isp_Az(
static void Get_isp_pol(Word16 * isp, Word32 * f, Word16 n)
{
- Word16 hi, lo;
- Word32 i, j, t0;
- /* All computation in Q23 */
-
- f[0] = vo_L_mult(4096, 1024); /* f[0] = 1.0; in Q23 */
- f[1] = vo_L_mult(isp[0], -256); /* f[1] = -2.0*isp[0] in Q23 */
-
- f += 2; /* Advance f pointer */
- isp += 2; /* Advance isp pointer */
- for (i = 2; i <= n; i++)
- {
- *f = f[-2];
- for (j = 1; j < i; j++, f--)
- {
- hi = f[-1]>>16;
- lo = (f[-1] & 0xffff)>>1;
-
- t0 = Mpy_32_16(hi, lo, *isp); /* t0 = f[-1] * isp */
- t0 = t0 << 1;
- *f = vo_L_sub(*f, t0); /* *f -= t0 */
- *f = vo_L_add(*f, f[-2]); /* *f += f[-2] */
- }
- *f -= (*isp << 9); /* *f -= isp<<8 */
- f += i; /* Advance f pointer */
- isp += 2; /* Advance isp pointer */
- }
- return;
+ Word16 hi, lo;
+ Word32 i, j, t0;
+ /* All computation in Q23 */
+
+ f[0] = vo_L_mult(4096, 1024); /* f[0] = 1.0; in Q23 */
+ f[1] = vo_L_mult(isp[0], -256); /* f[1] = -2.0*isp[0] in Q23 */
+
+ f += 2; /* Advance f pointer */
+ isp += 2; /* Advance isp pointer */
+ for (i = 2; i <= n; i++)
+ {
+ *f = f[-2];
+ for (j = 1; j < i; j++, f--)
+ {
+ hi = f[-1]>>16;
+ lo = (f[-1] & 0xffff)>>1;
+
+ t0 = Mpy_32_16(hi, lo, *isp); /* t0 = f[-1] * isp */
+ t0 = t0 << 1;
+ *f = vo_L_sub(*f, t0); /* *f -= t0 */
+ *f = vo_L_add(*f, f[-2]); /* *f += f[-2] */
+ }
+ *f -= (*isp << 9); /* *f -= isp<<8 */
+ f += i; /* Advance f pointer */
+ isp += 2; /* Advance isp pointer */
+ }
+ return;
}
static void Get_isp_pol_16kHz(Word16 * isp, Word32 * f, Word16 n)
{
- Word16 hi, lo;
- Word32 i, j, t0;
-
- /* All computation in Q23 */
- f[0] = L_mult(4096, 256); /* f[0] = 1.0; in Q23 */
- f[1] = L_mult(isp[0], -64); /* f[1] = -2.0*isp[0] in Q23 */
-
- f += 2; /* Advance f pointer */
- isp += 2; /* Advance isp pointer */
-
- for (i = 2; i <= n; i++)
- {
- *f = f[-2];
- for (j = 1; j < i; j++, f--)
- {
- VO_L_Extract(f[-1], &hi, &lo);
- t0 = Mpy_32_16(hi, lo, *isp); /* t0 = f[-1] * isp */
- t0 = L_shl2(t0, 1);
- *f = L_sub(*f, t0); /* *f -= t0 */
- *f = L_add(*f, f[-2]); /* *f += f[-2] */
- }
- *f = L_msu(*f, *isp, 64); /* *f -= isp<<8 */
- f += i; /* Advance f pointer */
- isp += 2; /* Advance isp pointer */
- }
- return;
+ Word16 hi, lo;
+ Word32 i, j, t0;
+
+ /* All computation in Q23 */
+ f[0] = L_mult(4096, 256); /* f[0] = 1.0; in Q23 */
+ f[1] = L_mult(isp[0], -64); /* f[1] = -2.0*isp[0] in Q23 */
+
+ f += 2; /* Advance f pointer */
+ isp += 2; /* Advance isp pointer */
+
+ for (i = 2; i <= n; i++)
+ {
+ *f = f[-2];
+ for (j = 1; j < i; j++, f--)
+ {
+ VO_L_Extract(f[-1], &hi, &lo);
+ t0 = Mpy_32_16(hi, lo, *isp); /* t0 = f[-1] * isp */
+ t0 = L_shl2(t0, 1);
+ *f = L_sub(*f, t0); /* *f -= t0 */
+ *f = L_add(*f, f[-2]); /* *f += f[-2] */
+ }
+ *f = L_msu(*f, *isp, 64); /* *f -= isp<<8 */
+ f += i; /* Advance f pointer */
+ isp += 2; /* Advance isp pointer */
+ }
+ return;
}
diff --git a/media/libstagefright/codecs/amrwbenc/src/isp_isf.c b/media/libstagefright/codecs/amrwbenc/src/isp_isf.c
index b4ba408..56798e0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/isp_isf.c
+++ b/media/libstagefright/codecs/amrwbenc/src/isp_isf.c
@@ -18,11 +18,11 @@
* File: isp_isf.c *
* *
* Description: *
-* Isp_isf Transformation isp to isf *
-* Isf_isp Transformation isf to isp *
+* Isp_isf Transformation isp to isf *
+* Isf_isp Transformation isf to isp *
* *
-* The transformation from isp[i] to isf[i] and isf[i] to isp[i] *
-* are approximated by a look-up table and interpolation *
+* The transformation from isp[i] to isf[i] and isf[i] to isp[i] *
+* are approximated by a look-up table and interpolation *
* *
************************************************************************/
@@ -31,59 +31,59 @@
#include "isp_isf.tab" /* Look-up table for transformations */
void Isp_isf(
- Word16 isp[], /* (i) Q15 : isp[m] (range: -1<=val<1) */
- Word16 isf[], /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
- Word16 m /* (i) : LPC order */
- )
+ Word16 isp[], /* (i) Q15 : isp[m] (range: -1<=val<1) */
+ Word16 isf[], /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+ Word16 m /* (i) : LPC order */
+ )
{
- Word32 i, ind;
- Word32 L_tmp;
- ind = 127; /* beging at end of table -1 */
- for (i = (m - 1); i >= 0; i--)
- {
- if (i >= (m - 2))
- { /* m-2 is a constant */
- ind = 127; /* beging at end of table -1 */
- }
- /* find value in table that is just greater than isp[i] */
- while (table[ind] < isp[i])
- ind--;
- /* acos(isp[i])= ind*128 + ( ( isp[i]-table[ind] ) * slope[ind] )/2048 */
- L_tmp = vo_L_mult(vo_sub(isp[i], table[ind]), slope[ind]);
- isf[i] = vo_round((L_tmp << 4)); /* (isp[i]-table[ind])*slope[ind])>>11 */
- isf[i] = add1(isf[i], (ind << 7));
- }
- isf[m - 1] = (isf[m - 1] >> 1);
- return;
+ Word32 i, ind;
+ Word32 L_tmp;
+    ind = 127;                          /* begin at end of table -1 */
+ for (i = (m - 1); i >= 0; i--)
+ {
+ if (i >= (m - 2))
+ { /* m-2 is a constant */
+            ind = 127;                  /* begin at end of table -1 */
+ }
+ /* find value in table that is just greater than isp[i] */
+ while (table[ind] < isp[i])
+ ind--;
+ /* acos(isp[i])= ind*128 + ( ( isp[i]-table[ind] ) * slope[ind] )/2048 */
+ L_tmp = vo_L_mult(vo_sub(isp[i], table[ind]), slope[ind]);
+ isf[i] = vo_round((L_tmp << 4)); /* (isp[i]-table[ind])*slope[ind])>>11 */
+ isf[i] = add1(isf[i], (ind << 7));
+ }
+ isf[m - 1] = (isf[m - 1] >> 1);
+ return;
}
void Isf_isp(
- Word16 isf[], /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
- Word16 isp[], /* (o) Q15 : isp[m] (range: -1<=val<1) */
- Word16 m /* (i) : LPC order */
- )
+ Word16 isf[], /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+ Word16 isp[], /* (o) Q15 : isp[m] (range: -1<=val<1) */
+ Word16 m /* (i) : LPC order */
+ )
{
- Word16 offset;
- Word32 i, ind, L_tmp;
+ Word16 offset;
+ Word32 i, ind, L_tmp;
- for (i = 0; i < m - 1; i++)
- {
- isp[i] = isf[i];
- }
- isp[m - 1] = (isf[m - 1] << 1);
+ for (i = 0; i < m - 1; i++)
+ {
+ isp[i] = isf[i];
+ }
+ isp[m - 1] = (isf[m - 1] << 1);
- for (i = 0; i < m; i++)
- {
- ind = (isp[i] >> 7); /* ind = b7-b15 of isf[i] */
- offset = (Word16) (isp[i] & 0x007f); /* offset = b0-b6 of isf[i] */
+ for (i = 0; i < m; i++)
+ {
+ ind = (isp[i] >> 7); /* ind = b7-b15 of isf[i] */
+ offset = (Word16) (isp[i] & 0x007f); /* offset = b0-b6 of isf[i] */
- /* isp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 128 */
- L_tmp = vo_L_mult(vo_sub(table[ind + 1], table[ind]), offset);
- isp[i] = add1(table[ind], (Word16)((L_tmp >> 8)));
- }
+ /* isp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 128 */
+ L_tmp = vo_L_mult(vo_sub(table[ind + 1], table[ind]), offset);
+ isp[i] = add1(table[ind], (Word16)((L_tmp >> 8)));
+ }
- return;
+ return;
}
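
Both conversions above are table-plus-linear-interpolation approximations: Isp_isf of acos() (ind = 127 starts near the end of the lookup table) and Isf_isp of its inverse, with ISFs kept normalized to 0..0.5 and the last coefficient stored at half scale. The forward direction in float:

    #include <math.h>

    /* Float equivalent of Isp_isf (sketch); the Q15 code approximates acosf()
     * with the lookup table plus the slope[] interpolation. */
    static void isp_isf_ref(const float *isp, float *isf, int m)
    {
        const float two_pi = 6.2831853f;
        for (int i = 0; i < m; i++)
            isf[i] = acosf(isp[i]) / two_pi;   /* normalized: 0.0 .. 0.5 */
        isf[m - 1] *= 0.5f;                    /* last ISF kept at half scale, as above */
    }
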
diff --git a/media/libstagefright/codecs/amrwbenc/src/lag_wind.c b/media/libstagefright/codecs/amrwbenc/src/lag_wind.c
index 49c622c..527430b 100644
--- a/media/libstagefright/codecs/amrwbenc/src/lag_wind.c
+++ b/media/libstagefright/codecs/amrwbenc/src/lag_wind.c
@@ -17,8 +17,8 @@
/***********************************************************************
* File: lag_wind.c *
* *
-* Description: Lag_windows on autocorrelations *
-* r[i] *= lag_wind[i] *
+* Description: Lag_windows on autocorrelations *
+* r[i] *= lag_wind[i] *
* *
************************************************************************/
@@ -29,20 +29,20 @@
void Lag_window(
- Word16 r_h[], /* (i/o) : Autocorrelations (msb) */
- Word16 r_l[] /* (i/o) : Autocorrelations (lsb) */
- )
+ Word16 r_h[], /* (i/o) : Autocorrelations (msb) */
+ Word16 r_l[] /* (i/o) : Autocorrelations (lsb) */
+ )
{
- Word32 i;
- Word32 x;
-
- for (i = 1; i <= M; i++)
- {
- x = Mpy_32(r_h[i], r_l[i], volag_h[i - 1], volag_l[i - 1]);
- r_h[i] = x >> 16;
- r_l[i] = (x & 0xffff)>>1;
- }
- return;
+ Word32 i;
+ Word32 x;
+
+ for (i = 1; i <= M; i++)
+ {
+ x = Mpy_32(r_h[i], r_l[i], volag_h[i - 1], volag_l[i - 1]);
+ r_h[i] = x >> 16;
+ r_l[i] = (x & 0xffff)>>1;
+ }
+ return;
}
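
Underneath the split hi/lo arithmetic, Lag_window above simply scales each autocorrelation term r[1..M] by a fixed lag-window coefficient before the Levinson-Durbin recursion (r[0] is untouched). In double precision:

    /* Double-precision view of Lag_window (sketch). */
    static void lag_window_ref(double *r, const double *lag_wind, int m)
    {
        for (int i = 1; i <= m; i++)
            r[i] *= lag_wind[i - 1];
    }
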
diff --git a/media/libstagefright/codecs/amrwbenc/src/levinson.c b/media/libstagefright/codecs/amrwbenc/src/levinson.c
index 4b2f8ed..9d5a3bd 100644
--- a/media/libstagefright/codecs/amrwbenc/src/levinson.c
+++ b/media/libstagefright/codecs/amrwbenc/src/levinson.c
@@ -21,7 +21,7 @@
* *
************************************************************************/
/*---------------------------------------------------------------------------*
- * LEVINSON.C *
+ * LEVINSON.C *
*---------------------------------------------------------------------------*
* *
* LEVINSON-DURBIN algorithm in double precision *
@@ -96,154 +96,154 @@
#define NC (M/2)
void Init_Levinson(
- Word16 * mem /* output :static memory (18 words) */
- )
+ Word16 * mem /* output :static memory (18 words) */
+ )
{
- Set_zero(mem, 18); /* old_A[0..M-1] = 0, old_rc[0..1] = 0 */
- return;
+ Set_zero(mem, 18); /* old_A[0..M-1] = 0, old_rc[0..1] = 0 */
+ return;
}
void Levinson(
- Word16 Rh[], /* (i) : Rh[M+1] Vector of autocorrelations (msb) */
- Word16 Rl[], /* (i) : Rl[M+1] Vector of autocorrelations (lsb) */
- Word16 A[], /* (o) Q12 : A[M] LPC coefficients (m = 16) */
- Word16 rc[], /* (o) Q15 : rc[M] Reflection coefficients. */
- Word16 * mem /* (i/o) :static memory (18 words) */
- )
+ Word16 Rh[], /* (i) : Rh[M+1] Vector of autocorrelations (msb) */
+ Word16 Rl[], /* (i) : Rl[M+1] Vector of autocorrelations (lsb) */
+ Word16 A[], /* (o) Q12 : A[M] LPC coefficients (m = 16) */
+ Word16 rc[], /* (o) Q15 : rc[M] Reflection coefficients. */
+ Word16 * mem /* (i/o) :static memory (18 words) */
+ )
{
- Word32 i, j;
- Word16 hi, lo;
- Word16 Kh, Kl; /* reflection coefficient; hi and lo */
- Word16 alp_h, alp_l, alp_exp; /* Prediction gain; hi lo and exponent */
- Word16 Ah[M + 1], Al[M + 1]; /* LPC coef. in double prec. */
- Word16 Anh[M + 1], Anl[M + 1]; /* LPC coef.for next iteration in double prec. */
- Word32 t0, t1, t2; /* temporary variable */
- Word16 *old_A, *old_rc;
-
- /* Last A(z) for case of unstable filter */
- old_A = mem;
- old_rc = mem + M;
-
- /* K = A[1] = -R[1] / R[0] */
-
- t1 = ((Rh[1] << 16) + (Rl[1] << 1)); /* R[1] in Q31 */
- t2 = L_abs(t1); /* abs R[1] */
- t0 = Div_32(t2, Rh[0], Rl[0]); /* R[1]/R[0] in Q31 */
- if (t1 > 0)
- t0 = -t0; /* -R[1]/R[0] */
-
- Kh = t0 >> 16;
- Kl = (t0 & 0xffff)>>1;
- rc[0] = Kh;
- t0 = (t0 >> 4); /* A[1] in Q27 */
-
- Ah[1] = t0 >> 16;
- Al[1] = (t0 & 0xffff)>>1;
-
- /* Alpha = R[0] * (1-K**2) */
- t0 = Mpy_32(Kh, Kl, Kh, Kl); /* K*K in Q31 */
- t0 = L_abs(t0); /* Some case <0 !! */
- t0 = vo_L_sub((Word32) 0x7fffffffL, t0); /* 1 - K*K in Q31 */
-
- hi = t0 >> 16;
- lo = (t0 & 0xffff)>>1;
-
- t0 = Mpy_32(Rh[0], Rl[0], hi, lo); /* Alpha in Q31 */
-
- /* Normalize Alpha */
- alp_exp = norm_l(t0);
- t0 = (t0 << alp_exp);
-
- alp_h = t0 >> 16;
- alp_l = (t0 & 0xffff)>>1;
- /*--------------------------------------*
- * ITERATIONS I=2 to M *
- *--------------------------------------*/
- for (i = 2; i <= M; i++)
- {
- /* t0 = SUM ( R[j]*A[i-j] ,j=1,i-1 ) + R[i] */
- t0 = 0;
- for (j = 1; j < i; j++)
- t0 = vo_L_add(t0, Mpy_32(Rh[j], Rl[j], Ah[i - j], Al[i - j]));
-
- t0 = t0 << 4; /* result in Q27 -> convert to Q31 */
- /* No overflow possible */
- t1 = ((Rh[i] << 16) + (Rl[i] << 1));
- t0 = vo_L_add(t0, t1); /* add R[i] in Q31 */
-
- /* K = -t0 / Alpha */
- t1 = L_abs(t0);
- t2 = Div_32(t1, alp_h, alp_l); /* abs(t0)/Alpha */
- if (t0 > 0)
- t2 = -t2; /* K =-t0/Alpha */
- t2 = (t2 << alp_exp); /* denormalize; compare to Alpha */
-
- Kh = t2 >> 16;
- Kl = (t2 & 0xffff)>>1;
-
- rc[i - 1] = Kh;
- /* Test for unstable filter. If unstable keep old A(z) */
- if (abs_s(Kh) > 32750)
- {
- A[0] = 4096; /* Ai[0] not stored (always 1.0) */
- for (j = 0; j < M; j++)
- {
- A[j + 1] = old_A[j];
- }
- rc[0] = old_rc[0]; /* only two rc coefficients are needed */
- rc[1] = old_rc[1];
- return;
- }
- /*------------------------------------------*
- * Compute new LPC coeff. -> An[i] *
- * An[j]= A[j] + K*A[i-j] , j=1 to i-1 *
- * An[i]= K *
- *------------------------------------------*/
- for (j = 1; j < i; j++)
- {
- t0 = Mpy_32(Kh, Kl, Ah[i - j], Al[i - j]);
- t0 = vo_L_add(t0, ((Ah[j] << 16) + (Al[j] << 1)));
- Anh[j] = t0 >> 16;
- Anl[j] = (t0 & 0xffff)>>1;
- }
- t2 = (t2 >> 4); /* t2 = K in Q31 ->convert to Q27 */
-
- VO_L_Extract(t2, &Anh[i], &Anl[i]); /* An[i] in Q27 */
-
- /* Alpha = Alpha * (1-K**2) */
- t0 = Mpy_32(Kh, Kl, Kh, Kl); /* K*K in Q31 */
- t0 = L_abs(t0); /* Some case <0 !! */
- t0 = vo_L_sub((Word32) 0x7fffffffL, t0); /* 1 - K*K in Q31 */
- hi = t0 >> 16;
- lo = (t0 & 0xffff)>>1;
- t0 = Mpy_32(alp_h, alp_l, hi, lo); /* Alpha in Q31 */
-
- /* Normalize Alpha */
- j = norm_l(t0);
- t0 = (t0 << j);
- alp_h = t0 >> 16;
- alp_l = (t0 & 0xffff)>>1;
- alp_exp += j; /* Add normalization to alp_exp */
-
- /* A[j] = An[j] */
- for (j = 1; j <= i; j++)
- {
- Ah[j] = Anh[j];
- Al[j] = Anl[j];
- }
- }
- /* Truncate A[i] in Q27 to Q12 with rounding */
- A[0] = 4096;
- for (i = 1; i <= M; i++)
- {
- t0 = (Ah[i] << 16) + (Al[i] << 1);
- old_A[i - 1] = A[i] = vo_round((t0 << 1));
- }
- old_rc[0] = rc[0];
- old_rc[1] = rc[1];
-
- return;
+ Word32 i, j;
+ Word16 hi, lo;
+ Word16 Kh, Kl; /* reflection coefficient; hi and lo */
+ Word16 alp_h, alp_l, alp_exp; /* Prediction gain; hi lo and exponent */
+ Word16 Ah[M + 1], Al[M + 1]; /* LPC coef. in double prec. */
+ Word16 Anh[M + 1], Anl[M + 1]; /* LPC coef.for next iteration in double prec. */
+ Word32 t0, t1, t2; /* temporary variable */
+ Word16 *old_A, *old_rc;
+
+ /* Last A(z) for case of unstable filter */
+ old_A = mem;
+ old_rc = mem + M;
+
+ /* K = A[1] = -R[1] / R[0] */
+
+ t1 = ((Rh[1] << 16) + (Rl[1] << 1)); /* R[1] in Q31 */
+ t2 = L_abs(t1); /* abs R[1] */
+ t0 = Div_32(t2, Rh[0], Rl[0]); /* R[1]/R[0] in Q31 */
+ if (t1 > 0)
+ t0 = -t0; /* -R[1]/R[0] */
+
+ Kh = t0 >> 16;
+ Kl = (t0 & 0xffff)>>1;
+ rc[0] = Kh;
+ t0 = (t0 >> 4); /* A[1] in Q27 */
+
+ Ah[1] = t0 >> 16;
+ Al[1] = (t0 & 0xffff)>>1;
+
+ /* Alpha = R[0] * (1-K**2) */
+ t0 = Mpy_32(Kh, Kl, Kh, Kl); /* K*K in Q31 */
+ t0 = L_abs(t0); /* Some case <0 !! */
+ t0 = vo_L_sub((Word32) 0x7fffffffL, t0); /* 1 - K*K in Q31 */
+
+ hi = t0 >> 16;
+ lo = (t0 & 0xffff)>>1;
+
+ t0 = Mpy_32(Rh[0], Rl[0], hi, lo); /* Alpha in Q31 */
+
+ /* Normalize Alpha */
+ alp_exp = norm_l(t0);
+ t0 = (t0 << alp_exp);
+
+ alp_h = t0 >> 16;
+ alp_l = (t0 & 0xffff)>>1;
+ /*--------------------------------------*
+ * ITERATIONS I=2 to M *
+ *--------------------------------------*/
+ for (i = 2; i <= M; i++)
+ {
+ /* t0 = SUM ( R[j]*A[i-j] ,j=1,i-1 ) + R[i] */
+ t0 = 0;
+ for (j = 1; j < i; j++)
+ t0 = vo_L_add(t0, Mpy_32(Rh[j], Rl[j], Ah[i - j], Al[i - j]));
+
+ t0 = t0 << 4; /* result in Q27 -> convert to Q31 */
+ /* No overflow possible */
+ t1 = ((Rh[i] << 16) + (Rl[i] << 1));
+ t0 = vo_L_add(t0, t1); /* add R[i] in Q31 */
+
+ /* K = -t0 / Alpha */
+ t1 = L_abs(t0);
+ t2 = Div_32(t1, alp_h, alp_l); /* abs(t0)/Alpha */
+ if (t0 > 0)
+ t2 = -t2; /* K =-t0/Alpha */
+ t2 = (t2 << alp_exp); /* denormalize; compare to Alpha */
+
+ Kh = t2 >> 16;
+ Kl = (t2 & 0xffff)>>1;
+
+ rc[i - 1] = Kh;
+ /* Test for unstable filter. If unstable keep old A(z) */
+ if (abs_s(Kh) > 32750)
+ {
+ A[0] = 4096; /* Ai[0] not stored (always 1.0) */
+ for (j = 0; j < M; j++)
+ {
+ A[j + 1] = old_A[j];
+ }
+ rc[0] = old_rc[0]; /* only two rc coefficients are needed */
+ rc[1] = old_rc[1];
+ return;
+ }
+ /*------------------------------------------*
+ * Compute new LPC coeff. -> An[i] *
+ * An[j]= A[j] + K*A[i-j] , j=1 to i-1 *
+ * An[i]= K *
+ *------------------------------------------*/
+ for (j = 1; j < i; j++)
+ {
+ t0 = Mpy_32(Kh, Kl, Ah[i - j], Al[i - j]);
+ t0 = vo_L_add(t0, ((Ah[j] << 16) + (Al[j] << 1)));
+ Anh[j] = t0 >> 16;
+ Anl[j] = (t0 & 0xffff)>>1;
+ }
+ t2 = (t2 >> 4); /* t2 = K in Q31 ->convert to Q27 */
+
+ VO_L_Extract(t2, &Anh[i], &Anl[i]); /* An[i] in Q27 */
+
+ /* Alpha = Alpha * (1-K**2) */
+ t0 = Mpy_32(Kh, Kl, Kh, Kl); /* K*K in Q31 */
+ t0 = L_abs(t0); /* Some case <0 !! */
+ t0 = vo_L_sub((Word32) 0x7fffffffL, t0); /* 1 - K*K in Q31 */
+ hi = t0 >> 16;
+ lo = (t0 & 0xffff)>>1;
+ t0 = Mpy_32(alp_h, alp_l, hi, lo); /* Alpha in Q31 */
+
+ /* Normalize Alpha */
+ j = norm_l(t0);
+ t0 = (t0 << j);
+ alp_h = t0 >> 16;
+ alp_l = (t0 & 0xffff)>>1;
+ alp_exp += j; /* Add normalization to alp_exp */
+
+ /* A[j] = An[j] */
+ for (j = 1; j <= i; j++)
+ {
+ Ah[j] = Anh[j];
+ Al[j] = Anl[j];
+ }
+ }
+ /* Truncate A[i] in Q27 to Q12 with rounding */
+ A[0] = 4096;
+ for (i = 1; i <= M; i++)
+ {
+ t0 = (Ah[i] << 16) + (Al[i] << 1);
+ old_A[i - 1] = A[i] = vo_round((t0 << 1));
+ }
+ old_rc[0] = rc[0];
+ old_rc[1] = rc[1];
+
+ return;
}
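
Levinson above is the standard Levinson-Durbin recursion carried out on split hi/lo 32-bit values, plus a guard that falls back to the previous frame's A(z) when a reflection coefficient gets too close to 1 (abs_s(Kh) > 32750). A compact double-precision reference, without that fallback and without the final Q12 scaling:

    /* Double-precision Levinson-Durbin (sketch): R[0..m] autocorrelations in,
     * A[0..m] prediction coefficients (A[0] = 1) and rc[0..m-1] reflection
     * coefficients out. */
    static void levinson_ref(const double *R, double *A, double *rc, int m)
    {
        double alpha = R[0];                      /* prediction error energy */
        A[0] = 1.0;
        for (int i = 1; i <= m; i++) {
            double acc = R[i];
            for (int j = 1; j < i; j++)
                acc += A[j] * R[i - j];
            double k = -acc / alpha;              /* reflection coefficient */
            rc[i - 1] = k;
            for (int j = 1; j <= i / 2; j++) {    /* A_new[j] = A[j] + k*A[i-j] */
                double tmp = A[j] + k * A[i - j];
                A[i - j] += k * A[j];
                A[j] = tmp;
            }
            A[i] = k;
            alpha *= 1.0 - k * k;                 /* error update, "Alpha*(1-K**2)" above */
        }
    }
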
diff --git a/media/libstagefright/codecs/amrwbenc/src/log2.c b/media/libstagefright/codecs/amrwbenc/src/log2.c
index 0f65541..f14058e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/log2.c
+++ b/media/libstagefright/codecs/amrwbenc/src/log2.c
@@ -54,33 +54,33 @@
*************************************************************************/
void Log2_norm (
- Word32 L_x, /* (i) : input value (normalized) */
- Word16 exp, /* (i) : norm_l (L_x) */
- Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
- Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1) */
- )
+ Word32 L_x, /* (i) : input value (normalized) */
+ Word16 exp, /* (i) : norm_l (L_x) */
+ Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
+ Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1) */
+ )
{
- Word16 i, a, tmp;
- Word32 L_y;
- if (L_x <= (Word32) 0)
- {
- *exponent = 0;
- *fraction = 0;
- return;
- }
- *exponent = (30 - exp);
- L_x = (L_x >> 9);
- i = extract_h (L_x); /* Extract b25-b31 */
- L_x = (L_x >> 1);
- a = (Word16)(L_x); /* Extract b10-b24 of fraction */
- a = (Word16)(a & (Word16)0x7fff);
- i -= 32;
- L_y = L_deposit_h (table[i]); /* table[i] << 16 */
- tmp = vo_sub(table[i], table[i + 1]); /* table[i] - table[i+1] */
- L_y = vo_L_msu (L_y, tmp, a); /* L_y -= tmp*a*2 */
- *fraction = extract_h (L_y);
+ Word16 i, a, tmp;
+ Word32 L_y;
+ if (L_x <= (Word32) 0)
+ {
+ *exponent = 0;
+ *fraction = 0;
+ return;
+ }
+ *exponent = (30 - exp);
+ L_x = (L_x >> 9);
+ i = extract_h (L_x); /* Extract b25-b31 */
+ L_x = (L_x >> 1);
+ a = (Word16)(L_x); /* Extract b10-b24 of fraction */
+ a = (Word16)(a & (Word16)0x7fff);
+ i -= 32;
+ L_y = L_deposit_h (table[i]); /* table[i] << 16 */
+ tmp = vo_sub(table[i], table[i + 1]); /* table[i] - table[i+1] */
+ L_y = vo_L_msu (L_y, tmp, a); /* L_y -= tmp*a*2 */
+ *fraction = extract_h (L_y);
- return;
+ return;
}
/*************************************************************************
@@ -96,15 +96,15 @@ void Log2_norm (
*************************************************************************/
void Log2 (
- Word32 L_x, /* (i) : input value */
- Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
- Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1) */
- )
+ Word32 L_x, /* (i) : input value */
+ Word16 *exponent, /* (o) : Integer part of Log2. (range: 0<=val<=30) */
+ Word16 *fraction /* (o) : Fractional part of Log2. (range: 0<=val<1) */
+ )
{
- Word16 exp;
+ Word16 exp;
- exp = norm_l(L_x);
- Log2_norm ((L_x << exp), exp, exponent, fraction);
+ exp = norm_l(L_x);
+ Log2_norm ((L_x << exp), exp, exponent, fraction);
}
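
Log2 above normalizes its argument and defers to Log2_norm, which splits log2(L_x) into an integer part (30 - norm_l(L_x)) and a Q15 fractional part read from a small table with linear interpolation. Conceptually:

    #include <math.h>
    #include <stdint.h>

    /* What Log2()/Log2_norm() compute, in double precision (sketch). */
    static void log2_ref(int32_t L_x, int16_t *exponent, double *fraction)
    {
        if (L_x <= 0) {                  /* same degenerate-input handling as above */
            *exponent = 0;
            *fraction = 0.0;
            return;
        }
        double v = log2((double)L_x);
        *exponent = (int16_t)floor(v);   /* 0..30 for positive 32-bit inputs */
        *fraction = v - floor(v);        /* stored as Q15 by the fixed-point code */
    }
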
diff --git a/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c b/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c
index 1d5d076..9a9dd34 100644
--- a/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c
+++ b/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c
@@ -17,7 +17,7 @@
/***********************************************************************
* File: lp_dec2.c *
* *
-* Description:Decimate a vector by 2 with 2nd order fir filter *
+* Description:Decimate a vector by 2 with 2nd order fir filter *
* *
************************************************************************/
@@ -33,36 +33,36 @@
static Word16 h_fir[L_FIR] = {4260, 7536, 9175, 7536, 4260};
void LP_Decim2(
- Word16 x[], /* in/out: signal to process */
- Word16 l, /* input : size of filtering */
- Word16 mem[] /* in/out: memory (size=3) */
- )
+ Word16 x[], /* in/out: signal to process */
+ Word16 l, /* input : size of filtering */
+ Word16 mem[] /* in/out: memory (size=3) */
+ )
{
- Word16 *p_x, x_buf[L_FRAME + L_MEM];
- Word32 i, j;
- Word32 L_tmp;
- /* copy initial filter states into buffer */
- p_x = x_buf;
- for (i = 0; i < L_MEM; i++)
- {
- *p_x++ = mem[i];
- mem[i] = x[l - L_MEM + i];
- }
- for (i = 0; i < l; i++)
- {
- *p_x++ = x[i];
- }
- for (i = 0, j = 0; i < l; i += 2, j++)
- {
- p_x = &x_buf[i];
- L_tmp = ((*p_x++) * h_fir[0]);
- L_tmp += ((*p_x++) * h_fir[1]);
- L_tmp += ((*p_x++) * h_fir[2]);
- L_tmp += ((*p_x++) * h_fir[3]);
- L_tmp += ((*p_x++) * h_fir[4]);
- x[j] = (L_tmp + 0x4000)>>15;
- }
- return;
+ Word16 *p_x, x_buf[L_FRAME + L_MEM];
+ Word32 i, j;
+ Word32 L_tmp;
+ /* copy initial filter states into buffer */
+ p_x = x_buf;
+ for (i = 0; i < L_MEM; i++)
+ {
+ *p_x++ = mem[i];
+ mem[i] = x[l - L_MEM + i];
+ }
+ for (i = 0; i < l; i++)
+ {
+ *p_x++ = x[i];
+ }
+ for (i = 0, j = 0; i < l; i += 2, j++)
+ {
+ p_x = &x_buf[i];
+ L_tmp = ((*p_x++) * h_fir[0]);
+ L_tmp += ((*p_x++) * h_fir[1]);
+ L_tmp += ((*p_x++) * h_fir[2]);
+ L_tmp += ((*p_x++) * h_fir[3]);
+ L_tmp += ((*p_x++) * h_fir[4]);
+ x[j] = (L_tmp + 0x4000)>>15;
+ }
+ return;
}
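
LP_Decim2 above low-pass filters with the symmetric 5-tap FIR h_fir and keeps every second output, carrying three samples of history in mem[]. The per-output computation of the inner loop, written out plainly:

    #include <stdint.h>

    /* One decimated output sample of LP_Decim2 (sketch); p points at x_buf[2*j]. */
    static int16_t decim2_sample(const int16_t *p, const int16_t h[5])
    {
        int32_t acc = 0;
        for (int k = 0; k < 5; k++)
            acc += (int32_t)p[k] * h[k];
        return (int16_t)((acc + 0x4000) >> 15);   /* Q15 rounding, as for x[j] above */
    }
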
diff --git a/media/libstagefright/codecs/amrwbenc/src/math_op.c b/media/libstagefright/codecs/amrwbenc/src/math_op.c
index 7affbb2..9d7c74e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/math_op.c
+++ b/media/libstagefright/codecs/amrwbenc/src/math_op.c
@@ -55,17 +55,17 @@
|___________________________________________________________________________|
*/
Word32 Isqrt( /* (o) Q31 : output value (range: 0<=val<1) */
- Word32 L_x /* (i) Q0 : input value (range: 0<=val<=7fffffff) */
- )
+ Word32 L_x /* (i) Q0 : input value (range: 0<=val<=7fffffff) */
+ )
{
- Word16 exp;
- Word32 L_y;
- exp = norm_l(L_x);
- L_x = (L_x << exp); /* L_x is normalized */
- exp = (31 - exp);
- Isqrt_n(&L_x, &exp);
- L_y = (L_x << exp); /* denormalization */
- return (L_y);
+ Word16 exp;
+ Word32 L_y;
+ exp = norm_l(L_x);
+ L_x = (L_x << exp); /* L_x is normalized */
+ exp = (31 - exp);
+ Isqrt_n(&L_x, &exp);
+ L_y = (L_x << exp); /* denormalization */
+ return (L_y);
}
/*___________________________________________________________________________
@@ -90,43 +90,43 @@ Word32 Isqrt( /* (o) Q31 : output value (range: 0<=
*/
static Word16 table_isqrt[49] =
{
- 32767, 31790, 30894, 30070, 29309, 28602, 27945, 27330, 26755, 26214,
- 25705, 25225, 24770, 24339, 23930, 23541, 23170, 22817, 22479, 22155,
- 21845, 21548, 21263, 20988, 20724, 20470, 20225, 19988, 19760, 19539,
- 19326, 19119, 18919, 18725, 18536, 18354, 18176, 18004, 17837, 17674,
- 17515, 17361, 17211, 17064, 16921, 16782, 16646, 16514, 16384
+ 32767, 31790, 30894, 30070, 29309, 28602, 27945, 27330, 26755, 26214,
+ 25705, 25225, 24770, 24339, 23930, 23541, 23170, 22817, 22479, 22155,
+ 21845, 21548, 21263, 20988, 20724, 20470, 20225, 19988, 19760, 19539,
+ 19326, 19119, 18919, 18725, 18536, 18354, 18176, 18004, 17837, 17674,
+ 17515, 17361, 17211, 17064, 16921, 16782, 16646, 16514, 16384
};
void Isqrt_n(
- Word32 * frac, /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
- Word16 * exp /* (i/o) : exponent (value = frac x 2^exponent) */
- )
+ Word32 * frac, /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
+ Word16 * exp /* (i/o) : exponent (value = frac x 2^exponent) */
+ )
{
- Word16 i, a, tmp;
-
- if (*frac <= (Word32) 0)
- {
- *exp = 0;
- *frac = 0x7fffffffL;
- return;
- }
-
- if((*exp & 1) == 1) /*If exponant odd -> shift right */
- *frac = (*frac) >> 1;
-
- *exp = negate((*exp - 1) >> 1);
-
- *frac = (*frac >> 9);
- i = extract_h(*frac); /* Extract b25-b31 */
- *frac = (*frac >> 1);
- a = (Word16)(*frac); /* Extract b10-b24 */
- a = (Word16) (a & (Word16) 0x7fff);
- i -= 16;
- *frac = L_deposit_h(table_isqrt[i]); /* table[i] << 16 */
- tmp = vo_sub(table_isqrt[i], table_isqrt[i + 1]); /* table[i] - table[i+1]) */
- *frac = vo_L_msu(*frac, tmp, a); /* frac -= tmp*a*2 */
-
- return;
+ Word16 i, a, tmp;
+
+ if (*frac <= (Word32) 0)
+ {
+ *exp = 0;
+ *frac = 0x7fffffffL;
+ return;
+ }
+
+    if((*exp & 1) == 1)                   /* If exponent is odd -> shift right */
+ *frac = (*frac) >> 1;
+
+ *exp = negate((*exp - 1) >> 1);
+
+ *frac = (*frac >> 9);
+ i = extract_h(*frac); /* Extract b25-b31 */
+ *frac = (*frac >> 1);
+ a = (Word16)(*frac); /* Extract b10-b24 */
+ a = (Word16) (a & (Word16) 0x7fff);
+ i -= 16;
+ *frac = L_deposit_h(table_isqrt[i]); /* table[i] << 16 */
+ tmp = vo_sub(table_isqrt[i], table_isqrt[i + 1]); /* table[i] - table[i+1]) */
+ *frac = vo_L_msu(*frac, tmp, a); /* frac -= tmp*a*2 */
+
+ return;
}
/*___________________________________________________________________________
@@ -149,34 +149,34 @@ void Isqrt_n(
*/
static Word16 table_pow2[33] =
{
- 16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911,
- 20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726,
- 25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706,
- 31379, 32066, 32767
+ 16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911,
+ 20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726,
+ 25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706,
+ 31379, 32066, 32767
};
Word32 Pow2( /* (o) Q0 : result (range: 0<=val<=0x7fffffff) */
- Word16 exponant, /* (i) Q0 : Integer part. (range: 0<=val<=30) */
- Word16 fraction /* (i) Q15 : Fractionnal part. (range: 0.0<=val<1.0) */
- )
+ Word16 exponant, /* (i) Q0 : Integer part. (range: 0<=val<=30) */
+ Word16 fraction /* (i) Q15 : Fractional part. (range: 0.0<=val<1.0) */
+ )
{
- Word16 exp, i, a, tmp;
- Word32 L_x;
+ Word16 exp, i, a, tmp;
+ Word32 L_x;
- L_x = vo_L_mult(fraction, 32); /* L_x = fraction<<6 */
- i = extract_h(L_x); /* Extract b10-b16 of fraction */
- L_x =L_x >> 1;
- a = (Word16)(L_x); /* Extract b0-b9 of fraction */
- a = (Word16) (a & (Word16) 0x7fff);
+ L_x = vo_L_mult(fraction, 32); /* L_x = fraction<<6 */
+ i = extract_h(L_x); /* Extract b10-b16 of fraction */
+ L_x =L_x >> 1;
+ a = (Word16)(L_x); /* Extract b0-b9 of fraction */
+ a = (Word16) (a & (Word16) 0x7fff);
- L_x = L_deposit_h(table_pow2[i]); /* table[i] << 16 */
- tmp = vo_sub(table_pow2[i], table_pow2[i + 1]); /* table[i] - table[i+1] */
- L_x -= (tmp * a)<<1; /* L_x -= tmp*a*2 */
+ L_x = L_deposit_h(table_pow2[i]); /* table[i] << 16 */
+ tmp = vo_sub(table_pow2[i], table_pow2[i + 1]); /* table[i] - table[i+1] */
+ L_x -= (tmp * a)<<1; /* L_x -= tmp*a*2 */
- exp = vo_sub(30, exponant);
- L_x = vo_L_shr_r(L_x, exp);
+ exp = vo_sub(30, exponant);
+ L_x = vo_L_shr_r(L_x, exp);
- return (L_x);
+ return (L_x);
}
/*___________________________________________________________________________
@@ -194,25 +194,30 @@ Word32 Pow2( /* (o) Q0 : result (range: 0<=
*/
Word32 Dot_product12( /* (o) Q31: normalized result (1 < val <= -1) */
- Word16 x[], /* (i) 12bits: x vector */
- Word16 y[], /* (i) 12bits: y vector */
- Word16 lg, /* (i) : vector length */
- Word16 * exp /* (o) : exponent of result (0..+30) */
- )
+ Word16 x[], /* (i) 12bits: x vector */
+ Word16 y[], /* (i) 12bits: y vector */
+ Word16 lg, /* (i) : vector length */
+ Word16 * exp /* (o) : exponent of result (0..+30) */
+ )
{
- Word16 sft;
- Word32 i, L_sum;
- L_sum = 0;
- for (i = 0; i < lg; i++)
- {
- L_sum += x[i] * y[i];
- }
- L_sum = (L_sum << 1) + 1;
- /* Normalize acc in Q31 */
- sft = norm_l(L_sum);
- L_sum = L_sum << sft;
- *exp = 30 - sft; /* exponent = 0..30 */
- return (L_sum);
+ Word16 sft;
+ Word32 i, L_sum;
+ L_sum = 0;
+ for (i = 0; i < lg; i++)
+ {
+ Word32 tmp = (Word32) x[i] * (Word32) y[i];
+ if (tmp == (Word32) 0x40000000L) {
+ tmp = MAX_32;
+ }
+ L_sum = L_add(L_sum, tmp);
+ }
+ L_sum = L_shl2(L_sum, 1);
+ L_sum = L_add(L_sum, 1);
+ /* Normalize acc in Q31 */
+ sft = norm_l(L_sum);
+ L_sum = L_sum << sft;
+ *exp = 30 - sft; /* exponent = 0..30 */
+ return (L_sum);
}
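The one behavioural change in this hunk is in Dot_product12(): the product of two Q15 samples can reach 0x40000000 (only when both are -32768), and doubling that to Q31 would wrap, so the patch clamps that case to MAX_32 and accumulates with the saturating L_add()/L_shl2() helpers. A rough standard-C sketch of what that saturation achieves, without the normalization/exponent step the real function also performs (dot_q31_sat is an illustrative name):

    #include <stdint.h>
    #include <limits.h>

    /* Saturating Q31 dot product over 16-bit vectors.  The worst single
     * product is (-32768) * (-32768) = 0x40000000; doubling it already
     * exceeds INT32_MAX, hence the clamp. */
    static int32_t dot_q31_sat(const int16_t *x, const int16_t *y, int n)
    {
        int64_t sum = 0;
        for (int i = 0; i < n; i++)
            sum += (int32_t)x[i] * y[i];
        sum = 2 * sum + 1;                      /* Q30 -> Q31, +1 keeps it non-zero */
        if (sum > INT32_MAX) sum = INT32_MAX;   /* what L_add()/L_shl2() saturate to */
        if (sum < INT32_MIN) sum = INT32_MIN;
        return (int32_t)sum;
    }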
diff --git a/media/libstagefright/codecs/amrwbenc/src/mem_align.c b/media/libstagefright/codecs/amrwbenc/src/mem_align.c
index 3b7853f..04e5976 100644
--- a/media/libstagefright/codecs/amrwbenc/src/mem_align.c
+++ b/media/libstagefright/codecs/amrwbenc/src/mem_align.c
@@ -15,18 +15,18 @@
*/
/*******************************************************************************
- File: mem_align.c
+ File: mem_align.c
- Content: Memory alloc alignments functions
+ Content: Memory alloc alignments functions
*******************************************************************************/
-#include "mem_align.h"
+#include "mem_align.h"
#ifdef _MSC_VER
-#include <stddef.h>
+#include <stddef.h>
#else
-#include <stdint.h>
+#include <stdint.h>
#endif
/*****************************************************************************
@@ -39,50 +39,50 @@
void *
mem_malloc(VO_MEM_OPERATOR *pMemop, unsigned int size, unsigned char alignment, unsigned int CodecID)
{
- int ret;
- unsigned char *mem_ptr;
- VO_MEM_INFO MemInfo;
+ int ret;
+ unsigned char *mem_ptr;
+ VO_MEM_INFO MemInfo;
- if (!alignment) {
+ if (!alignment) {
- MemInfo.Flag = 0;
- MemInfo.Size = size + 1;
- ret = pMemop->Alloc(CodecID, &MemInfo);
- if(ret != 0)
- return 0;
- mem_ptr = (unsigned char *)MemInfo.VBuffer;
+ MemInfo.Flag = 0;
+ MemInfo.Size = size + 1;
+ ret = pMemop->Alloc(CodecID, &MemInfo);
+ if(ret != 0)
+ return 0;
+ mem_ptr = (unsigned char *)MemInfo.VBuffer;
- pMemop->Set(CodecID, mem_ptr, 0, size + 1);
+ pMemop->Set(CodecID, mem_ptr, 0, size + 1);
- *mem_ptr = (unsigned char)1;
+ *mem_ptr = (unsigned char)1;
- return ((void *)(mem_ptr+1));
- } else {
- unsigned char *tmp;
+ return ((void *)(mem_ptr+1));
+ } else {
+ unsigned char *tmp;
- MemInfo.Flag = 0;
- MemInfo.Size = size + alignment;
- ret = pMemop->Alloc(CodecID, &MemInfo);
- if(ret != 0)
- return 0;
+ MemInfo.Flag = 0;
+ MemInfo.Size = size + alignment;
+ ret = pMemop->Alloc(CodecID, &MemInfo);
+ if(ret != 0)
+ return 0;
- tmp = (unsigned char *)MemInfo.VBuffer;
+ tmp = (unsigned char *)MemInfo.VBuffer;
- pMemop->Set(CodecID, tmp, 0, size + alignment);
+ pMemop->Set(CodecID, tmp, 0, size + alignment);
- mem_ptr =
- (unsigned char *) ((intptr_t) (tmp + alignment - 1) &
- (~((intptr_t) (alignment - 1))));
+ mem_ptr =
+ (unsigned char *) ((intptr_t) (tmp + alignment - 1) &
+ (~((intptr_t) (alignment - 1))));
- if (mem_ptr == tmp)
- mem_ptr += alignment;
+ if (mem_ptr == tmp)
+ mem_ptr += alignment;
- *(mem_ptr - 1) = (unsigned char) (mem_ptr - tmp);
+ *(mem_ptr - 1) = (unsigned char) (mem_ptr - tmp);
- return ((void *)mem_ptr);
- }
+ return ((void *)mem_ptr);
+ }
- return(0);
+ return(0);
}
@@ -96,16 +96,16 @@ void
mem_free(VO_MEM_OPERATOR *pMemop, void *mem_ptr, unsigned int CodecID)
{
- unsigned char *ptr;
+ unsigned char *ptr;
- if (mem_ptr == 0)
- return;
+ if (mem_ptr == 0)
+ return;
- ptr = mem_ptr;
+ ptr = mem_ptr;
- ptr -= *(ptr - 1);
+ ptr -= *(ptr - 1);
- pMemop->Free(CodecID, ptr);
+ pMemop->Free(CodecID, ptr);
}
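mem_malloc()/mem_free() above use the usual over-allocate-and-round trick: allocate size + alignment bytes, round the pointer up to an alignment boundary, and store the distance back to the raw block in the byte just before the returned pointer so the free path can undo it. A sketch of the same bookkeeping on top of plain malloc()/free() instead of the VO_MEM_OPERATOR callbacks, assuming a non-zero power-of-two alignment no larger than 255:

    #include <stdint.h>
    #include <stdlib.h>

    static void *aligned_alloc_sketch(size_t size, unsigned char alignment)
    {
        unsigned char *raw = malloc(size + alignment);
        unsigned char *p;

        if (raw == NULL)
            return NULL;
        /* Round up to the next boundary strictly above the raw pointer. */
        p = (unsigned char *)(((uintptr_t)raw + alignment) &
                              ~(uintptr_t)(alignment - 1));
        p[-1] = (unsigned char)(p - raw);   /* distance back to the raw block */
        return p;
    }

    static void aligned_free_sketch(void *mem)
    {
        unsigned char *p = mem;
        if (p != NULL)
            free(p - p[-1]);
    }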
diff --git a/media/libstagefright/codecs/amrwbenc/src/oper_32b.c b/media/libstagefright/codecs/amrwbenc/src/oper_32b.c
index 27cad76..e6f80d0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/oper_32b.c
+++ b/media/libstagefright/codecs/amrwbenc/src/oper_32b.c
@@ -56,9 +56,9 @@
__inline void VO_L_Extract (Word32 L_32, Word16 *hi, Word16 *lo)
{
- *hi = (Word16)(L_32 >> 16);
- *lo = (Word16)((L_32 & 0xffff) >> 1);
- return;
+ *hi = (Word16)(L_32 >> 16);
+ *lo = (Word16)((L_32 & 0xffff) >> 1);
+ return;
}
/*****************************************************************************
@@ -84,11 +84,11 @@ __inline void VO_L_Extract (Word32 L_32, Word16 *hi, Word16 *lo)
Word32 L_Comp (Word16 hi, Word16 lo)
{
- Word32 L_32;
+ Word32 L_32;
- L_32 = L_deposit_h (hi);
+ L_32 = L_deposit_h (hi);
- return (L_mac (L_32, lo, 1)); /* = hi<<16 + lo<<1 */
+ return (L_mac (L_32, lo, 1)); /* = hi<<16 + lo<<1 */
}
/*****************************************************************************
@@ -113,13 +113,13 @@ Word32 L_Comp (Word16 hi, Word16 lo)
__inline Word32 Mpy_32 (Word16 hi1, Word16 lo1, Word16 hi2, Word16 lo2)
{
- Word32 L_32;
- L_32 = (hi1 * hi2);
- L_32 += (hi1 * lo2) >> 15;
- L_32 += (lo1 * hi2) >> 15;
- L_32 <<= 1;
+ Word32 L_32;
+ L_32 = (hi1 * hi2);
+ L_32 += (hi1 * lo2) >> 15;
+ L_32 += (lo1 * hi2) >> 15;
+ L_32 <<= 1;
- return (L_32);
+ return (L_32);
}
/*****************************************************************************
@@ -142,12 +142,12 @@ __inline Word32 Mpy_32 (Word16 hi1, Word16 lo1, Word16 hi2, Word16 lo2)
__inline Word32 Mpy_32_16 (Word16 hi, Word16 lo, Word16 n)
{
- Word32 L_32;
+ Word32 L_32;
- L_32 = (hi * n)<<1;
- L_32 += (((lo * n)>>15)<<1);
+ L_32 = (hi * n)<<1;
+ L_32 += (((lo * n)>>15)<<1);
- return (L_32);
+ return (L_32);
}
/*****************************************************************************
@@ -194,30 +194,30 @@ __inline Word32 Mpy_32_16 (Word16 hi, Word16 lo, Word16 n)
Word32 Div_32 (Word32 L_num, Word16 denom_hi, Word16 denom_lo)
{
- Word16 approx, hi, lo, n_hi, n_lo;
- Word32 L_32;
+ Word16 approx, hi, lo, n_hi, n_lo;
+ Word32 L_32;
- /* First approximation: 1 / L_denom = 1/denom_hi */
+ /* First approximation: 1 / L_denom = 1/denom_hi */
- approx = div_s ((Word16) 0x3fff, denom_hi);
+ approx = div_s ((Word16) 0x3fff, denom_hi);
- /* 1/L_denom = approx * (2.0 - L_denom * approx) */
+ /* 1/L_denom = approx * (2.0 - L_denom * approx) */
- L_32 = Mpy_32_16 (denom_hi, denom_lo, approx);
+ L_32 = Mpy_32_16 (denom_hi, denom_lo, approx);
- L_32 = L_sub ((Word32) 0x7fffffffL, L_32);
- hi = L_32 >> 16;
- lo = (L_32 & 0xffff) >> 1;
+ L_32 = L_sub ((Word32) 0x7fffffffL, L_32);
+ hi = L_32 >> 16;
+ lo = (L_32 & 0xffff) >> 1;
- L_32 = Mpy_32_16 (hi, lo, approx);
+ L_32 = Mpy_32_16 (hi, lo, approx);
- /* L_num * (1/L_denom) */
- hi = L_32 >> 16;
- lo = (L_32 & 0xffff) >> 1;
- VO_L_Extract (L_num, &n_hi, &n_lo);
- L_32 = Mpy_32 (n_hi, n_lo, hi, lo);
- L_32 = L_shl2(L_32, 2);
+ /* L_num * (1/L_denom) */
+ hi = L_32 >> 16;
+ lo = (L_32 & 0xffff) >> 1;
+ VO_L_Extract (L_num, &n_hi, &n_lo);
+ L_32 = Mpy_32 (n_hi, n_lo, hi, lo);
+ L_32 = L_shl2(L_32, 2);
- return (L_32);
+ return (L_32);
}
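oper_32b.c builds double precision out of 16x16 multiplies: VO_L_Extract() splits a Q31 value into a 16-bit high part and a 15-bit low part, and Mpy_32() combines the cross products so the result approximates (a * b) >> 31. A small sketch that checks this against a 64-bit reference (function names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Split a Q31 value the way VO_L_Extract() does: hi = top 16 bits,
     * lo = next 15 bits, so a is roughly (hi << 16) + (lo << 1). */
    static void split_q31(int32_t a, int16_t *hi, int16_t *lo)
    {
        *hi = (int16_t)(a >> 16);
        *lo = (int16_t)((a & 0xffff) >> 1);
    }

    /* Double-precision product built from 16x16 multiplies only, mirroring
     * Mpy_32(); approximates (a * b) >> 31, up to truncation of lo1*lo2. */
    static int32_t mpy32_sketch(int16_t hi1, int16_t lo1, int16_t hi2, int16_t lo2)
    {
        int32_t r = hi1 * hi2;
        r += (hi1 * lo2) >> 15;
        r += (lo1 * hi2) >> 15;
        return r * 2;
    }

    int main(void)
    {
        int16_t h1, l1, h2, l2;
        int32_t a = 0x12345678, b = 0x0abcdef0;

        split_q31(a, &h1, &l1);
        split_q31(b, &h2, &l2);
        printf("sketch=%ld exact=%lld\n",
               (long)mpy32_sketch(h1, l1, h2, l2),
               (long long)(((int64_t)a * b) >> 31));
        return 0;
    }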
diff --git a/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c b/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c
index b8174b9..5d2b4bd 100644
--- a/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c
+++ b/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c
@@ -18,7 +18,7 @@
* File: p_med_ol.c *
* *
* Description: Compute the open loop pitch lag *
-* output: open loop pitch lag *
+* output: open loop pitch lag *
************************************************************************/
#include "typedef.h"
@@ -29,131 +29,131 @@
#include "p_med_ol.tab"
Word16 Pitch_med_ol(
- Word16 wsp[], /* i: signal used to compute the open loop pitch*/
+ Word16 wsp[], /* i: signal used to compute the open loop pitch*/
/* wsp[-pit_max] to wsp[-1] should be known */
- Coder_State *st, /* i/o: codec global structure */
- Word16 L_frame /* i: length of frame to compute pitch */
- )
+ Coder_State *st, /* i/o: codec global structure */
+ Word16 L_frame /* i: length of frame to compute pitch */
+ )
{
- Word16 Tm;
- Word16 hi, lo;
- Word16 *ww, *we, *hp_wsp;
- Word16 exp_R0, exp_R1, exp_R2;
- Word32 i, j, max, R0, R1, R2;
- Word16 *p1, *p2;
- Word16 L_min = 17; /* minimum pitch lag: PIT_MIN / OPL_DECIM */
- Word16 L_max = 115; /* maximum pitch lag: PIT_MAX / OPL_DECIM */
- Word16 L_0 = st->old_T0_med; /* old open-loop pitch */
- Word16 *gain = &(st->ol_gain); /* normalize correlation of hp_wsp for the lag */
- Word16 *hp_wsp_mem = st->hp_wsp_mem; /* memory of the hypass filter for hp_wsp[] (lg = 9)*/
- Word16 *old_hp_wsp = st->old_hp_wsp; /* hypass wsp[] */
- Word16 wght_flg = st->ol_wght_flg; /* is weighting function used */
-
- ww = &corrweight[198];
- we = &corrweight[98 + L_max - L_0];
-
- max = MIN_32;
- Tm = 0;
- for (i = L_max; i > L_min; i--)
- {
- /* Compute the correlation */
- R0 = 0;
- p1 = wsp;
- p2 = &wsp[-i];
- for (j = 0; j < L_frame; j+=4)
- {
- R0 += vo_L_mult((*p1++), (*p2++));
- R0 += vo_L_mult((*p1++), (*p2++));
- R0 += vo_L_mult((*p1++), (*p2++));
- R0 += vo_L_mult((*p1++), (*p2++));
- }
- /* Weighting of the correlation function. */
- hi = R0>>16;
- lo = (R0 & 0xffff)>>1;
-
- R0 = Mpy_32_16(hi, lo, *ww);
- ww--;
-
- if ((L_0 > 0) && (wght_flg > 0))
- {
- /* Weight the neighbourhood of the old lag. */
- hi = R0>>16;
- lo = (R0 & 0xffff)>>1;
- R0 = Mpy_32_16(hi, lo, *we);
- we--;
- }
- if(R0 >= max)
- {
- max = R0;
- Tm = i;
- }
- }
-
- /* Hypass the wsp[] vector */
- hp_wsp = old_hp_wsp + L_max;
- Hp_wsp(wsp, hp_wsp, L_frame, hp_wsp_mem);
-
- /* Compute normalize correlation at delay Tm */
- R0 = 0;
- R1 = 0;
- R2 = 0;
- p1 = hp_wsp;
- p2 = hp_wsp - Tm;
- for (j = 0; j < L_frame; j+=4)
- {
- R2 += vo_mult32(*p1, *p1);
- R1 += vo_mult32(*p2, *p2);
- R0 += vo_mult32(*p1++, *p2++);
- R2 += vo_mult32(*p1, *p1);
- R1 += vo_mult32(*p2, *p2);
- R0 += vo_mult32(*p1++, *p2++);
- R2 += vo_mult32(*p1, *p1);
- R1 += vo_mult32(*p2, *p2);
- R0 += vo_mult32(*p1++, *p2++);
- R2 += vo_mult32(*p1, *p1);
- R1 += vo_mult32(*p2, *p2);
- R0 += vo_mult32(*p1++, *p2++);
- }
- R0 = R0 <<1;
- R1 = (R1 <<1) + 1L;
- R2 = (R2 <<1) + 1L;
- /* gain = R0/ sqrt(R1*R2) */
-
- exp_R0 = norm_l(R0);
- R0 = (R0 << exp_R0);
-
- exp_R1 = norm_l(R1);
- R1 = (R1 << exp_R1);
-
- exp_R2 = norm_l(R2);
- R2 = (R2 << exp_R2);
-
-
- R1 = vo_L_mult(vo_round(R1), vo_round(R2));
-
- i = norm_l(R1);
- R1 = (R1 << i);
-
- exp_R1 += exp_R2;
- exp_R1 += i;
- exp_R1 = 62 - exp_R1;
-
- Isqrt_n(&R1, &exp_R1);
-
- R0 = vo_L_mult(voround(R0), voround(R1));
- exp_R0 = 31 - exp_R0;
- exp_R0 += exp_R1;
-
- *gain = vo_round(L_shl(R0, exp_R0));
-
- /* Shitf hp_wsp[] for next frame */
-
- for (i = 0; i < L_max; i++)
- {
- old_hp_wsp[i] = old_hp_wsp[i + L_frame];
- }
-
- return (Tm);
+ Word16 Tm;
+ Word16 hi, lo;
+ Word16 *ww, *we, *hp_wsp;
+ Word16 exp_R0, exp_R1, exp_R2;
+ Word32 i, j, max, R0, R1, R2;
+ Word16 *p1, *p2;
+ Word16 L_min = 17; /* minimum pitch lag: PIT_MIN / OPL_DECIM */
+ Word16 L_max = 115; /* maximum pitch lag: PIT_MAX / OPL_DECIM */
+ Word16 L_0 = st->old_T0_med; /* old open-loop pitch */
+ Word16 *gain = &(st->ol_gain); /* normalized correlation of hp_wsp for the lag */
+ Word16 *hp_wsp_mem = st->hp_wsp_mem; /* memory of the highpass filter for hp_wsp[] (lg = 9)*/
+ Word16 *old_hp_wsp = st->old_hp_wsp; /* highpass wsp[] */
+ Word16 wght_flg = st->ol_wght_flg; /* is weighting function used */
+
+ ww = &corrweight[198];
+ we = &corrweight[98 + L_max - L_0];
+
+ max = MIN_32;
+ Tm = 0;
+ for (i = L_max; i > L_min; i--)
+ {
+ /* Compute the correlation */
+ R0 = 0;
+ p1 = wsp;
+ p2 = &wsp[-i];
+ for (j = 0; j < L_frame; j+=4)
+ {
+ R0 += vo_L_mult((*p1++), (*p2++));
+ R0 += vo_L_mult((*p1++), (*p2++));
+ R0 += vo_L_mult((*p1++), (*p2++));
+ R0 += vo_L_mult((*p1++), (*p2++));
+ }
+ /* Weighting of the correlation function. */
+ hi = R0>>16;
+ lo = (R0 & 0xffff)>>1;
+
+ R0 = Mpy_32_16(hi, lo, *ww);
+ ww--;
+
+ if ((L_0 > 0) && (wght_flg > 0))
+ {
+ /* Weight the neighbourhood of the old lag. */
+ hi = R0>>16;
+ lo = (R0 & 0xffff)>>1;
+ R0 = Mpy_32_16(hi, lo, *we);
+ we--;
+ }
+ if(R0 >= max)
+ {
+ max = R0;
+ Tm = i;
+ }
+ }
+
+ /* Highpass the wsp[] vector */
+ hp_wsp = old_hp_wsp + L_max;
+ Hp_wsp(wsp, hp_wsp, L_frame, hp_wsp_mem);
+
+ /* Compute normalized correlation at delay Tm */
+ R0 = 0;
+ R1 = 0;
+ R2 = 0;
+ p1 = hp_wsp;
+ p2 = hp_wsp - Tm;
+ for (j = 0; j < L_frame; j+=4)
+ {
+ R2 += vo_mult32(*p1, *p1);
+ R1 += vo_mult32(*p2, *p2);
+ R0 += vo_mult32(*p1++, *p2++);
+ R2 += vo_mult32(*p1, *p1);
+ R1 += vo_mult32(*p2, *p2);
+ R0 += vo_mult32(*p1++, *p2++);
+ R2 += vo_mult32(*p1, *p1);
+ R1 += vo_mult32(*p2, *p2);
+ R0 += vo_mult32(*p1++, *p2++);
+ R2 += vo_mult32(*p1, *p1);
+ R1 += vo_mult32(*p2, *p2);
+ R0 += vo_mult32(*p1++, *p2++);
+ }
+ R0 = R0 <<1;
+ R1 = (R1 <<1) + 1L;
+ R2 = (R2 <<1) + 1L;
+ /* gain = R0/ sqrt(R1*R2) */
+
+ exp_R0 = norm_l(R0);
+ R0 = (R0 << exp_R0);
+
+ exp_R1 = norm_l(R1);
+ R1 = (R1 << exp_R1);
+
+ exp_R2 = norm_l(R2);
+ R2 = (R2 << exp_R2);
+
+
+ R1 = vo_L_mult(voround(R1), voround(R2));
+
+ i = norm_l(R1);
+ R1 = (R1 << i);
+
+ exp_R1 += exp_R2;
+ exp_R1 += i;
+ exp_R1 = 62 - exp_R1;
+
+ Isqrt_n(&R1, &exp_R1);
+
+ R0 = vo_L_mult(voround(R0), voround(R1));
+ exp_R0 = 31 - exp_R0;
+ exp_R0 += exp_R1;
+
+ *gain = vo_round(L_shl(R0, exp_R0));
+
+ /* Shift hp_wsp[] for next frame */
+
+ for (i = 0; i < L_max; i++)
+ {
+ old_hp_wsp[i] = old_hp_wsp[i + L_frame];
+ }
+
+ return (Tm);
}
/************************************************************************
@@ -171,84 +171,84 @@ Word16 Pitch_med_ol(
Word16 median5(Word16 x[])
{
- Word16 x1, x2, x3, x4, x5;
- Word16 tmp;
-
- x1 = x[-2];
- x2 = x[-1];
- x3 = x[0];
- x4 = x[1];
- x5 = x[2];
-
- if (x2 < x1)
- {
- tmp = x1;
- x1 = x2;
- x2 = tmp;
- }
- if (x3 < x1)
- {
- tmp = x1;
- x1 = x3;
- x3 = tmp;
- }
- if (x4 < x1)
- {
- tmp = x1;
- x1 = x4;
- x4 = tmp;
- }
- if (x5 < x1)
- {
- x5 = x1;
- }
- if (x3 < x2)
- {
- tmp = x2;
- x2 = x3;
- x3 = tmp;
- }
- if (x4 < x2)
- {
- tmp = x2;
- x2 = x4;
- x4 = tmp;
- }
- if (x5 < x2)
- {
- x5 = x2;
- }
- if (x4 < x3)
- {
- x3 = x4;
- }
- if (x5 < x3)
- {
- x3 = x5;
- }
- return (x3);
+ Word16 x1, x2, x3, x4, x5;
+ Word16 tmp;
+
+ x1 = x[-2];
+ x2 = x[-1];
+ x3 = x[0];
+ x4 = x[1];
+ x5 = x[2];
+
+ if (x2 < x1)
+ {
+ tmp = x1;
+ x1 = x2;
+ x2 = tmp;
+ }
+ if (x3 < x1)
+ {
+ tmp = x1;
+ x1 = x3;
+ x3 = tmp;
+ }
+ if (x4 < x1)
+ {
+ tmp = x1;
+ x1 = x4;
+ x4 = tmp;
+ }
+ if (x5 < x1)
+ {
+ x5 = x1;
+ }
+ if (x3 < x2)
+ {
+ tmp = x2;
+ x2 = x3;
+ x3 = tmp;
+ }
+ if (x4 < x2)
+ {
+ tmp = x2;
+ x2 = x4;
+ x4 = tmp;
+ }
+ if (x5 < x2)
+ {
+ x5 = x2;
+ }
+ if (x4 < x3)
+ {
+ x3 = x4;
+ }
+ if (x5 < x3)
+ {
+ x3 = x5;
+ }
+ return (x3);
}
Word16 Med_olag( /* output : median of 5 previous open-loop lags */
- Word16 prev_ol_lag, /* input : previous open-loop lag */
- Word16 old_ol_lag[5]
- )
+ Word16 prev_ol_lag, /* input : previous open-loop lag */
+ Word16 old_ol_lag[5]
+ )
{
- Word32 i;
+ Word32 i;
- /* Use median of 5 previous open-loop lags as old lag */
+ /* Use median of 5 previous open-loop lags as old lag */
- for (i = 4; i > 0; i--)
- {
- old_ol_lag[i] = old_ol_lag[i - 1];
- }
+ for (i = 4; i > 0; i--)
+ {
+ old_ol_lag[i] = old_ol_lag[i - 1];
+ }
- old_ol_lag[0] = prev_ol_lag;
+ old_ol_lag[0] = prev_ol_lag;
- i = median5(&old_ol_lag[2]);
+ i = median5(&old_ol_lag[2]);
- return i;
+ return i;
}
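Med_olag() smooths the open-loop pitch track by pushing each new lag into a five-entry history and returning the median, so a single spurious estimate cannot drag the tracked lag away. A compact sketch of the same behaviour using an insertion sort instead of the comparison network in median5() (the function name is illustrative):

    #include <stdint.h>

    static int16_t med_olag_sketch(int16_t new_lag, int16_t hist[5])
    {
        int16_t sorted[5];
        int i;

        for (i = 4; i > 0; i--)              /* age the lag history */
            hist[i] = hist[i - 1];
        hist[0] = new_lag;

        for (i = 0; i < 5; i++) {            /* insertion-sort a copy */
            int16_t v = hist[i];
            int j = i;
            while (j > 0 && sorted[j - 1] > v) {
                sorted[j] = sorted[j - 1];
                j--;
            }
            sorted[j] = v;
        }
        return sorted[2];                    /* middle element = median of 5 */
    }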
diff --git a/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c b/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c
index 6f55b8f..f100253 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c
@@ -25,24 +25,24 @@
#include "basic_op.h"
void Pit_shrp(
- Word16 * x, /* in/out: impulse response (or algebraic code) */
- Word16 pit_lag, /* input : pitch lag */
- Word16 sharp, /* input : pitch sharpening factor (Q15) */
- Word16 L_subfr /* input : subframe size */
- )
+ Word16 * x, /* in/out: impulse response (or algebraic code) */
+ Word16 pit_lag, /* input : pitch lag */
+ Word16 sharp, /* input : pitch sharpening factor (Q15) */
+ Word16 L_subfr /* input : subframe size */
+ )
{
- Word32 i;
- Word32 L_tmp;
- Word16 *x_ptr = x + pit_lag;
-
- for (i = pit_lag; i < L_subfr; i++)
- {
- L_tmp = (*x_ptr << 15);
- L_tmp += *x++ * sharp;
- *x_ptr++ = ((L_tmp + 0x4000)>>15);
- }
-
- return;
+ Word32 i;
+ Word32 L_tmp;
+ Word16 *x_ptr = x + pit_lag;
+
+ for (i = pit_lag; i < L_subfr; i++)
+ {
+ L_tmp = (*x_ptr << 15);
+ L_tmp += *x++ * sharp;
+ *x_ptr++ = ((L_tmp + 0x4000)>>15);
+ }
+
+ return;
}
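Pit_shrp() reinforces the pitch structure of the algebraic code: every sample at or beyond the pitch lag gets a Q15-scaled copy of the sample one lag earlier added to it, then is rounded back to 16 bits. A plain-C restatement of that loop (types and name are illustrative; it relies on the same headroom assumption as the original, i.e. sharp well below 1.0):

    #include <stdint.h>

    static void pit_shrp_sketch(int16_t *x, int pit_lag, int16_t sharp_q15, int n)
    {
        for (int i = pit_lag; i < n; i++) {
            int32_t acc = (int32_t)x[i] * 32768          /* x[i] in Q15 */
                        + (int32_t)x[i - pit_lag] * sharp_q15;
            x[i] = (int16_t)((acc + 0x4000) >> 15);      /* round back to Q0 */
        }
    }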
diff --git a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
index b66b55e..de2a221 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
@@ -18,7 +18,7 @@
* File: pitch_f4.c *
* *
* Description: Find the closed loop pitch period with *
-* 1/4 subsample resolution. *
+* 1/4 subsample resolution. *
* *
************************************************************************/
@@ -37,117 +37,117 @@
#ifdef ASM_OPT
void Norm_corr_asm(
- Word16 exc[], /* (i) : excitation buffer */
- Word16 xn[], /* (i) : target vector */
- Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
- Word16 L_subfr,
- Word16 t_min, /* (i) : minimum value of pitch lag. */
- Word16 t_max, /* (i) : maximum value of pitch lag. */
- Word16 corr_norm[] /* (o) Q15 : normalized correlation */
- );
+ Word16 exc[], /* (i) : excitation buffer */
+ Word16 xn[], /* (i) : target vector */
+ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
+ Word16 L_subfr,
+ Word16 t_min, /* (i) : minimum value of pitch lag. */
+ Word16 t_max, /* (i) : maximum value of pitch lag. */
+ Word16 corr_norm[] /* (o) Q15 : normalized correlation */
+ );
#else
static void Norm_Corr(
- Word16 exc[], /* (i) : excitation buffer */
- Word16 xn[], /* (i) : target vector */
- Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
- Word16 L_subfr,
- Word16 t_min, /* (i) : minimum value of pitch lag. */
- Word16 t_max, /* (i) : maximum value of pitch lag. */
- Word16 corr_norm[] /* (o) Q15 : normalized correlation */
- );
+ Word16 exc[], /* (i) : excitation buffer */
+ Word16 xn[], /* (i) : target vector */
+ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
+ Word16 L_subfr,
+ Word16 t_min, /* (i) : minimum value of pitch lag. */
+ Word16 t_max, /* (i) : maximum value of pitch lag. */
+ Word16 corr_norm[] /* (o) Q15 : normalized correlation */
+ );
#endif
static Word16 Interpol_4( /* (o) : interpolated value */
- Word16 * x, /* (i) : input vector */
- Word32 frac /* (i) : fraction (-4..+3) */
- );
+ Word16 * x, /* (i) : input vector */
+ Word32 frac /* (i) : fraction (-4..+3) */
+ );
Word16 Pitch_fr4( /* (o) : pitch period. */
- Word16 exc[], /* (i) : excitation buffer */
- Word16 xn[], /* (i) : target vector */
- Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
- Word16 t0_min, /* (i) : minimum value in the searched range. */
- Word16 t0_max, /* (i) : maximum value in the searched range. */
- Word16 * pit_frac, /* (o) : chosen fraction (0, 1, 2 or 3). */
- Word16 i_subfr, /* (i) : indicator for first subframe. */
- Word16 t0_fr2, /* (i) : minimum value for resolution 1/2 */
- Word16 t0_fr1, /* (i) : minimum value for resolution 1 */
- Word16 L_subfr /* (i) : Length of subframe */
- )
+ Word16 exc[], /* (i) : excitation buffer */
+ Word16 xn[], /* (i) : target vector */
+ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
+ Word16 t0_min, /* (i) : minimum value in the searched range. */
+ Word16 t0_max, /* (i) : maximum value in the searched range. */
+ Word16 * pit_frac, /* (o) : chosen fraction (0, 1, 2 or 3). */
+ Word16 i_subfr, /* (i) : indicator for first subframe. */
+ Word16 t0_fr2, /* (i) : minimum value for resolution 1/2 */
+ Word16 t0_fr1, /* (i) : minimum value for resolution 1 */
+ Word16 L_subfr /* (i) : Length of subframe */
+ )
{
- Word32 fraction, i;
- Word16 t_min, t_max;
- Word16 max, t0, step, temp;
- Word16 *corr;
- Word16 corr_v[40]; /* Total length = t0_max-t0_min+1+2*L_inter */
-
- /* Find interval to compute normalized correlation */
-
- t_min = t0_min - L_INTERPOL1;
- t_max = t0_max + L_INTERPOL1;
- corr = &corr_v[-t_min];
- /* Compute normalized correlation between target and filtered excitation */
+ Word32 fraction, i;
+ Word16 t_min, t_max;
+ Word16 max, t0, step, temp;
+ Word16 *corr;
+ Word16 corr_v[40]; /* Total length = t0_max-t0_min+1+2*L_inter */
+
+ /* Find interval to compute normalized correlation */
+
+ t_min = t0_min - L_INTERPOL1;
+ t_max = t0_max + L_INTERPOL1;
+ corr = &corr_v[-t_min];
+ /* Compute normalized correlation between target and filtered excitation */
#ifdef ASM_OPT /* asm optimization branch */
Norm_corr_asm(exc, xn, h, L_subfr, t_min, t_max, corr);
#else
- Norm_Corr(exc, xn, h, L_subfr, t_min, t_max, corr);
+ Norm_Corr(exc, xn, h, L_subfr, t_min, t_max, corr);
#endif
- /* Find integer pitch */
-
- max = corr[t0_min];
- t0 = t0_min;
- for (i = t0_min + 1; i <= t0_max; i++)
- {
- if (corr[i] >= max)
- {
- max = corr[i];
- t0 = i;
- }
- }
- /* If first subframe and t0 >= t0_fr1, do not search fractionnal pitch */
- if ((i_subfr == 0) && (t0 >= t0_fr1))
- {
- *pit_frac = 0;
- return (t0);
- }
- /*------------------------------------------------------------------*
- * Search fractionnal pitch with 1/4 subsample resolution. *
- * Test the fractions around t0 and choose the one which maximizes *
- * the interpolated normalized correlation. *
- *------------------------------------------------------------------*/
-
- step = 1; /* 1/4 subsample resolution */
- fraction = -3;
- if ((t0_fr2 == PIT_MIN)||((i_subfr == 0) && (t0 >= t0_fr2)))
- {
- step = 2; /* 1/2 subsample resolution */
- fraction = -2;
- }
- if(t0 == t0_min)
- {
- fraction = 0;
- }
- max = Interpol_4(&corr[t0], fraction);
-
- for (i = fraction + step; i <= 3; i += step)
- {
- temp = Interpol_4(&corr[t0], i);
- if(temp > max)
- {
- max = temp;
- fraction = i;
- }
- }
- /* limit the fraction value in the interval [0,1,2,3] */
- if (fraction < 0)
- {
- fraction += UP_SAMP;
- t0 -= 1;
- }
- *pit_frac = fraction;
- return (t0);
+ /* Find integer pitch */
+
+ max = corr[t0_min];
+ t0 = t0_min;
+ for (i = t0_min + 1; i <= t0_max; i++)
+ {
+ if (corr[i] >= max)
+ {
+ max = corr[i];
+ t0 = i;
+ }
+ }
+ /* If first subframe and t0 >= t0_fr1, do not search fractional pitch */
+ if ((i_subfr == 0) && (t0 >= t0_fr1))
+ {
+ *pit_frac = 0;
+ return (t0);
+ }
+ /*------------------------------------------------------------------*
+ * Search fractional pitch with 1/4 subsample resolution. *
+ * Test the fractions around t0 and choose the one which maximizes *
+ * the interpolated normalized correlation. *
+ *------------------------------------------------------------------*/
+
+ step = 1; /* 1/4 subsample resolution */
+ fraction = -3;
+ if ((t0_fr2 == PIT_MIN)||((i_subfr == 0) && (t0 >= t0_fr2)))
+ {
+ step = 2; /* 1/2 subsample resolution */
+ fraction = -2;
+ }
+ if(t0 == t0_min)
+ {
+ fraction = 0;
+ }
+ max = Interpol_4(&corr[t0], fraction);
+
+ for (i = fraction + step; i <= 3; i += step)
+ {
+ temp = Interpol_4(&corr[t0], i);
+ if(temp > max)
+ {
+ max = temp;
+ fraction = i;
+ }
+ }
+ /* limit the fraction value in the interval [0,1,2,3] */
+ if (fraction < 0)
+ {
+ fraction += UP_SAMP;
+ t0 -= 1;
+ }
+ *pit_frac = fraction;
+ return (t0);
}
@@ -161,109 +161,109 @@ Word16 Pitch_fr4( /* (o) : pitch period.
************************************************************************************/
#ifndef ASM_OPT
static void Norm_Corr(
- Word16 exc[], /* (i) : excitation buffer */
- Word16 xn[], /* (i) : target vector */
- Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
- Word16 L_subfr,
- Word16 t_min, /* (i) : minimum value of pitch lag. */
- Word16 t_max, /* (i) : maximum value of pitch lag. */
- Word16 corr_norm[]) /* (o) Q15 : normalized correlation */
+ Word16 exc[], /* (i) : excitation buffer */
+ Word16 xn[], /* (i) : target vector */
+ Word16 h[], /* (i) Q15 : impulse response of synth/wgt filters */
+ Word16 L_subfr,
+ Word16 t_min, /* (i) : minimum value of pitch lag. */
+ Word16 t_max, /* (i) : maximum value of pitch lag. */
+ Word16 corr_norm[]) /* (o) Q15 : normalized correlation */
{
- Word32 i, k, t;
- Word32 corr, exp_corr, norm, exp, scale;
- Word16 exp_norm, excf[L_SUBFR], tmp;
- Word32 L_tmp, L_tmp1, L_tmp2;
+ Word32 i, k, t;
+ Word32 corr, exp_corr, norm, exp, scale;
+ Word16 exp_norm, excf[L_SUBFR], tmp;
+ Word32 L_tmp, L_tmp1, L_tmp2;
UNUSED(L_subfr);
- /* compute the filtered excitation for the first delay t_min */
- k = -t_min;
+ /* compute the filtered excitation for the first delay t_min */
+ k = -t_min;
#ifdef ASM_OPT /* asm optimization branch */
- Convolve_asm(&exc[k], h, excf, 64);
+ Convolve_asm(&exc[k], h, excf, 64);
#else
- Convolve(&exc[k], h, excf, 64);
+ Convolve(&exc[k], h, excf, 64);
#endif
- /* Compute rounded down 1/sqrt(energy of xn[]) */
- L_tmp = 0;
- for (i = 0; i < 64; i+=4)
- {
- L_tmp += (xn[i] * xn[i]);
- L_tmp += (xn[i+1] * xn[i+1]);
- L_tmp += (xn[i+2] * xn[i+2]);
- L_tmp += (xn[i+3] * xn[i+3]);
- }
-
- L_tmp = (L_tmp << 1) + 1;
- exp = norm_l(L_tmp);
- exp = (32 - exp);
- //exp = exp + 2; /* energy of xn[] x 2 + rounded up */
- scale = -(exp >> 1); /* (1<<scale) < 1/sqrt(energy rounded) */
-
- /* loop for every possible period */
-
- for (t = t_min; t <= t_max; t++)
- {
- /* Compute correlation between xn[] and excf[] */
- L_tmp = 0;
- L_tmp1 = 0;
- for (i = 0; i < 64; i+=4)
- {
- L_tmp += (xn[i] * excf[i]);
- L_tmp1 += (excf[i] * excf[i]);
- L_tmp += (xn[i+1] * excf[i+1]);
- L_tmp1 += (excf[i+1] * excf[i+1]);
- L_tmp += (xn[i+2] * excf[i+2]);
- L_tmp1 += (excf[i+2] * excf[i+2]);
- L_tmp += (xn[i+3] * excf[i+3]);
- L_tmp1 += (excf[i+3] * excf[i+3]);
- }
-
- L_tmp = (L_tmp << 1) + 1;
- L_tmp1 = (L_tmp1 << 1) + 1;
-
- exp = norm_l(L_tmp);
- L_tmp = (L_tmp << exp);
- exp_corr = (30 - exp);
- corr = extract_h(L_tmp);
-
- exp = norm_l(L_tmp1);
- L_tmp = (L_tmp1 << exp);
- exp_norm = (30 - exp);
-
- Isqrt_n(&L_tmp, &exp_norm);
- norm = extract_h(L_tmp);
-
- /* Normalize correlation = correlation * (1/sqrt(energy)) */
-
- L_tmp = vo_L_mult(corr, norm);
-
- L_tmp2 = exp_corr + exp_norm + scale;
- if(L_tmp2 < 0)
- {
- L_tmp2 = -L_tmp2;
- L_tmp = L_tmp >> L_tmp2;
- }
- else
- {
- L_tmp = L_tmp << L_tmp2;
- }
-
- corr_norm[t] = vo_round(L_tmp);
- /* modify the filtered excitation excf[] for the next iteration */
-
- if(t != t_max)
- {
- k = -(t + 1);
- tmp = exc[k];
- for (i = 63; i > 0; i--)
- {
- excf[i] = add1(vo_mult(tmp, h[i]), excf[i - 1]);
- }
- excf[0] = vo_mult(tmp, h[0]);
- }
- }
- return;
+ /* Compute rounded down 1/sqrt(energy of xn[]) */
+ L_tmp = 0;
+ for (i = 0; i < 64; i+=4)
+ {
+ L_tmp += (xn[i] * xn[i]);
+ L_tmp += (xn[i+1] * xn[i+1]);
+ L_tmp += (xn[i+2] * xn[i+2]);
+ L_tmp += (xn[i+3] * xn[i+3]);
+ }
+
+ L_tmp = (L_tmp << 1) + 1;
+ exp = norm_l(L_tmp);
+ exp = (32 - exp);
+ //exp = exp + 2; /* energy of xn[] x 2 + rounded up */
+ scale = -(exp >> 1); /* (1<<scale) < 1/sqrt(energy rounded) */
+
+ /* loop for every possible period */
+
+ for (t = t_min; t <= t_max; t++)
+ {
+ /* Compute correlation between xn[] and excf[] */
+ L_tmp = 0;
+ L_tmp1 = 0;
+ for (i = 0; i < 64; i+=4)
+ {
+ L_tmp += (xn[i] * excf[i]);
+ L_tmp1 += (excf[i] * excf[i]);
+ L_tmp += (xn[i+1] * excf[i+1]);
+ L_tmp1 += (excf[i+1] * excf[i+1]);
+ L_tmp += (xn[i+2] * excf[i+2]);
+ L_tmp1 += (excf[i+2] * excf[i+2]);
+ L_tmp += (xn[i+3] * excf[i+3]);
+ L_tmp1 += (excf[i+3] * excf[i+3]);
+ }
+
+ L_tmp = (L_tmp << 1) + 1;
+ L_tmp1 = (L_tmp1 << 1) + 1;
+
+ exp = norm_l(L_tmp);
+ L_tmp = (L_tmp << exp);
+ exp_corr = (30 - exp);
+ corr = extract_h(L_tmp);
+
+ exp = norm_l(L_tmp1);
+ L_tmp = (L_tmp1 << exp);
+ exp_norm = (30 - exp);
+
+ Isqrt_n(&L_tmp, &exp_norm);
+ norm = extract_h(L_tmp);
+
+ /* Normalize correlation = correlation * (1/sqrt(energy)) */
+
+ L_tmp = vo_L_mult(corr, norm);
+
+ L_tmp2 = exp_corr + exp_norm + scale;
+ if(L_tmp2 < 0)
+ {
+ L_tmp2 = -L_tmp2;
+ L_tmp = L_tmp >> L_tmp2;
+ }
+ else
+ {
+ L_tmp = L_tmp << L_tmp2;
+ }
+
+ corr_norm[t] = vo_round(L_tmp);
+ /* modify the filtered excitation excf[] for the next iteration */
+
+ if(t != t_max)
+ {
+ k = -(t + 1);
+ tmp = exc[k];
+ for (i = 63; i > 0; i--)
+ {
+ excf[i] = add1(vo_mult(tmp, h[i]), excf[i - 1]);
+ }
+ excf[0] = vo_mult(tmp, h[0]);
+ }
+ }
+ return;
}
#endif
@@ -276,10 +276,10 @@ static void Norm_Corr(
/* 1/4 resolution interpolation filter (-3 dB at 0.791*fs/2) in Q14 */
static Word16 inter4_1[4][8] =
{
- {-12, 420, -1732, 5429, 13418, -1242, 73, 32},
- {-26, 455, -2142, 9910, 9910, -2142, 455, -26},
- {32, 73, -1242, 13418, 5429, -1732, 420, -12},
- {206, -766, 1376, 14746, 1376, -766, 206, 0}
+ {-12, 420, -1732, 5429, 13418, -1242, 73, 32},
+ {-26, 455, -2142, 9910, 9910, -2142, 455, -26},
+ {32, 73, -1242, 13418, 5429, -1732, 420, -12},
+ {206, -766, 1376, 14746, 1376, -766, 206, 0}
};
/*** Coefficients in floating point
@@ -292,34 +292,34 @@ static float inter4_1[UP_SAMP*L_INTERPOL1+1] = {
***/
static Word16 Interpol_4( /* (o) : interpolated value */
- Word16 * x, /* (i) : input vector */
- Word32 frac /* (i) : fraction (-4..+3) */
- )
+ Word16 * x, /* (i) : input vector */
+ Word32 frac /* (i) : fraction (-4..+3) */
+ )
{
- Word16 sum;
- Word32 k, L_sum;
- Word16 *ptr;
-
- if (frac < 0)
- {
- frac += UP_SAMP;
- x--;
- }
- x = x - L_INTERPOL1 + 1;
- k = UP_SAMP - 1 - frac;
- ptr = &(inter4_1[k][0]);
-
- L_sum = vo_mult32(x[0], (*ptr++));
- L_sum += vo_mult32(x[1], (*ptr++));
- L_sum += vo_mult32(x[2], (*ptr++));
- L_sum += vo_mult32(x[3], (*ptr++));
- L_sum += vo_mult32(x[4], (*ptr++));
- L_sum += vo_mult32(x[5], (*ptr++));
- L_sum += vo_mult32(x[6], (*ptr++));
- L_sum += vo_mult32(x[7], (*ptr++));
-
- sum = extract_h(L_add(L_shl2(L_sum, 2), 0x8000));
- return (sum);
+ Word16 sum;
+ Word32 k, L_sum;
+ Word16 *ptr;
+
+ if (frac < 0)
+ {
+ frac += UP_SAMP;
+ x--;
+ }
+ x = x - L_INTERPOL1 + 1;
+ k = UP_SAMP - 1 - frac;
+ ptr = &(inter4_1[k][0]);
+
+ L_sum = vo_mult32(x[0], (*ptr++));
+ L_sum += vo_mult32(x[1], (*ptr++));
+ L_sum += vo_mult32(x[2], (*ptr++));
+ L_sum += vo_mult32(x[3], (*ptr++));
+ L_sum += vo_mult32(x[4], (*ptr++));
+ L_sum += vo_mult32(x[5], (*ptr++));
+ L_sum += vo_mult32(x[6], (*ptr++));
+ L_sum += vo_mult32(x[7], (*ptr++));
+
+ sum = extract_h(L_add(L_shl2(L_sum, 2), 0x8000));
+ return (sum);
}
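Interpol_4() evaluates the correlation at quarter-sample offsets: the fraction selects one phase of a small polyphase FIR bank and the value is an 8-tap dot product centred on the integer lag. A sketch of the same structure with a placeholder coefficient table (the real filter is inter4_1[] above, in Q14):

    #include <stdint.h>

    #define PHASES 4    /* quarter-sample resolution, like UP_SAMP */
    #define TAPS   8    /* 2 * L_INTERPOL1 */

    static int16_t interp_frac_sketch(const int16_t *x, int frac,
                                      const int16_t fir[PHASES][TAPS]) /* Q14 */
    {
        int32_t acc = 0;
        const int16_t *h;

        if (frac < 0) {             /* negative fractions step back one sample */
            frac += PHASES;
            x--;
        }
        h = fir[PHASES - 1 - frac];
        for (int k = 0; k < TAPS; k++)
            acc += (int32_t)x[k - TAPS / 2 + 1] * h[k];   /* window x[-3..4] */
        return (int16_t)((acc + 0x2000) >> 14);           /* round from Q14 */
    }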
diff --git a/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c b/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c
index 8404cf9..386cab3 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c
@@ -34,86 +34,86 @@
Word16 inter4_2[4][32] =
{
- {0,-2,4,-2,-10,38,-88,165,-275,424,-619,871,-1207,1699,-2598,5531,14031,-2147,780,-249,
- -16,153,-213,226,-209,175,-133,91,-55,28,-10,2},
+ {0,-2,4,-2,-10,38,-88,165,-275,424,-619,871,-1207,1699,-2598,5531,14031,-2147,780,-249,
+ -16,153,-213,226,-209,175,-133,91,-55,28,-10,2},
- {1,-7,19,-33,47,-52,43,-9,-60,175,-355,626,-1044,1749,-3267,10359,10359,-3267,1749,-1044,
- 626,-355,175,-60,-9,43,-52,47,-33,19, -7, 1},
+ {1,-7,19,-33,47,-52,43,-9,-60,175,-355,626,-1044,1749,-3267,10359,10359,-3267,1749,-1044,
+ 626,-355,175,-60,-9,43,-52,47,-33,19, -7, 1},
- {2,-10,28,-55,91,-133,175,-209,226,-213,153,-16,-249,780,-2147,14031,5531,-2598,1699,-1207,
- 871,-619,424,-275,165,-88,38,-10,-2,4,-2,0},
+ {2,-10,28,-55,91,-133,175,-209,226,-213,153,-16,-249,780,-2147,14031,5531,-2598,1699,-1207,
+ 871,-619,424,-275,165,-88,38,-10,-2,4,-2,0},
- {1,-7,22,-49,92,-153,231,-325,431,-544,656,-762,853,-923,968,15401,968,-923,853,-762,
- 656,-544,431,-325,231,-153,92,-49,22,-7, 1, 0}
+ {1,-7,22,-49,92,-153,231,-325,431,-544,656,-762,853,-923,968,15401,968,-923,853,-762,
+ 656,-544,431,-325,231,-153,92,-49,22,-7, 1, 0}
};
void Pred_lt4(
- Word16 exc[], /* in/out: excitation buffer */
- Word16 T0, /* input : integer pitch lag */
- Word16 frac, /* input : fraction of lag */
- Word16 L_subfr /* input : subframe size */
- )
+ Word16 exc[], /* in/out: excitation buffer */
+ Word16 T0, /* input : integer pitch lag */
+ Word16 frac, /* input : fraction of lag */
+ Word16 L_subfr /* input : subframe size */
+ )
{
- Word16 j, k, *x;
- Word32 L_sum;
- Word16 *ptr, *ptr1;
- Word16 *ptr2;
-
- x = exc - T0;
- frac = -frac;
- if (frac < 0)
- {
- frac += UP_SAMP;
- x--;
- }
- x -= 15; /* x = L_INTERPOL2 - 1 */
- k = 3 - frac; /* k = UP_SAMP - 1 - frac */
-
- ptr2 = &(inter4_2[k][0]);
- for (j = 0; j < L_subfr; j++)
- {
- ptr = ptr2;
- ptr1 = x;
- L_sum = vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
- L_sum += vo_mult32((*ptr1++), (*ptr++));
-
- L_sum = L_shl2(L_sum, 2);
- exc[j] = extract_h(L_add(L_sum, 0x8000));
- x++;
- }
-
- return;
+ Word16 j, k, *x;
+ Word32 L_sum;
+ Word16 *ptr, *ptr1;
+ Word16 *ptr2;
+
+ x = exc - T0;
+ frac = -frac;
+ if (frac < 0)
+ {
+ frac += UP_SAMP;
+ x--;
+ }
+ x -= 15; /* x = L_INTERPOL2 - 1 */
+ k = 3 - frac; /* k = UP_SAMP - 1 - frac */
+
+ ptr2 = &(inter4_2[k][0]);
+ for (j = 0; j < L_subfr; j++)
+ {
+ ptr = ptr2;
+ ptr1 = x;
+ L_sum = vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+ L_sum += vo_mult32((*ptr1++), (*ptr++));
+
+ L_sum = L_shl2(L_sum, 2);
+ exc[j] = extract_h(L_add(L_sum, 0x8000));
+ x++;
+ }
+
+ return;
}
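Pred_lt4() builds the adaptive-codebook excitation by filtering past excitation at a fractional delay; when the lag is shorter than the subframe, the loop re-reads samples it has just written, which is how a short pitch period is extended across the whole subframe. The integer-lag core of that idea, stripped of the 32-tap interpolation (illustrative sketch):

    #include <stdint.h>

    /* exc[] must hold at least T0 valid past samples before index 0. */
    static void pred_lt_int_sketch(int16_t *exc, int T0, int L_subfr)
    {
        for (int j = 0; j < L_subfr; j++)
            exc[j] = exc[j - T0];
    }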
diff --git a/media/libstagefright/codecs/amrwbenc/src/preemph.c b/media/libstagefright/codecs/amrwbenc/src/preemph.c
index c867bf7..70c8650 100644
--- a/media/libstagefright/codecs/amrwbenc/src/preemph.c
+++ b/media/libstagefright/codecs/amrwbenc/src/preemph.c
@@ -18,7 +18,7 @@
* File: preemph.c *
* *
* Description: Preemphasis: filtering through 1 - g z^-1 *
-* Preemph2 --> signal is multiplied by 2 *
+* Preemph2 --> signal is multiplied by 2 *
* *
************************************************************************/
@@ -26,62 +26,74 @@
#include "basic_op.h"
void Preemph(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : preemphasis coefficient */
- Word16 lg, /* (i) : lenght of filtering */
- Word16 * mem /* (i/o) : memory (x[-1]) */
- )
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : preemphasis coefficient */
+ Word16 lg, /* (i) : length of filtering */
+ Word16 * mem /* (i/o) : memory (x[-1]) */
+ )
{
- Word16 temp;
- Word32 i, L_tmp;
+ Word16 temp;
+ Word32 i, L_tmp;
- temp = x[lg - 1];
+ temp = x[lg - 1];
- for (i = lg - 1; i > 0; i--)
- {
- L_tmp = L_deposit_h(x[i]);
- L_tmp -= (x[i - 1] * mu)<<1;
- x[i] = (L_tmp + 0x8000)>>16;
- }
+ for (i = lg - 1; i > 0; i--)
+ {
+ L_tmp = L_deposit_h(x[i]);
+ L_tmp -= (x[i - 1] * mu)<<1;
+ x[i] = (L_tmp + 0x8000)>>16;
+ }
- L_tmp = L_deposit_h(x[0]);
- L_tmp -= ((*mem) * mu)<<1;
- x[0] = (L_tmp + 0x8000)>>16;
+ L_tmp = L_deposit_h(x[0]);
+ L_tmp -= ((*mem) * mu)<<1;
+ x[0] = (L_tmp + 0x8000)>>16;
- *mem = temp;
+ *mem = temp;
- return;
+ return;
}
void Preemph2(
- Word16 x[], /* (i/o) : input signal overwritten by the output */
- Word16 mu, /* (i) Q15 : preemphasis coefficient */
- Word16 lg, /* (i) : lenght of filtering */
- Word16 * mem /* (i/o) : memory (x[-1]) */
- )
+ Word16 x[], /* (i/o) : input signal overwritten by the output */
+ Word16 mu, /* (i) Q15 : preemphasis coefficient */
+ Word16 lg, /* (i) : length of filtering */
+ Word16 * mem /* (i/o) : memory (x[-1]) */
+ )
{
- Word16 temp;
- Word32 i, L_tmp;
-
- temp = x[lg - 1];
-
- for (i = (Word16) (lg - 1); i > 0; i--)
- {
- L_tmp = L_deposit_h(x[i]);
- L_tmp -= (x[i - 1] * mu)<<1;
- L_tmp = (L_tmp << 1);
- x[i] = (L_tmp + 0x8000)>>16;
- }
-
- L_tmp = L_deposit_h(x[0]);
- L_tmp -= ((*mem) * mu)<<1;
- L_tmp = (L_tmp << 1);
- x[0] = (L_tmp + 0x8000)>>16;
-
- *mem = temp;
-
- return;
+ Word16 temp;
+ Word32 i, L_tmp;
+
+ temp = x[lg - 1];
+
+ for (i = (Word16) (lg - 1); i > 0; i--)
+ {
+ L_tmp = L_deposit_h(x[i]);
+ L_tmp -= (x[i - 1] * mu)<<1; // only called with mu == 22282, so this won't overflow
+ if (L_tmp > INT32_MAX / 2) {
+ L_tmp = INT32_MAX / 2;
+ }
+ L_tmp = (L_tmp << 1);
+ if (L_tmp > INT32_MAX - 0x8000) {
+ L_tmp = INT32_MAX - 0x8000;
+ }
+ x[i] = (L_tmp + 0x8000)>>16;
+ }
+
+ L_tmp = L_deposit_h(x[0]);
+ L_tmp -= ((*mem) * mu)<<1;
+ if (L_tmp > INT32_MAX / 2) {
+ L_tmp = INT32_MAX / 2;
+ }
+ L_tmp = (L_tmp << 1);
+ if (L_tmp > INT32_MAX - 0x8000) {
+ L_tmp = INT32_MAX - 0x8000;
+ }
+ x[0] = (L_tmp + 0x8000)>>16;
+
+ *mem = temp;
+
+ return;
}
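The clamps added to Preemph2() above keep the doubled accumulator of the filter y[n] = 2 * (x[n] - mu * x[n-1]) from wrapping before the final round-and-shift. A standard-C restatement using a 64-bit intermediate to make the saturation explicit (a sketch, not the bit-exact basic-op version):

    #include <stdint.h>
    #include <limits.h>

    static void preemph2_sketch(int16_t *x, int16_t mu_q15, int lg, int16_t *mem)
    {
        int16_t last = x[lg - 1];

        for (int i = lg - 1; i >= 0; i--) {
            int32_t prev = (i > 0) ? x[i - 1] : *mem;
            int64_t acc  = (int64_t)x[i] * 65536          /* x[i] in Q16 */
                         - (int64_t)prev * mu_q15 * 2;    /* mu*x[i-1] in Q16 */
            acc *= 2;                                     /* the "multiplied by 2" */
            if (acc > INT32_MAX - 0x8000) acc = INT32_MAX - 0x8000;
            if (acc < INT32_MIN) acc = INT32_MIN;
            x[i] = (int16_t)((acc + 0x8000) >> 16);       /* round to 16 bits */
        }
        *mem = last;
    }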
diff --git a/media/libstagefright/codecs/amrwbenc/src/q_gain2.c b/media/libstagefright/codecs/amrwbenc/src/q_gain2.c
index e8ca043..bb797d8 100644
--- a/media/libstagefright/codecs/amrwbenc/src/q_gain2.c
+++ b/media/libstagefright/codecs/amrwbenc/src/q_gain2.c
@@ -45,300 +45,300 @@ static Word16 pred[PRED_ORDER] = {4096, 3277, 2458, 1638};
void Init_Q_gain2(
- Word16 * mem /* output :static memory (2 words) */
- )
+ Word16 * mem /* output :static memory (2 words) */
+ )
{
- Word32 i;
+ Word32 i;
- /* 4nd order quantizer energy predictor (init to -14.0 in Q10) */
- for (i = 0; i < PRED_ORDER; i++)
- {
- mem[i] = -14336; /* past_qua_en[i] */
- }
+ /* 4th order quantizer energy predictor (init to -14.0 in Q10) */
+ for (i = 0; i < PRED_ORDER; i++)
+ {
+ mem[i] = -14336; /* past_qua_en[i] */
+ }
- return;
+ return;
}
Word16 Q_gain2( /* Return index of quantization. */
- Word16 xn[], /* (i) Q_xn: Target vector. */
- Word16 y1[], /* (i) Q_xn: Adaptive codebook. */
- Word16 Q_xn, /* (i) : xn and y1 format */
- Word16 y2[], /* (i) Q9 : Filtered innovative vector. */
- Word16 code[], /* (i) Q9 : Innovative vector. */
- Word16 g_coeff[], /* (i) : Correlations <xn y1> <y1 y1> */
- /* Compute in G_pitch(). */
- Word16 L_subfr, /* (i) : Subframe lenght. */
- Word16 nbits, /* (i) : number of bits (6 or 7) */
- Word16 * gain_pit, /* (i/o)Q14: Pitch gain. */
- Word32 * gain_cod, /* (o) Q16 : Code gain. */
- Word16 gp_clip, /* (i) : Gp Clipping flag */
- Word16 * mem /* (i/o) : static memory (2 words) */
- )
+ Word16 xn[], /* (i) Q_xn: Target vector. */
+ Word16 y1[], /* (i) Q_xn: Adaptive codebook. */
+ Word16 Q_xn, /* (i) : xn and y1 format */
+ Word16 y2[], /* (i) Q9 : Filtered innovative vector. */
+ Word16 code[], /* (i) Q9 : Innovative vector. */
+ Word16 g_coeff[], /* (i) : Correlations <xn y1> <y1 y1> */
+ /* Compute in G_pitch(). */
+ Word16 L_subfr, /* (i) : Subframe length. */
+ Word16 nbits, /* (i) : number of bits (6 or 7) */
+ Word16 * gain_pit, /* (i/o)Q14: Pitch gain. */
+ Word32 * gain_cod, /* (o) Q16 : Code gain. */
+ Word16 gp_clip, /* (i) : Gp Clipping flag */
+ Word16 * mem /* (i/o) : static memory (2 words) */
+ )
{
- Word16 index, *p, min_ind, size;
- Word16 exp, frac, gcode0, exp_gcode0, e_max, exp_code, qua_ener;
- Word16 g_pitch, g2_pitch, g_code, g_pit_cod, g2_code, g2_code_lo;
- Word16 coeff[5], coeff_lo[5], exp_coeff[5];
- Word16 exp_max[5];
- Word32 i, j, L_tmp, dist_min;
- Word16 *past_qua_en, *t_qua_gain;
-
- past_qua_en = mem;
-
- /*-----------------------------------------------------------------*
- * - Find the initial quantization pitch index *
- * - Set gains search range *
- *-----------------------------------------------------------------*/
- if (nbits == 6)
- {
- t_qua_gain = t_qua_gain6b;
- min_ind = 0;
- size = RANGE;
-
- if(gp_clip == 1)
- {
- size = size - 16; /* limit gain pitch to 1.0 */
- }
- } else
- {
- t_qua_gain = t_qua_gain7b;
-
- p = t_qua_gain7b + RANGE; /* pt at 1/4th of table */
-
- j = nb_qua_gain7b - RANGE;
-
- if (gp_clip == 1)
- {
- j = j - 27; /* limit gain pitch to 1.0 */
- }
- min_ind = 0;
- g_pitch = *gain_pit;
-
- for (i = 0; i < j; i++, p += 2)
- {
- if (g_pitch > *p)
- {
- min_ind = min_ind + 1;
- }
- }
- size = RANGE;
- }
-
- /*------------------------------------------------------------------*
- * Compute coefficient need for the quantization. *
- * *
- * coeff[0] = y1 y1 *
- * coeff[1] = -2 xn y1 *
- * coeff[2] = y2 y2 *
- * coeff[3] = -2 xn y2 *
- * coeff[4] = 2 y1 y2 *
- * *
- * Product <y1 y1> and <xn y1> have been compute in G_pitch() and *
- * are in vector g_coeff[]. *
- *------------------------------------------------------------------*/
-
- coeff[0] = g_coeff[0];
- exp_coeff[0] = g_coeff[1];
- coeff[1] = negate(g_coeff[2]); /* coeff[1] = -2 xn y1 */
- exp_coeff[1] = g_coeff[3] + 1;
-
- /* Compute scalar product <y2[],y2[]> */
+ Word16 index, *p, min_ind, size;
+ Word16 exp, frac, gcode0, exp_gcode0, e_max, exp_code, qua_ener;
+ Word16 g_pitch, g2_pitch, g_code, g_pit_cod, g2_code, g2_code_lo;
+ Word16 coeff[5], coeff_lo[5], exp_coeff[5];
+ Word16 exp_max[5];
+ Word32 i, j, L_tmp, dist_min;
+ Word16 *past_qua_en, *t_qua_gain;
+
+ past_qua_en = mem;
+
+ /*-----------------------------------------------------------------*
+ * - Find the initial quantization pitch index *
+ * - Set gains search range *
+ *-----------------------------------------------------------------*/
+ if (nbits == 6)
+ {
+ t_qua_gain = t_qua_gain6b;
+ min_ind = 0;
+ size = RANGE;
+
+ if(gp_clip == 1)
+ {
+ size = size - 16; /* limit gain pitch to 1.0 */
+ }
+ } else
+ {
+ t_qua_gain = t_qua_gain7b;
+
+ p = t_qua_gain7b + RANGE; /* pt at 1/4th of table */
+
+ j = nb_qua_gain7b - RANGE;
+
+ if (gp_clip == 1)
+ {
+ j = j - 27; /* limit gain pitch to 1.0 */
+ }
+ min_ind = 0;
+ g_pitch = *gain_pit;
+
+ for (i = 0; i < j; i++, p += 2)
+ {
+ if (g_pitch > *p)
+ {
+ min_ind = min_ind + 1;
+ }
+ }
+ size = RANGE;
+ }
+
+ /*------------------------------------------------------------------*
+ * Compute the coefficients needed for the quantization. *
+ * *
+ * coeff[0] = y1 y1 *
+ * coeff[1] = -2 xn y1 *
+ * coeff[2] = y2 y2 *
+ * coeff[3] = -2 xn y2 *
+ * coeff[4] = 2 y1 y2 *
+ * *
+ * Product <y1 y1> and <xn y1> have been computed in G_pitch() and *
+ * are in vector g_coeff[]. *
+ *------------------------------------------------------------------*/
+
+ coeff[0] = g_coeff[0];
+ exp_coeff[0] = g_coeff[1];
+ coeff[1] = negate(g_coeff[2]); /* coeff[1] = -2 xn y1 */
+ exp_coeff[1] = g_coeff[3] + 1;
+
+ /* Compute scalar product <y2[],y2[]> */
#ifdef ASM_OPT /* asm optimization branch */
- coeff[2] = extract_h(Dot_product12_asm(y2, y2, L_subfr, &exp));
+ coeff[2] = extract_h(Dot_product12_asm(y2, y2, L_subfr, &exp));
#else
- coeff[2] = extract_h(Dot_product12(y2, y2, L_subfr, &exp));
+ coeff[2] = extract_h(Dot_product12(y2, y2, L_subfr, &exp));
#endif
- exp_coeff[2] = (exp - 18) + (Q_xn << 1); /* -18 (y2 Q9) */
+ exp_coeff[2] = (exp - 18) + (Q_xn << 1); /* -18 (y2 Q9) */
- /* Compute scalar product -2*<xn[],y2[]> */
+ /* Compute scalar product -2*<xn[],y2[]> */
#ifdef ASM_OPT /* asm optimization branch */
- coeff[3] = extract_h(L_negate(Dot_product12_asm(xn, y2, L_subfr, &exp)));
+ coeff[3] = extract_h(L_negate(Dot_product12_asm(xn, y2, L_subfr, &exp)));
#else
- coeff[3] = extract_h(L_negate(Dot_product12(xn, y2, L_subfr, &exp)));
+ coeff[3] = extract_h(L_negate(Dot_product12(xn, y2, L_subfr, &exp)));
#endif
- exp_coeff[3] = (exp - 8) + Q_xn; /* -9 (y2 Q9), +1 (2 xn y2) */
+ exp_coeff[3] = (exp - 8) + Q_xn; /* -9 (y2 Q9), +1 (2 xn y2) */
- /* Compute scalar product 2*<y1[],y2[]> */
+ /* Compute scalar product 2*<y1[],y2[]> */
#ifdef ASM_OPT /* asm optimization branch */
- coeff[4] = extract_h(Dot_product12_asm(y1, y2, L_subfr, &exp));
+ coeff[4] = extract_h(Dot_product12_asm(y1, y2, L_subfr, &exp));
#else
- coeff[4] = extract_h(Dot_product12(y1, y2, L_subfr, &exp));
+ coeff[4] = extract_h(Dot_product12(y1, y2, L_subfr, &exp));
#endif
- exp_coeff[4] = (exp - 8) + Q_xn; /* -9 (y2 Q9), +1 (2 y1 y2) */
-
- /*-----------------------------------------------------------------*
- * Find energy of code and compute: *
- * *
- * L_tmp = MEAN_ENER - 10log10(energy of code/ L_subfr) *
- * = MEAN_ENER - 3.0103*log2(energy of code/ L_subfr) *
- *-----------------------------------------------------------------*/
+ exp_coeff[4] = (exp - 8) + Q_xn; /* -9 (y2 Q9), +1 (2 y1 y2) */
+
+ /*-----------------------------------------------------------------*
+ * Find energy of code and compute: *
+ * *
+ * L_tmp = MEAN_ENER - 10log10(energy of code/ L_subfr) *
+ * = MEAN_ENER - 3.0103*log2(energy of code/ L_subfr) *
+ *-----------------------------------------------------------------*/
#ifdef ASM_OPT /* asm optimization branch */
- L_tmp = Dot_product12_asm(code, code, L_subfr, &exp_code);
+ L_tmp = Dot_product12_asm(code, code, L_subfr, &exp_code);
#else
- L_tmp = Dot_product12(code, code, L_subfr, &exp_code);
+ L_tmp = Dot_product12(code, code, L_subfr, &exp_code);
#endif
- /* exp_code: -18 (code in Q9), -6 (/L_subfr), -31 (L_tmp Q31->Q0) */
- exp_code = (exp_code - (18 + 6 + 31));
-
- Log2(L_tmp, &exp, &frac);
- exp += exp_code;
- L_tmp = Mpy_32_16(exp, frac, -24660); /* x -3.0103(Q13) -> Q14 */
-
- L_tmp += (MEAN_ENER * 8192)<<1; /* + MEAN_ENER in Q14 */
-
- /*-----------------------------------------------------------------*
- * Compute gcode0. *
- * = Sum(i=0,1) pred[i]*past_qua_en[i] + mean_ener - ener_code *
- *-----------------------------------------------------------------*/
- L_tmp = (L_tmp << 10); /* From Q14 to Q24 */
- L_tmp += (pred[0] * past_qua_en[0])<<1; /* Q13*Q10 -> Q24 */
- L_tmp += (pred[1] * past_qua_en[1])<<1; /* Q13*Q10 -> Q24 */
- L_tmp += (pred[2] * past_qua_en[2])<<1; /* Q13*Q10 -> Q24 */
- L_tmp += (pred[3] * past_qua_en[3])<<1; /* Q13*Q10 -> Q24 */
-
- gcode0 = extract_h(L_tmp); /* From Q24 to Q8 */
-
- /*-----------------------------------------------------------------*
- * gcode0 = pow(10.0, gcode0/20) *
- * = pow(2, 3.321928*gcode0/20) *
- * = pow(2, 0.166096*gcode0) *
- *-----------------------------------------------------------------*/
-
- L_tmp = vo_L_mult(gcode0, 5443); /* *0.166096 in Q15 -> Q24 */
- L_tmp = L_tmp >> 8; /* From Q24 to Q16 */
- VO_L_Extract(L_tmp, &exp_gcode0, &frac); /* Extract exponent of gcode0 */
-
- gcode0 = (Word16)(Pow2(14, frac)); /* Put 14 as exponent so that */
- /* output of Pow2() will be: */
- /* 16384 < Pow2() <= 32767 */
- exp_gcode0 -= 14;
-
- /*-------------------------------------------------------------------------*
- * Find the best quantizer *
- * ~~~~~~~~~~~~~~~~~~~~~~~ *
- * Before doing the computation we need to aling exponents of coeff[] *
- * to be sure to have the maximum precision. *
- * *
- * In the table the pitch gains are in Q14, the code gains are in Q11 and *
- * are multiply by gcode0 which have been multiply by 2^exp_gcode0. *
- * Also when we compute g_pitch*g_pitch, g_code*g_code and g_pitch*g_code *
- * we divide by 2^15. *
- * Considering all the scaling above we have: *
- * *
- * exp_code = exp_gcode0-11+15 = exp_gcode0+4 *
- * *
- * g_pitch*g_pitch = -14-14+15 *
- * g_pitch = -14 *
- * g_code*g_code = (2*exp_code)+15 *
- * g_code = exp_code *
- * g_pitch*g_code = -14 + exp_code +15 *
- * *
- * g_pitch*g_pitch * coeff[0] ;exp_max0 = exp_coeff[0] - 13 *
- * g_pitch * coeff[1] ;exp_max1 = exp_coeff[1] - 14 *
- * g_code*g_code * coeff[2] ;exp_max2 = exp_coeff[2] +15+(2*exp_code) *
- * g_code * coeff[3] ;exp_max3 = exp_coeff[3] + exp_code *
- * g_pitch*g_code * coeff[4] ;exp_max4 = exp_coeff[4] + 1 + exp_code *
- *-------------------------------------------------------------------------*/
-
- exp_code = (exp_gcode0 + 4);
- exp_max[0] = (exp_coeff[0] - 13);
- exp_max[1] = (exp_coeff[1] - 14);
- exp_max[2] = (exp_coeff[2] + (15 + (exp_code << 1)));
- exp_max[3] = (exp_coeff[3] + exp_code);
- exp_max[4] = (exp_coeff[4] + (1 + exp_code));
-
- /* Find maximum exponant */
-
- e_max = exp_max[0];
- for (i = 1; i < 5; i++)
- {
- if(exp_max[i] > e_max)
- {
- e_max = exp_max[i];
- }
- }
-
- /* align coeff[] and save in special 32 bit double precision */
-
- for (i = 0; i < 5; i++)
- {
- j = add1(vo_sub(e_max, exp_max[i]), 2);/* /4 to avoid overflow */
- L_tmp = L_deposit_h(coeff[i]);
- L_tmp = L_shr(L_tmp, j);
- VO_L_Extract(L_tmp, &coeff[i], &coeff_lo[i]);
- coeff_lo[i] = (coeff_lo[i] >> 3); /* lo >> 3 */
- }
-
- /* Codebook search */
- dist_min = MAX_32;
- p = &t_qua_gain[min_ind << 1];
-
- index = 0;
- for (i = 0; i < size; i++)
- {
- g_pitch = *p++;
- g_code = *p++;
-
- g_code = ((g_code * gcode0) + 0x4000)>>15;
- g2_pitch = ((g_pitch * g_pitch) + 0x4000)>>15;
- g_pit_cod = ((g_code * g_pitch) + 0x4000)>>15;
- L_tmp = (g_code * g_code)<<1;
- VO_L_Extract(L_tmp, &g2_code, &g2_code_lo);
-
- L_tmp = (coeff[2] * g2_code_lo)<<1;
- L_tmp = (L_tmp >> 3);
- L_tmp += (coeff_lo[0] * g2_pitch)<<1;
- L_tmp += (coeff_lo[1] * g_pitch)<<1;
- L_tmp += (coeff_lo[2] * g2_code)<<1;
- L_tmp += (coeff_lo[3] * g_code)<<1;
- L_tmp += (coeff_lo[4] * g_pit_cod)<<1;
- L_tmp = (L_tmp >> 12);
- L_tmp += (coeff[0] * g2_pitch)<<1;
- L_tmp += (coeff[1] * g_pitch)<<1;
- L_tmp += (coeff[2] * g2_code)<<1;
- L_tmp += (coeff[3] * g_code)<<1;
- L_tmp += (coeff[4] * g_pit_cod)<<1;
-
- if(L_tmp < dist_min)
- {
- dist_min = L_tmp;
- index = i;
- }
- }
-
- /* Read the quantized gains */
- index = index + min_ind;
- p = &t_qua_gain[(index + index)];
- *gain_pit = *p++; /* selected pitch gain in Q14 */
- g_code = *p++; /* selected code gain in Q11 */
-
- L_tmp = vo_L_mult(g_code, gcode0); /* Q11*Q0 -> Q12 */
- L_tmp = L_shl(L_tmp, (exp_gcode0 + 4)); /* Q12 -> Q16 */
-
- *gain_cod = L_tmp; /* gain of code in Q16 */
-
- /*---------------------------------------------------*
- * qua_ener = 20*log10(g_code) *
- * = 6.0206*log2(g_code) *
- * = 6.0206*(log2(g_codeQ11) - 11) *
- *---------------------------------------------------*/
-
- L_tmp = L_deposit_l(g_code);
- Log2(L_tmp, &exp, &frac);
- exp -= 11;
- L_tmp = Mpy_32_16(exp, frac, 24660); /* x 6.0206 in Q12 */
-
- qua_ener = (Word16)(L_tmp >> 3); /* result in Q10 */
-
- /* update table of past quantized energies */
-
- past_qua_en[3] = past_qua_en[2];
- past_qua_en[2] = past_qua_en[1];
- past_qua_en[1] = past_qua_en[0];
- past_qua_en[0] = qua_ener;
-
- return (index);
+ /* exp_code: -18 (code in Q9), -6 (/L_subfr), -31 (L_tmp Q31->Q0) */
+ exp_code = (exp_code - (18 + 6 + 31));
+
+ Log2(L_tmp, &exp, &frac);
+ exp += exp_code;
+ L_tmp = Mpy_32_16(exp, frac, -24660); /* x -3.0103(Q13) -> Q14 */
+
+ L_tmp += (MEAN_ENER * 8192)<<1; /* + MEAN_ENER in Q14 */
+
+ /*-----------------------------------------------------------------*
+ * Compute gcode0. *
+ * = Sum(i=0,1) pred[i]*past_qua_en[i] + mean_ener - ener_code *
+ *-----------------------------------------------------------------*/
+ L_tmp = (L_tmp << 10); /* From Q14 to Q24 */
+ L_tmp += (pred[0] * past_qua_en[0])<<1; /* Q13*Q10 -> Q24 */
+ L_tmp += (pred[1] * past_qua_en[1])<<1; /* Q13*Q10 -> Q24 */
+ L_tmp += (pred[2] * past_qua_en[2])<<1; /* Q13*Q10 -> Q24 */
+ L_tmp += (pred[3] * past_qua_en[3])<<1; /* Q13*Q10 -> Q24 */
+
+ gcode0 = extract_h(L_tmp); /* From Q24 to Q8 */
+
+ /*-----------------------------------------------------------------*
+ * gcode0 = pow(10.0, gcode0/20) *
+ * = pow(2, 3.321928*gcode0/20) *
+ * = pow(2, 0.166096*gcode0) *
+ *-----------------------------------------------------------------*/
+
+ L_tmp = vo_L_mult(gcode0, 5443); /* *0.166096 in Q15 -> Q24 */
+ L_tmp = L_tmp >> 8; /* From Q24 to Q16 */
+ VO_L_Extract(L_tmp, &exp_gcode0, &frac); /* Extract exponent of gcode0 */
+
+ gcode0 = (Word16)(Pow2(14, frac)); /* Put 14 as exponent so that */
+ /* output of Pow2() will be: */
+ /* 16384 < Pow2() <= 32767 */
+ exp_gcode0 -= 14;
+
+ /*-------------------------------------------------------------------------*
+ * Find the best quantizer *
+ * ~~~~~~~~~~~~~~~~~~~~~~~ *
+ * Before doing the computation we need to align exponents of coeff[] *
+ * to be sure to have the maximum precision. *
+ * *
+ * In the table the pitch gains are in Q14, the code gains are in Q11 and *
+ * are multiplied by gcode0, which has been multiplied by 2^exp_gcode0. *
+ * Also when we compute g_pitch*g_pitch, g_code*g_code and g_pitch*g_code *
+ * we divide by 2^15. *
+ * Considering all the scaling above we have: *
+ * *
+ * exp_code = exp_gcode0-11+15 = exp_gcode0+4 *
+ * *
+ * g_pitch*g_pitch = -14-14+15 *
+ * g_pitch = -14 *
+ * g_code*g_code = (2*exp_code)+15 *
+ * g_code = exp_code *
+ * g_pitch*g_code = -14 + exp_code +15 *
+ * *
+ * g_pitch*g_pitch * coeff[0] ;exp_max0 = exp_coeff[0] - 13 *
+ * g_pitch * coeff[1] ;exp_max1 = exp_coeff[1] - 14 *
+ * g_code*g_code * coeff[2] ;exp_max2 = exp_coeff[2] +15+(2*exp_code) *
+ * g_code * coeff[3] ;exp_max3 = exp_coeff[3] + exp_code *
+ * g_pitch*g_code * coeff[4] ;exp_max4 = exp_coeff[4] + 1 + exp_code *
+ *-------------------------------------------------------------------------*/
+
+ exp_code = (exp_gcode0 + 4);
+ exp_max[0] = (exp_coeff[0] - 13);
+ exp_max[1] = (exp_coeff[1] - 14);
+ exp_max[2] = (exp_coeff[2] + (15 + (exp_code << 1)));
+ exp_max[3] = (exp_coeff[3] + exp_code);
+ exp_max[4] = (exp_coeff[4] + (1 + exp_code));
+
+ /* Find maximum exponent */
+
+ e_max = exp_max[0];
+ for (i = 1; i < 5; i++)
+ {
+ if(exp_max[i] > e_max)
+ {
+ e_max = exp_max[i];
+ }
+ }
+
+ /* align coeff[] and save in special 32 bit double precision */
+
+ for (i = 0; i < 5; i++)
+ {
+ j = add1(vo_sub(e_max, exp_max[i]), 2);/* /4 to avoid overflow */
+ L_tmp = L_deposit_h(coeff[i]);
+ L_tmp = L_shr(L_tmp, j);
+ VO_L_Extract(L_tmp, &coeff[i], &coeff_lo[i]);
+ coeff_lo[i] = (coeff_lo[i] >> 3); /* lo >> 3 */
+ }
+
+ /* Codebook search */
+ dist_min = MAX_32;
+ p = &t_qua_gain[min_ind << 1];
+
+ index = 0;
+ for (i = 0; i < size; i++)
+ {
+ g_pitch = *p++;
+ g_code = *p++;
+
+ g_code = ((g_code * gcode0) + 0x4000)>>15;
+ g2_pitch = ((g_pitch * g_pitch) + 0x4000)>>15;
+ g_pit_cod = ((g_code * g_pitch) + 0x4000)>>15;
+ L_tmp = (g_code * g_code)<<1;
+ VO_L_Extract(L_tmp, &g2_code, &g2_code_lo);
+
+ L_tmp = (coeff[2] * g2_code_lo)<<1;
+ L_tmp = (L_tmp >> 3);
+ L_tmp += (coeff_lo[0] * g2_pitch)<<1;
+ L_tmp += (coeff_lo[1] * g_pitch)<<1;
+ L_tmp += (coeff_lo[2] * g2_code)<<1;
+ L_tmp += (coeff_lo[3] * g_code)<<1;
+ L_tmp += (coeff_lo[4] * g_pit_cod)<<1;
+ L_tmp = (L_tmp >> 12);
+ L_tmp += (coeff[0] * g2_pitch)<<1;
+ L_tmp += (coeff[1] * g_pitch)<<1;
+ L_tmp += (coeff[2] * g2_code)<<1;
+ L_tmp += (coeff[3] * g_code)<<1;
+ L_tmp += (coeff[4] * g_pit_cod)<<1;
+
+ if(L_tmp < dist_min)
+ {
+ dist_min = L_tmp;
+ index = i;
+ }
+ }
+
+ /* Read the quantized gains */
+ index = index + min_ind;
+ p = &t_qua_gain[(index + index)];
+ *gain_pit = *p++; /* selected pitch gain in Q14 */
+ g_code = *p++; /* selected code gain in Q11 */
+
+ L_tmp = vo_L_mult(g_code, gcode0); /* Q11*Q0 -> Q12 */
+ L_tmp = L_shl(L_tmp, (exp_gcode0 + 4)); /* Q12 -> Q16 */
+
+ *gain_cod = L_tmp; /* gain of code in Q16 */
+
+ /*---------------------------------------------------*
+ * qua_ener = 20*log10(g_code) *
+ * = 6.0206*log2(g_code) *
+ * = 6.0206*(log2(g_codeQ11) - 11) *
+ *---------------------------------------------------*/
+
+ L_tmp = L_deposit_l(g_code);
+ Log2(L_tmp, &exp, &frac);
+ exp -= 11;
+ L_tmp = Mpy_32_16(exp, frac, 24660); /* x 6.0206 in Q12 */
+
+ qua_ener = (Word16)(L_tmp >> 3); /* result in Q10 */
+
+ /* update table of past quantized energies */
+
+ past_qua_en[3] = past_qua_en[2];
+ past_qua_en[2] = past_qua_en[1];
+ past_qua_en[1] = past_qua_en[0];
+ past_qua_en[0] = qua_ener;
+
+ return (index);
}
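
The comment block above relies on the identity qua_ener = 20*log10(g_code) = 6.0206*log2(g_code), applied to a Q11 gain by subtracting 11 from the exponent (24660 is 6.0206 in Q12). A minimal floating-point cross-check of that identity, as an illustrative sketch in plain C and not part of the patch (int16_t stands in for Word16):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int16_t g_code_q11 = 4096;                 /* example gain: 2.0 in Q11 */
        double  g          = g_code_q11 / 2048.0;

        double direct   = 20.0 * log10(g);
        double via_log2 = 6.0206 * (log2((double)g_code_q11) - 11.0);

        printf("%f %f\n", direct, via_log2);       /* both print ~6.0206 dB */
        return 0;
    }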
diff --git a/media/libstagefright/codecs/amrwbenc/src/q_pulse.c b/media/libstagefright/codecs/amrwbenc/src/q_pulse.c
index d658602..fe0bdda 100644
--- a/media/libstagefright/codecs/amrwbenc/src/q_pulse.c
+++ b/media/libstagefright/codecs/amrwbenc/src/q_pulse.c
@@ -29,372 +29,372 @@
#define NB_POS 16 /* pos in track, mask for sign bit */
Word32 quant_1p_N1( /* (o) return N+1 bits */
- Word16 pos, /* (i) position of the pulse */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos, /* (i) position of the pulse */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 mask;
- Word32 index;
-
- mask = (1 << N) - 1; /* mask = ((1<<N)-1); */
- /*-------------------------------------------------------*
- * Quantization of 1 pulse with N+1 bits: *
- *-------------------------------------------------------*/
- index = L_deposit_l((Word16) (pos & mask));
- if ((pos & NB_POS) != 0)
- {
- index = vo_L_add(index, L_deposit_l(1 << N)); /* index += 1 << N; */
- }
- return (index);
+ Word16 mask;
+ Word32 index;
+
+ mask = (1 << N) - 1; /* mask = ((1<<N)-1); */
+ /*-------------------------------------------------------*
+ * Quantization of 1 pulse with N+1 bits: *
+ *-------------------------------------------------------*/
+ index = L_deposit_l((Word16) (pos & mask));
+ if ((pos & NB_POS) != 0)
+ {
+ index = vo_L_add(index, L_deposit_l(1 << N)); /* index += 1 << N; */
+ }
+ return (index);
}
Word32 quant_2p_2N1( /* (o) return (2*N)+1 bits */
- Word16 pos1, /* (i) position of the pulse 1 */
- Word16 pos2, /* (i) position of the pulse 2 */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos1, /* (i) position of the pulse 1 */
+ Word16 pos2, /* (i) position of the pulse 2 */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 mask, tmp;
- Word32 index;
- mask = (1 << N) - 1; /* mask = ((1<<N)-1); */
- /*-------------------------------------------------------*
- * Quantization of 2 pulses with 2*N+1 bits: *
- *-------------------------------------------------------*/
- if (((pos2 ^ pos1) & NB_POS) == 0)
- {
- /* sign of 1st pulse == sign of 2th pulse */
- if(pos1 <= pos2) /* ((pos1 - pos2) <= 0) */
- {
- /* index = ((pos1 & mask) << N) + (pos2 & mask); */
- index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
- } else
- {
- /* ((pos2 & mask) << N) + (pos1 & mask); */
- index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
- }
- if ((pos1 & NB_POS) != 0)
- {
- tmp = (N << 1);
- index = vo_L_add(index, (1L << tmp)); /* index += 1 << (2*N); */
- }
- } else
- {
- /* sign of 1st pulse != sign of 2th pulse */
- if (vo_sub((Word16) (pos1 & mask), (Word16) (pos2 & mask)) <= 0)
- {
- /* index = ((pos2 & mask) << N) + (pos1 & mask); */
- index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
- if ((pos2 & NB_POS) != 0)
- {
- tmp = (N << 1); /* index += 1 << (2*N); */
- index = vo_L_add(index, (1L << tmp));
- }
- } else
- {
- /* index = ((pos1 & mask) << N) + (pos2 & mask); */
- index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
- if ((pos1 & NB_POS) != 0)
- {
- tmp = (N << 1);
- index = vo_L_add(index, (1 << tmp)); /* index += 1 << (2*N); */
- }
- }
- }
- return (index);
+ Word16 mask, tmp;
+ Word32 index;
+ mask = (1 << N) - 1; /* mask = ((1<<N)-1); */
+ /*-------------------------------------------------------*
+ * Quantization of 2 pulses with 2*N+1 bits: *
+ *-------------------------------------------------------*/
+ if (((pos2 ^ pos1) & NB_POS) == 0)
+ {
+ /* sign of 1st pulse == sign of 2nd pulse */
+ if(pos1 <= pos2) /* ((pos1 - pos2) <= 0) */
+ {
+ /* index = ((pos1 & mask) << N) + (pos2 & mask); */
+ index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
+ } else
+ {
+ /* ((pos2 & mask) << N) + (pos1 & mask); */
+ index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
+ }
+ if ((pos1 & NB_POS) != 0)
+ {
+ tmp = (N << 1);
+ index = vo_L_add(index, (1L << tmp)); /* index += 1 << (2*N); */
+ }
+ } else
+ {
+ /* sign of 1st pulse != sign of 2nd pulse */
+ if (vo_sub((Word16) (pos1 & mask), (Word16) (pos2 & mask)) <= 0)
+ {
+ /* index = ((pos2 & mask) << N) + (pos1 & mask); */
+ index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
+ if ((pos2 & NB_POS) != 0)
+ {
+ tmp = (N << 1); /* index += 1 << (2*N); */
+ index = vo_L_add(index, (1L << tmp));
+ }
+ } else
+ {
+ /* index = ((pos1 & mask) << N) + (pos2 & mask); */
+ index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
+ if ((pos1 & NB_POS) != 0)
+ {
+ tmp = (N << 1);
+ index = vo_L_add(index, (1 << tmp)); /* index += 1 << (2*N); */
+ }
+ }
+ }
+ return (index);
}
Word32 quant_3p_3N1( /* (o) return (3*N)+1 bits */
- Word16 pos1, /* (i) position of the pulse 1 */
- Word16 pos2, /* (i) position of the pulse 2 */
- Word16 pos3, /* (i) position of the pulse 3 */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos1, /* (i) position of the pulse 1 */
+ Word16 pos2, /* (i) position of the pulse 2 */
+ Word16 pos3, /* (i) position of the pulse 3 */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 nb_pos;
- Word32 index;
-
- nb_pos =(1 <<(N - 1)); /* nb_pos = (1<<(N-1)); */
- /*-------------------------------------------------------*
- * Quantization of 3 pulses with 3*N+1 bits: *
- *-------------------------------------------------------*/
- if (((pos1 ^ pos2) & nb_pos) == 0)
- {
- index = quant_2p_2N1(pos1, pos2, sub(N, 1)); /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
- /* index += (pos1 & nb_pos) << N; */
- index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
- /* index += quant_1p_N1(pos3, N) << (2*N); */
- index = vo_L_add(index, (quant_1p_N1(pos3, N)<<(N << 1)));
-
- } else if (((pos1 ^ pos3) & nb_pos) == 0)
- {
- index = quant_2p_2N1(pos1, pos3, sub(N, 1)); /* index = quant_2p_2N1(pos1, pos3, (N-1)); */
- index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
- /* index += (pos1 & nb_pos) << N; */
- index = vo_L_add(index, (quant_1p_N1(pos2, N) << (N << 1)));
- /* index += quant_1p_N1(pos2, N) <<
- * (2*N); */
- } else
- {
- index = quant_2p_2N1(pos2, pos3, (N - 1)); /* index = quant_2p_2N1(pos2, pos3, (N-1)); */
- /* index += (pos2 & nb_pos) << N; */
- index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
- /* index += quant_1p_N1(pos1, N) << (2*N); */
- index = vo_L_add(index, (quant_1p_N1(pos1, N) << (N << 1)));
- }
- return (index);
+ Word16 nb_pos;
+ Word32 index;
+
+ nb_pos =(1 <<(N - 1)); /* nb_pos = (1<<(N-1)); */
+ /*-------------------------------------------------------*
+ * Quantization of 3 pulses with 3*N+1 bits: *
+ *-------------------------------------------------------*/
+ if (((pos1 ^ pos2) & nb_pos) == 0)
+ {
+ index = quant_2p_2N1(pos1, pos2, sub(N, 1)); /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
+ /* index += (pos1 & nb_pos) << N; */
+ index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+ /* index += quant_1p_N1(pos3, N) << (2*N); */
+ index = vo_L_add(index, (quant_1p_N1(pos3, N)<<(N << 1)));
+
+ } else if (((pos1 ^ pos3) & nb_pos) == 0)
+ {
+ index = quant_2p_2N1(pos1, pos3, sub(N, 1)); /* index = quant_2p_2N1(pos1, pos3, (N-1)); */
+ index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+ /* index += (pos1 & nb_pos) << N; */
+ index = vo_L_add(index, (quant_1p_N1(pos2, N) << (N << 1)));
+ /* index += quant_1p_N1(pos2, N) <<
+ * (2*N); */
+ } else
+ {
+ index = quant_2p_2N1(pos2, pos3, (N - 1)); /* index = quant_2p_2N1(pos2, pos3, (N-1)); */
+ /* index += (pos2 & nb_pos) << N; */
+ index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
+ /* index += quant_1p_N1(pos1, N) << (2*N); */
+ index = vo_L_add(index, (quant_1p_N1(pos1, N) << (N << 1)));
+ }
+ return (index);
}
Word32 quant_4p_4N1( /* (o) return (4*N)+1 bits */
- Word16 pos1, /* (i) position of the pulse 1 */
- Word16 pos2, /* (i) position of the pulse 2 */
- Word16 pos3, /* (i) position of the pulse 3 */
- Word16 pos4, /* (i) position of the pulse 4 */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos1, /* (i) position of the pulse 1 */
+ Word16 pos2, /* (i) position of the pulse 2 */
+ Word16 pos3, /* (i) position of the pulse 3 */
+ Word16 pos4, /* (i) position of the pulse 4 */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 nb_pos;
- Word32 index;
-
- nb_pos = 1 << (N - 1); /* nb_pos = (1<<(N-1)); */
- /*-------------------------------------------------------*
- * Quantization of 4 pulses with 4*N+1 bits: *
- *-------------------------------------------------------*/
- if (((pos1 ^ pos2) & nb_pos) == 0)
- {
- index = quant_2p_2N1(pos1, pos2, sub(N, 1)); /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
- /* index += (pos1 & nb_pos) << N; */
- index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
- /* index += quant_2p_2N1(pos3, pos4, N) << (2*N); */
- index = vo_L_add(index, (quant_2p_2N1(pos3, pos4, N) << (N << 1)));
- } else if (((pos1 ^ pos3) & nb_pos) == 0)
- {
- index = quant_2p_2N1(pos1, pos3, (N - 1));
- /* index += (pos1 & nb_pos) << N; */
- index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
- /* index += quant_2p_2N1(pos2, pos4, N) << (2*N); */
- index = vo_L_add(index, (quant_2p_2N1(pos2, pos4, N) << (N << 1)));
- } else
- {
- index = quant_2p_2N1(pos2, pos3, (N - 1));
- /* index += (pos2 & nb_pos) << N; */
- index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
- /* index += quant_2p_2N1(pos1, pos4, N) << (2*N); */
- index = vo_L_add(index, (quant_2p_2N1(pos1, pos4, N) << (N << 1)));
- }
- return (index);
+ Word16 nb_pos;
+ Word32 index;
+
+ nb_pos = 1 << (N - 1); /* nb_pos = (1<<(N-1)); */
+ /*-------------------------------------------------------*
+ * Quantization of 4 pulses with 4*N+1 bits: *
+ *-------------------------------------------------------*/
+ if (((pos1 ^ pos2) & nb_pos) == 0)
+ {
+ index = quant_2p_2N1(pos1, pos2, sub(N, 1)); /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
+ /* index += (pos1 & nb_pos) << N; */
+ index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+ /* index += quant_2p_2N1(pos3, pos4, N) << (2*N); */
+ index = vo_L_add(index, (quant_2p_2N1(pos3, pos4, N) << (N << 1)));
+ } else if (((pos1 ^ pos3) & nb_pos) == 0)
+ {
+ index = quant_2p_2N1(pos1, pos3, (N - 1));
+ /* index += (pos1 & nb_pos) << N; */
+ index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+ /* index += quant_2p_2N1(pos2, pos4, N) << (2*N); */
+ index = vo_L_add(index, (quant_2p_2N1(pos2, pos4, N) << (N << 1)));
+ } else
+ {
+ index = quant_2p_2N1(pos2, pos3, (N - 1));
+ /* index += (pos2 & nb_pos) << N; */
+ index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
+ /* index += quant_2p_2N1(pos1, pos4, N) << (2*N); */
+ index = vo_L_add(index, (quant_2p_2N1(pos1, pos4, N) << (N << 1)));
+ }
+ return (index);
}
Word32 quant_4p_4N( /* (o) return 4*N bits */
- Word16 pos[], /* (i) position of the pulse 1..4 */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos[], /* (i) position of the pulse 1..4 */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 nb_pos, mask __unused, n_1, tmp;
- Word16 posA[4], posB[4];
- Word32 i, j, k, index;
-
- n_1 = (Word16) (N - 1);
- nb_pos = (1 << n_1); /* nb_pos = (1<<n_1); */
- mask = vo_sub((1 << N), 1); /* mask = ((1<<N)-1); */
-
- i = 0;
- j = 0;
- for (k = 0; k < 4; k++)
- {
- if ((pos[k] & nb_pos) == 0)
- {
- posA[i++] = pos[k];
- } else
- {
- posB[j++] = pos[k];
- }
- }
-
- switch (i)
- {
- case 0:
- tmp = vo_sub((N << 2), 3); /* index = 1 << ((4*N)-3); */
- index = (1L << tmp);
- /* index += quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1); */
- index = vo_L_add(index, quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1));
- break;
- case 1:
- /* index = quant_1p_N1(posA[0], n_1) << ((3*n_1)+1); */
- tmp = add1((Word16)((vo_L_mult(3, n_1) >> 1)), 1);
- index = L_shl(quant_1p_N1(posA[0], n_1), tmp);
- /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
- index = vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
- break;
- case 2:
- tmp = ((n_1 << 1) + 1); /* index = quant_2p_2N1(posA[0], posA[1], n_1) << ((2*n_1)+1); */
- index = L_shl(quant_2p_2N1(posA[0], posA[1], n_1), tmp);
- /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
- index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1));
- break;
- case 3:
- /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << N; */
- index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), N);
- index = vo_L_add(index, quant_1p_N1(posB[0], n_1)); /* index += quant_1p_N1(posB[0], n_1); */
- break;
- case 4:
- index = quant_4p_4N1(posA[0], posA[1], posA[2], posA[3], n_1);
- break;
- default:
- index = 0;
- fprintf(stderr, "Error in function quant_4p_4N\n");
- }
- tmp = ((N << 2) - 2); /* index += (i & 3) << ((4*N)-2); */
- index = vo_L_add(index, L_shl((L_deposit_l(i) & (3L)), tmp));
-
- return (index);
+ Word16 nb_pos, mask __unused, n_1, tmp;
+ Word16 posA[4], posB[4];
+ Word32 i, j, k, index;
+
+ n_1 = (Word16) (N - 1);
+ nb_pos = (1 << n_1); /* nb_pos = (1<<n_1); */
+ mask = vo_sub((1 << N), 1); /* mask = ((1<<N)-1); */
+
+ i = 0;
+ j = 0;
+ for (k = 0; k < 4; k++)
+ {
+ if ((pos[k] & nb_pos) == 0)
+ {
+ posA[i++] = pos[k];
+ } else
+ {
+ posB[j++] = pos[k];
+ }
+ }
+
+ switch (i)
+ {
+ case 0:
+ tmp = vo_sub((N << 2), 3); /* index = 1 << ((4*N)-3); */
+ index = (1L << tmp);
+ /* index += quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1); */
+ index = vo_L_add(index, quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1));
+ break;
+ case 1:
+ /* index = quant_1p_N1(posA[0], n_1) << ((3*n_1)+1); */
+ tmp = add1((Word16)((vo_L_mult(3, n_1) >> 1)), 1);
+ index = L_shl(quant_1p_N1(posA[0], n_1), tmp);
+ /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
+ index = vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
+ break;
+ case 2:
+ tmp = ((n_1 << 1) + 1); /* index = quant_2p_2N1(posA[0], posA[1], n_1) << ((2*n_1)+1); */
+ index = L_shl(quant_2p_2N1(posA[0], posA[1], n_1), tmp);
+ /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
+ index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1));
+ break;
+ case 3:
+ /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << N; */
+ index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), N);
+ index = vo_L_add(index, quant_1p_N1(posB[0], n_1)); /* index += quant_1p_N1(posB[0], n_1); */
+ break;
+ case 4:
+ index = quant_4p_4N1(posA[0], posA[1], posA[2], posA[3], n_1);
+ break;
+ default:
+ index = 0;
+ fprintf(stderr, "Error in function quant_4p_4N\n");
+ }
+ tmp = ((N << 2) - 2); /* index += (i & 3) << ((4*N)-2); */
+ index = vo_L_add(index, L_shl((L_deposit_l(i) & (3L)), tmp));
+
+ return (index);
}
Word32 quant_5p_5N( /* (o) return 5*N bits */
- Word16 pos[], /* (i) position of the pulse 1..5 */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos[], /* (i) position of the pulse 1..5 */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 nb_pos, n_1, tmp;
- Word16 posA[5], posB[5];
- Word32 i, j, k, index, tmp2;
-
- n_1 = (Word16) (N - 1);
- nb_pos = (1 << n_1); /* nb_pos = (1<<n_1); */
-
- i = 0;
- j = 0;
- for (k = 0; k < 5; k++)
- {
- if ((pos[k] & nb_pos) == 0)
- {
- posA[i++] = pos[k];
- } else
- {
- posB[j++] = pos[k];
- }
- }
-
- switch (i)
- {
- case 0:
- tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1); /* ((5*N)-1)) */
- index = L_shl(1L, tmp); /* index = 1 << ((5*N)-1); */
- tmp = add1((N << 1), 1); /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1);*/
- tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
- index = vo_L_add(index, tmp2);
- index = vo_L_add(index, quant_2p_2N1(posB[3], posB[4], N)); /* index += quant_2p_2N1(posB[3], posB[4], N); */
- break;
- case 1:
- tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1); /* index = 1 << ((5*N)-1); */
- index = L_shl(1L, tmp);
- tmp = add1((N << 1), 1); /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) <<((2*N)+1); */
- tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
- index = vo_L_add(index, tmp2);
- index = vo_L_add(index, quant_2p_2N1(posB[3], posA[0], N)); /* index += quant_2p_2N1(posB[3], posA[0], N); */
- break;
- case 2:
- tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1); /* ((5*N)-1)) */
- index = L_shl(1L, tmp); /* index = 1 << ((5*N)-1); */
- tmp = add1((N << 1), 1); /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1); */
- tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
- index = vo_L_add(index, tmp2);
- index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], N)); /* index += quant_2p_2N1(posA[0], posA[1], N); */
- break;
- case 3:
- tmp = add1((N << 1), 1); /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1); */
- index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
- index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], N)); /* index += quant_2p_2N1(posB[0], posB[1], N); */
- break;
- case 4:
- tmp = add1((N << 1), 1); /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1); */
- index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
- index = vo_L_add(index, quant_2p_2N1(posA[3], posB[0], N)); /* index += quant_2p_2N1(posA[3], posB[0], N); */
- break;
- case 5:
- tmp = add1((N << 1), 1); /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1); */
- index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
- index = vo_L_add(index, quant_2p_2N1(posA[3], posA[4], N)); /* index += quant_2p_2N1(posA[3], posA[4], N); */
- break;
- default:
- index = 0;
- fprintf(stderr, "Error in function quant_5p_5N\n");
- }
-
- return (index);
+ Word16 nb_pos, n_1, tmp;
+ Word16 posA[5], posB[5];
+ Word32 i, j, k, index, tmp2;
+
+ n_1 = (Word16) (N - 1);
+ nb_pos = (1 << n_1); /* nb_pos = (1<<n_1); */
+
+ i = 0;
+ j = 0;
+ for (k = 0; k < 5; k++)
+ {
+ if ((pos[k] & nb_pos) == 0)
+ {
+ posA[i++] = pos[k];
+ } else
+ {
+ posB[j++] = pos[k];
+ }
+ }
+
+ switch (i)
+ {
+ case 0:
+ tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1); /* ((5*N)-1)) */
+ index = L_shl(1L, tmp); /* index = 1 << ((5*N)-1); */
+ tmp = add1((N << 1), 1); /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1);*/
+ tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
+ index = vo_L_add(index, tmp2);
+ index = vo_L_add(index, quant_2p_2N1(posB[3], posB[4], N)); /* index += quant_2p_2N1(posB[3], posB[4], N); */
+ break;
+ case 1:
+ tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1); /* index = 1 << ((5*N)-1); */
+ index = L_shl(1L, tmp);
+ tmp = add1((N << 1), 1); /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) <<((2*N)+1); */
+ tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
+ index = vo_L_add(index, tmp2);
+ index = vo_L_add(index, quant_2p_2N1(posB[3], posA[0], N)); /* index += quant_2p_2N1(posB[3], posA[0], N); */
+ break;
+ case 2:
+ tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1); /* ((5*N)-1)) */
+ index = L_shl(1L, tmp); /* index = 1 << ((5*N)-1); */
+ tmp = add1((N << 1), 1); /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1); */
+ tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
+ index = vo_L_add(index, tmp2);
+ index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], N)); /* index += quant_2p_2N1(posA[0], posA[1], N); */
+ break;
+ case 3:
+ tmp = add1((N << 1), 1); /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1); */
+ index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
+ index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], N)); /* index += quant_2p_2N1(posB[0], posB[1], N); */
+ break;
+ case 4:
+ tmp = add1((N << 1), 1); /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1); */
+ index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
+ index = vo_L_add(index, quant_2p_2N1(posA[3], posB[0], N)); /* index += quant_2p_2N1(posA[3], posB[0], N); */
+ break;
+ case 5:
+ tmp = add1((N << 1), 1); /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1); */
+ index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
+ index = vo_L_add(index, quant_2p_2N1(posA[3], posA[4], N)); /* index += quant_2p_2N1(posA[3], posA[4], N); */
+ break;
+ default:
+ index = 0;
+ fprintf(stderr, "Error in function quant_5p_5N\n");
+ }
+
+ return (index);
}
Word32 quant_6p_6N_2( /* (o) return (6*N)-2 bits */
- Word16 pos[], /* (i) position of the pulse 1..6 */
- Word16 N) /* (i) number of bits for position */
+ Word16 pos[], /* (i) position of the pulse 1..6 */
+ Word16 N) /* (i) number of bits for position */
{
- Word16 nb_pos, n_1;
- Word16 posA[6], posB[6];
- Word32 i, j, k, index;
-
- /* !! N and n_1 are constants -> it doesn't need to be operated by Basic Operators */
- n_1 = (Word16) (N - 1);
- nb_pos = (1 << n_1); /* nb_pos = (1<<n_1); */
-
- i = 0;
- j = 0;
- for (k = 0; k < 6; k++)
- {
- if ((pos[k] & nb_pos) == 0)
- {
- posA[i++] = pos[k];
- } else
- {
- posB[j++] = pos[k];
- }
- }
-
- switch (i)
- {
- case 0:
- index = (1 << (Word16) (6 * N - 5)); /* index = 1 << ((6*N)-5); */
- index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
- index = vo_L_add(index, quant_1p_N1(posB[5], n_1)); /* index += quant_1p_N1(posB[5], n_1); */
- break;
- case 1:
- index = (1L << (Word16) (6 * N - 5)); /* index = 1 << ((6*N)-5); */
- index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
- index = vo_L_add(index, quant_1p_N1(posA[0], n_1)); /* index += quant_1p_N1(posA[0], n_1); */
- break;
- case 2:
- index = (1L << (Word16) (6 * N - 5)); /* index = 1 << ((6*N)-5); */
- /* index += quant_4p_4N(posB, n_1) << ((2*n_1)+1); */
- index = vo_L_add(index, (quant_4p_4N(posB, n_1) << (Word16) (2 * n_1 + 1)));
- index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], n_1)); /* index += quant_2p_2N1(posA[0], posA[1], n_1); */
- break;
- case 3:
- index = (quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << (Word16) (3 * n_1 + 1));
- /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((3*n_1)+1); */
- index =vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
- /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
- break;
- case 4:
- i = 2;
- index = (quant_4p_4N(posA, n_1) << (Word16) (2 * n_1 + 1)); /* index = quant_4p_4N(posA, n_1) << ((2*n_1)+1); */
- index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1)); /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
- break;
- case 5:
- i = 1;
- index = (quant_5p_5N(posA, n_1) << N); /* index = quant_5p_5N(posA, n_1) << N; */
- index = vo_L_add(index, quant_1p_N1(posB[0], n_1)); /* index += quant_1p_N1(posB[0], n_1); */
- break;
- case 6:
- i = 0;
- index = (quant_5p_5N(posA, n_1) << N); /* index = quant_5p_5N(posA, n_1) << N; */
- index = vo_L_add(index, quant_1p_N1(posA[5], n_1)); /* index += quant_1p_N1(posA[5], n_1); */
- break;
- default:
- index = 0;
- fprintf(stderr, "Error in function quant_6p_6N_2\n");
- }
- index = vo_L_add(index, ((L_deposit_l(i) & 3L) << (Word16) (6 * N - 4))); /* index += (i & 3) << ((6*N)-4); */
-
- return (index);
+ Word16 nb_pos, n_1;
+ Word16 posA[6], posB[6];
+ Word32 i, j, k, index;
+
+ /* !! N and n_1 are constants -> they do not need to go through the basic operators */
+ n_1 = (Word16) (N - 1);
+ nb_pos = (1 << n_1); /* nb_pos = (1<<n_1); */
+
+ i = 0;
+ j = 0;
+ for (k = 0; k < 6; k++)
+ {
+ if ((pos[k] & nb_pos) == 0)
+ {
+ posA[i++] = pos[k];
+ } else
+ {
+ posB[j++] = pos[k];
+ }
+ }
+
+ switch (i)
+ {
+ case 0:
+ index = (1 << (Word16) (6 * N - 5)); /* index = 1 << ((6*N)-5); */
+ index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
+ index = vo_L_add(index, quant_1p_N1(posB[5], n_1)); /* index += quant_1p_N1(posB[5], n_1); */
+ break;
+ case 1:
+ index = (1L << (Word16) (6 * N - 5)); /* index = 1 << ((6*N)-5); */
+ index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
+ index = vo_L_add(index, quant_1p_N1(posA[0], n_1)); /* index += quant_1p_N1(posA[0], n_1); */
+ break;
+ case 2:
+ index = (1L << (Word16) (6 * N - 5)); /* index = 1 << ((6*N)-5); */
+ /* index += quant_4p_4N(posB, n_1) << ((2*n_1)+1); */
+ index = vo_L_add(index, (quant_4p_4N(posB, n_1) << (Word16) (2 * n_1 + 1)));
+ index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], n_1)); /* index += quant_2p_2N1(posA[0], posA[1], n_1); */
+ break;
+ case 3:
+ index = (quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << (Word16) (3 * n_1 + 1));
+ /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((3*n_1)+1); */
+ index =vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
+ /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
+ break;
+ case 4:
+ i = 2;
+ index = (quant_4p_4N(posA, n_1) << (Word16) (2 * n_1 + 1)); /* index = quant_4p_4N(posA, n_1) << ((2*n_1)+1); */
+ index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1)); /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
+ break;
+ case 5:
+ i = 1;
+ index = (quant_5p_5N(posA, n_1) << N); /* index = quant_5p_5N(posA, n_1) << N; */
+ index = vo_L_add(index, quant_1p_N1(posB[0], n_1)); /* index += quant_1p_N1(posB[0], n_1); */
+ break;
+ case 6:
+ i = 0;
+ index = (quant_5p_5N(posA, n_1) << N); /* index = quant_5p_5N(posA, n_1) << N; */
+ index = vo_L_add(index, quant_1p_N1(posA[5], n_1)); /* index += quant_1p_N1(posA[5], n_1); */
+ break;
+ default:
+ index = 0;
+ fprintf(stderr, "Error in function quant_6p_6N_2\n");
+ }
+ index = vo_L_add(index, ((L_deposit_l(i) & 3L) << (Word16) (6 * N - 4))); /* index += (i & 3) << ((6*N)-4); */
+
+ return (index);
}
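
The quant_*p_* routines above all build on the same packing: the low N bits of an index carry the pulse position within the track, and the bit above them flags the track half given by (pos & NB_POS). A standalone sketch of the one-pulse case, assuming plain stdint types in place of Word16/Word32 (illustration only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define NB_POS 16                               /* same track mask as above */

    /* Low N bits: position inside the track; bit N: track half (sign). */
    static int32_t quant_1p_N1_ref(int16_t pos, int16_t N)
    {
        int16_t mask  = (int16_t)((1 << N) - 1);
        int32_t index = pos & mask;                 /* position bits */
        if (pos & NB_POS)
            index += (int32_t)1 << N;               /* half/sign bit on top */
        return index;
    }

    int main(void)
    {
        printf("%d\n", quant_1p_N1_ref(5, 4));      /* 5  : pulse at position 5 */
        printf("%d\n", quant_1p_N1_ref(21, 4));     /* 21 : other half, position 5 */
        return 0;
    }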
diff --git a/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c b/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c
index fc2f00d..eac98e2 100644
--- a/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c
+++ b/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c
@@ -33,30 +33,30 @@
*------------------------------------------------------------------*/
void Qisf_ns(
- Word16 * isf1, /* input : ISF in the frequency domain (0..0.5) */
- Word16 * isf_q, /* output: quantized ISF */
- Word16 * indice /* output: quantization indices */
- )
+ Word16 * isf1, /* input : ISF in the frequency domain (0..0.5) */
+ Word16 * isf_q, /* output: quantized ISF */
+ Word16 * indice /* output: quantization indices */
+ )
{
- Word16 i;
- Word32 tmp;
+ Word16 i;
+ Word32 tmp;
- for (i = 0; i < ORDER; i++)
- {
- isf_q[i] = sub(isf1[i], mean_isf_noise[i]);
- }
+ for (i = 0; i < ORDER; i++)
+ {
+ isf_q[i] = sub(isf1[i], mean_isf_noise[i]);
+ }
- indice[0] = Sub_VQ(&isf_q[0], dico1_isf_noise, 2, SIZE_BK_NOISE1, &tmp);
- indice[1] = Sub_VQ(&isf_q[2], dico2_isf_noise, 3, SIZE_BK_NOISE2, &tmp);
- indice[2] = Sub_VQ(&isf_q[5], dico3_isf_noise, 3, SIZE_BK_NOISE3, &tmp);
- indice[3] = Sub_VQ(&isf_q[8], dico4_isf_noise, 4, SIZE_BK_NOISE4, &tmp);
- indice[4] = Sub_VQ(&isf_q[12], dico5_isf_noise, 4, SIZE_BK_NOISE5, &tmp);
+ indice[0] = Sub_VQ(&isf_q[0], dico1_isf_noise, 2, SIZE_BK_NOISE1, &tmp);
+ indice[1] = Sub_VQ(&isf_q[2], dico2_isf_noise, 3, SIZE_BK_NOISE2, &tmp);
+ indice[2] = Sub_VQ(&isf_q[5], dico3_isf_noise, 3, SIZE_BK_NOISE3, &tmp);
+ indice[3] = Sub_VQ(&isf_q[8], dico4_isf_noise, 4, SIZE_BK_NOISE4, &tmp);
+ indice[4] = Sub_VQ(&isf_q[12], dico5_isf_noise, 4, SIZE_BK_NOISE5, &tmp);
- /* decoding the ISFs */
+ /* decoding the ISFs */
- Disf_ns(indice, isf_q);
+ Disf_ns(indice, isf_q);
- return;
+ return;
}
/********************************************************************
@@ -70,41 +70,41 @@ void Qisf_ns(
*********************************************************************/
void Disf_ns(
- Word16 * indice, /* input: quantization indices */
- Word16 * isf_q /* input : ISF in the frequency domain (0..0.5) */
- )
+ Word16 * indice, /* input: quantization indices */
+ Word16 * isf_q /* input : ISF in the frequency domain (0..0.5) */
+ )
{
- Word16 i;
-
- for (i = 0; i < 2; i++)
- {
- isf_q[i] = dico1_isf_noise[indice[0] * 2 + i];
- }
- for (i = 0; i < 3; i++)
- {
- isf_q[i + 2] = dico2_isf_noise[indice[1] * 3 + i];
- }
- for (i = 0; i < 3; i++)
- {
- isf_q[i + 5] = dico3_isf_noise[indice[2] * 3 + i];
- }
- for (i = 0; i < 4; i++)
- {
- isf_q[i + 8] = dico4_isf_noise[indice[3] * 4 + i];
- }
- for (i = 0; i < 4; i++)
- {
- isf_q[i + 12] = dico5_isf_noise[indice[4] * 4 + i];
- }
-
- for (i = 0; i < ORDER; i++)
- {
- isf_q[i] = add(isf_q[i], mean_isf_noise[i]);
- }
-
- Reorder_isf(isf_q, ISF_GAP, ORDER);
-
- return;
+ Word16 i;
+
+ for (i = 0; i < 2; i++)
+ {
+ isf_q[i] = dico1_isf_noise[indice[0] * 2 + i];
+ }
+ for (i = 0; i < 3; i++)
+ {
+ isf_q[i + 2] = dico2_isf_noise[indice[1] * 3 + i];
+ }
+ for (i = 0; i < 3; i++)
+ {
+ isf_q[i + 5] = dico3_isf_noise[indice[2] * 3 + i];
+ }
+ for (i = 0; i < 4; i++)
+ {
+ isf_q[i + 8] = dico4_isf_noise[indice[3] * 4 + i];
+ }
+ for (i = 0; i < 4; i++)
+ {
+ isf_q[i + 12] = dico5_isf_noise[indice[4] * 4 + i];
+ }
+
+ for (i = 0; i < ORDER; i++)
+ {
+ isf_q[i] = add(isf_q[i], mean_isf_noise[i]);
+ }
+
+ Reorder_isf(isf_q, ISF_GAP, ORDER);
+
+ return;
}
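
Qisf_ns above removes the noise-ISF mean and then quantizes five sub-vectors (dimensions 2, 3, 3, 4 and 4) with Sub_VQ. A floating-point sketch of that kind of minimum-squared-error codebook search, with made-up example data; this is an illustration, not the codec's Q15 arithmetic:

    #include <float.h>
    #include <stdio.h>

    /* Return the codebook row with minimum squared error against x[0..dim-1]. */
    static size_t sub_vq_ref(const float *x, const float *dico,
                             size_t dim, size_t dico_size, float *err)
    {
        size_t best   = 0;
        float  best_d = FLT_MAX;
        for (size_t i = 0; i < dico_size; i++) {
            float d = 0.0f;
            for (size_t j = 0; j < dim; j++) {
                float t = x[j] - dico[i * dim + j];
                d += t * t;
            }
            if (d < best_d) { best_d = d; best = i; }
        }
        *err = best_d;
        return best;
    }

    int main(void)
    {
        const float dico[3 * 2] = { 0.f, 0.f,  1.f, 1.f,  2.f, 2.f };
        const float x[2]        = { 0.9f, 1.2f };
        float err;
        size_t idx = sub_vq_ref(x, dico, 2, 3, &err);
        printf("index=%zu err=%f\n", idx, err);     /* index=1 */
        return 0;
    }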
diff --git a/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c b/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c
index c711cd0..bec334e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c
+++ b/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c
@@ -36,13 +36,13 @@
/* private functions */
static void VQ_stage1(
- Word16 * x, /* input : ISF residual vector */
- Word16 * dico, /* input : quantization codebook */
- Word16 dim, /* input : dimention of vector */
- Word16 dico_size, /* input : size of quantization codebook */
- Word16 * index, /* output: indices of survivors */
- Word16 surv /* input : number of survivor */
- );
+ Word16 * x, /* input : ISF residual vector */
+ Word16 * dico, /* input : quantization codebook */
+ Word16 dim, /* input : dimension of vector */
+ Word16 dico_size, /* input : size of quantization codebook */
+ Word16 * index, /* output: indices of survivors */
+ Word16 surv /* input : number of survivors */
+ );
/**************************************************************************
* Function: Qpisf_2s_46B() *
@@ -54,84 +54,84 @@ static void VQ_stage1(
***************************************************************************/
void Qpisf_2s_46b(
- Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
- Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
- Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
- Word16 * indice, /* (o) : quantization indices */
- Word16 nb_surv /* (i) : number of survivor (1, 2, 3 or 4) */
- )
+ Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+ Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
+ Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
+ Word16 * indice, /* (o) : quantization indices */
+ Word16 nb_surv /* (i) : number of survivors (1, 2, 3 or 4) */
+ )
{
- Word16 tmp_ind[5];
- Word16 surv1[N_SURV_MAX]; /* indices of survivors from 1st stage */
- Word32 i, k, temp, min_err, distance;
- Word16 isf[ORDER];
- Word16 isf_stage2[ORDER];
-
- for (i = 0; i < ORDER; i++)
- {
- isf[i] = vo_sub(isf1[i], mean_isf[i]);
- isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
- }
-
- VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
-
- distance = MAX_32;
-
- for (k = 0; k < nb_surv; k++)
- {
- for (i = 0; i < 9; i++)
- {
- isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
- }
- tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf, 3, SIZE_BK21, &min_err);
- temp = min_err;
- tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico22_isf, 3, SIZE_BK22, &min_err);
- temp = vo_L_add(temp, min_err);
- tmp_ind[2] = Sub_VQ(&isf_stage2[6], dico23_isf, 3, SIZE_BK23, &min_err);
- temp = vo_L_add(temp, min_err);
-
- if(temp < distance)
- {
- distance = temp;
- indice[0] = surv1[k];
- for (i = 0; i < 3; i++)
- {
- indice[i + 2] = tmp_ind[i];
- }
- }
- }
-
-
- VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
-
- distance = MAX_32;
-
- for (k = 0; k < nb_surv; k++)
- {
- for (i = 0; i < 7; i++)
- {
- isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
- }
-
- tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico24_isf, 3, SIZE_BK24, &min_err);
- temp = min_err;
- tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico25_isf, 4, SIZE_BK25, &min_err);
- temp = vo_L_add(temp, min_err);
-
- if(temp < distance)
- {
- distance = temp;
- indice[1] = surv1[k];
- for (i = 0; i < 2; i++)
- {
- indice[i + 5] = tmp_ind[i];
- }
- }
- }
-
- Dpisf_2s_46b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
-
- return;
+ Word16 tmp_ind[5];
+ Word16 surv1[N_SURV_MAX]; /* indices of survivors from 1st stage */
+ Word32 i, k, temp, min_err, distance;
+ Word16 isf[ORDER];
+ Word16 isf_stage2[ORDER];
+
+ for (i = 0; i < ORDER; i++)
+ {
+ isf[i] = vo_sub(isf1[i], mean_isf[i]);
+ isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
+ }
+
+ VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
+
+ distance = MAX_32;
+
+ for (k = 0; k < nb_surv; k++)
+ {
+ for (i = 0; i < 9; i++)
+ {
+ isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
+ }
+ tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf, 3, SIZE_BK21, &min_err);
+ temp = min_err;
+ tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico22_isf, 3, SIZE_BK22, &min_err);
+ temp = vo_L_add(temp, min_err);
+ tmp_ind[2] = Sub_VQ(&isf_stage2[6], dico23_isf, 3, SIZE_BK23, &min_err);
+ temp = vo_L_add(temp, min_err);
+
+ if(temp < distance)
+ {
+ distance = temp;
+ indice[0] = surv1[k];
+ for (i = 0; i < 3; i++)
+ {
+ indice[i + 2] = tmp_ind[i];
+ }
+ }
+ }
+
+
+ VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
+
+ distance = MAX_32;
+
+ for (k = 0; k < nb_surv; k++)
+ {
+ for (i = 0; i < 7; i++)
+ {
+ isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
+ }
+
+ tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico24_isf, 3, SIZE_BK24, &min_err);
+ temp = min_err;
+ tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico25_isf, 4, SIZE_BK25, &min_err);
+ temp = vo_L_add(temp, min_err);
+
+ if(temp < distance)
+ {
+ distance = temp;
+ indice[1] = surv1[k];
+ for (i = 0; i < 2; i++)
+ {
+ indice[i + 5] = tmp_ind[i];
+ }
+ }
+ }
+
+ Dpisf_2s_46b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
+
+ return;
}
/*****************************************************************************
@@ -144,76 +144,76 @@ void Qpisf_2s_46b(
******************************************************************************/
void Qpisf_2s_36b(
- Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
- Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
- Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
- Word16 * indice, /* (o) : quantization indices */
- Word16 nb_surv /* (i) : number of survivor (1, 2, 3 or 4) */
- )
+ Word16 * isf1, /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+ Word16 * isf_q, /* (o) Q15 : quantized ISF (0..0.5) */
+ Word16 * past_isfq, /* (io)Q15 : past ISF quantizer */
+ Word16 * indice, /* (o) : quantization indices */
+ Word16 nb_surv /* (i) : number of survivors (1, 2, 3 or 4) */
+ )
{
- Word16 i, k, tmp_ind[5];
- Word16 surv1[N_SURV_MAX]; /* indices of survivors from 1st stage */
- Word32 temp, min_err, distance;
- Word16 isf[ORDER];
- Word16 isf_stage2[ORDER];
-
- for (i = 0; i < ORDER; i++)
- {
- isf[i] = vo_sub(isf1[i], mean_isf[i]);
- isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
- }
-
- VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
-
- distance = MAX_32;
-
- for (k = 0; k < nb_surv; k++)
- {
- for (i = 0; i < 9; i++)
- {
- isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
- }
-
- tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf_36b, 5, SIZE_BK21_36b, &min_err);
- temp = min_err;
- tmp_ind[1] = Sub_VQ(&isf_stage2[5], dico22_isf_36b, 4, SIZE_BK22_36b, &min_err);
- temp = vo_L_add(temp, min_err);
-
- if(temp < distance)
- {
- distance = temp;
- indice[0] = surv1[k];
- for (i = 0; i < 2; i++)
- {
- indice[i + 2] = tmp_ind[i];
- }
- }
- }
-
- VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
- distance = MAX_32;
-
- for (k = 0; k < nb_surv; k++)
- {
- for (i = 0; i < 7; i++)
- {
- isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
- }
-
- tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico23_isf_36b, 7, SIZE_BK23_36b, &min_err);
- temp = min_err;
-
- if(temp < distance)
- {
- distance = temp;
- indice[1] = surv1[k];
- indice[4] = tmp_ind[0];
- }
- }
-
- Dpisf_2s_36b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
-
- return;
+ Word16 i, k, tmp_ind[5];
+ Word16 surv1[N_SURV_MAX]; /* indices of survivors from 1st stage */
+ Word32 temp, min_err, distance;
+ Word16 isf[ORDER];
+ Word16 isf_stage2[ORDER];
+
+ for (i = 0; i < ORDER; i++)
+ {
+ isf[i] = vo_sub(isf1[i], mean_isf[i]);
+ isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
+ }
+
+ VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
+
+ distance = MAX_32;
+
+ for (k = 0; k < nb_surv; k++)
+ {
+ for (i = 0; i < 9; i++)
+ {
+ isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
+ }
+
+ tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf_36b, 5, SIZE_BK21_36b, &min_err);
+ temp = min_err;
+ tmp_ind[1] = Sub_VQ(&isf_stage2[5], dico22_isf_36b, 4, SIZE_BK22_36b, &min_err);
+ temp = vo_L_add(temp, min_err);
+
+ if(temp < distance)
+ {
+ distance = temp;
+ indice[0] = surv1[k];
+ for (i = 0; i < 2; i++)
+ {
+ indice[i + 2] = tmp_ind[i];
+ }
+ }
+ }
+
+ VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
+ distance = MAX_32;
+
+ for (k = 0; k < nb_surv; k++)
+ {
+ for (i = 0; i < 7; i++)
+ {
+ isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
+ }
+
+ tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico23_isf_36b, 7, SIZE_BK23_36b, &min_err);
+ temp = min_err;
+
+ if(temp < distance)
+ {
+ distance = temp;
+ indice[1] = surv1[k];
+ indice[4] = tmp_ind[0];
+ }
+ }
+
+ Dpisf_2s_36b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
+
+ return;
}
/*********************************************************************
@@ -223,90 +223,90 @@ void Qpisf_2s_36b(
**********************************************************************/
void Dpisf_2s_46b(
- Word16 * indice, /* input: quantization indices */
- Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
- Word16 * past_isfq, /* i/0 : past ISF quantizer */
- Word16 * isfold, /* input : past quantized ISF */
- Word16 * isf_buf, /* input : isf buffer */
- Word16 bfi, /* input : Bad frame indicator */
- Word16 enc_dec
- )
+ Word16 * indice, /* input: quantization indices */
+ Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
+ Word16 * past_isfq, /* i/o : past ISF quantizer */
+ Word16 * isfold, /* input : past quantized ISF */
+ Word16 * isf_buf, /* input : isf buffer */
+ Word16 bfi, /* input : Bad frame indicator */
+ Word16 enc_dec
+ )
{
- Word16 ref_isf[M], tmp;
- Word32 i, j, L_tmp;
-
- if (bfi == 0) /* Good frame */
- {
- for (i = 0; i < 9; i++)
- {
- isf_q[i] = dico1_isf[indice[0] * 9 + i];
- }
- for (i = 0; i < 7; i++)
- {
- isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
- }
-
- for (i = 0; i < 3; i++)
- {
- isf_q[i] = add1(isf_q[i], dico21_isf[indice[2] * 3 + i]);
- isf_q[i + 3] = add1(isf_q[i + 3], dico22_isf[indice[3] * 3 + i]);
- isf_q[i + 6] = add1(isf_q[i + 6], dico23_isf[indice[4] * 3 + i]);
- isf_q[i + 9] = add1(isf_q[i + 9], dico24_isf[indice[5] * 3 + i]);
- }
-
- for (i = 0; i < 4; i++)
- {
- isf_q[i + 12] = add1(isf_q[i + 12], dico25_isf[indice[6] * 4 + i]);
- }
-
- for (i = 0; i < ORDER; i++)
- {
- tmp = isf_q[i];
- isf_q[i] = add1(tmp, mean_isf[i]);
- isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
- past_isfq[i] = tmp;
- }
-
- if (enc_dec)
- {
- for (i = 0; i < M; i++)
- {
- for (j = (L_MEANBUF - 1); j > 0; j--)
- {
- isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
- }
- isf_buf[i] = isf_q[i];
- }
- }
- } else
- { /* bad frame */
- for (i = 0; i < M; i++)
- {
- L_tmp = mean_isf[i] << 14;
- for (j = 0; j < L_MEANBUF; j++)
- {
- L_tmp += (isf_buf[j * M + i] << 14);
- }
- ref_isf[i] = vo_round(L_tmp);
- }
-
- /* use the past ISFs slightly shifted towards their mean */
- for (i = 0; i < ORDER; i++)
- {
- isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
- }
-
- /* estimate past quantized residual to be used in next frame */
- for (i = 0; i < ORDER; i++)
- {
- tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU)); /* predicted ISF */
- past_isfq[i] = vo_sub(isf_q[i], tmp);
- past_isfq[i] = (past_isfq[i] >> 1); /* past_isfq[i] *= 0.5 */
- }
- }
-
- Reorder_isf(isf_q, ISF_GAP, ORDER);
- return;
+ Word16 ref_isf[M], tmp;
+ Word32 i, j, L_tmp;
+
+ if (bfi == 0) /* Good frame */
+ {
+ for (i = 0; i < 9; i++)
+ {
+ isf_q[i] = dico1_isf[indice[0] * 9 + i];
+ }
+ for (i = 0; i < 7; i++)
+ {
+ isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
+ }
+
+ for (i = 0; i < 3; i++)
+ {
+ isf_q[i] = add1(isf_q[i], dico21_isf[indice[2] * 3 + i]);
+ isf_q[i + 3] = add1(isf_q[i + 3], dico22_isf[indice[3] * 3 + i]);
+ isf_q[i + 6] = add1(isf_q[i + 6], dico23_isf[indice[4] * 3 + i]);
+ isf_q[i + 9] = add1(isf_q[i + 9], dico24_isf[indice[5] * 3 + i]);
+ }
+
+ for (i = 0; i < 4; i++)
+ {
+ isf_q[i + 12] = add1(isf_q[i + 12], dico25_isf[indice[6] * 4 + i]);
+ }
+
+ for (i = 0; i < ORDER; i++)
+ {
+ tmp = isf_q[i];
+ isf_q[i] = add1(tmp, mean_isf[i]);
+ isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
+ past_isfq[i] = tmp;
+ }
+
+ if (enc_dec)
+ {
+ for (i = 0; i < M; i++)
+ {
+ for (j = (L_MEANBUF - 1); j > 0; j--)
+ {
+ isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
+ }
+ isf_buf[i] = isf_q[i];
+ }
+ }
+ } else
+ { /* bad frame */
+ for (i = 0; i < M; i++)
+ {
+ L_tmp = mean_isf[i] << 14;
+ for (j = 0; j < L_MEANBUF; j++)
+ {
+ L_tmp += (isf_buf[j * M + i] << 14);
+ }
+ ref_isf[i] = vo_round(L_tmp);
+ }
+
+ /* use the past ISFs slightly shifted towards their mean */
+ for (i = 0; i < ORDER; i++)
+ {
+ isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
+ }
+
+ /* estimate past quantized residual to be used in next frame */
+ for (i = 0; i < ORDER; i++)
+ {
+ tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU)); /* predicted ISF */
+ past_isfq[i] = vo_sub(isf_q[i], tmp);
+ past_isfq[i] = (past_isfq[i] >> 1); /* past_isfq[i] *= 0.5 */
+ }
+ }
+
+ Reorder_isf(isf_q, ISF_GAP, ORDER);
+ return;
}
/*********************************************************************
@@ -316,92 +316,92 @@ void Dpisf_2s_46b(
*********************************************************************/
void Dpisf_2s_36b(
- Word16 * indice, /* input: quantization indices */
- Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
- Word16 * past_isfq, /* i/0 : past ISF quantizer */
- Word16 * isfold, /* input : past quantized ISF */
- Word16 * isf_buf, /* input : isf buffer */
- Word16 bfi, /* input : Bad frame indicator */
- Word16 enc_dec
- )
+ Word16 * indice, /* input: quantization indices */
+ Word16 * isf_q, /* output: quantized ISF in frequency domain (0..0.5) */
+ Word16 * past_isfq, /* i/o : past ISF quantizer */
+ Word16 * isfold, /* input : past quantized ISF */
+ Word16 * isf_buf, /* input : isf buffer */
+ Word16 bfi, /* input : Bad frame indicator */
+ Word16 enc_dec
+ )
{
- Word16 ref_isf[M], tmp;
- Word32 i, j, L_tmp;
-
- if (bfi == 0) /* Good frame */
- {
- for (i = 0; i < 9; i++)
- {
- isf_q[i] = dico1_isf[indice[0] * 9 + i];
- }
- for (i = 0; i < 7; i++)
- {
- isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
- }
-
- for (i = 0; i < 5; i++)
- {
- isf_q[i] = add1(isf_q[i], dico21_isf_36b[indice[2] * 5 + i]);
- }
- for (i = 0; i < 4; i++)
- {
- isf_q[i + 5] = add1(isf_q[i + 5], dico22_isf_36b[indice[3] * 4 + i]);
- }
- for (i = 0; i < 7; i++)
- {
- isf_q[i + 9] = add1(isf_q[i + 9], dico23_isf_36b[indice[4] * 7 + i]);
- }
-
- for (i = 0; i < ORDER; i++)
- {
- tmp = isf_q[i];
- isf_q[i] = add1(tmp, mean_isf[i]);
- isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
- past_isfq[i] = tmp;
- }
-
-
- if (enc_dec)
- {
- for (i = 0; i < M; i++)
- {
- for (j = (L_MEANBUF - 1); j > 0; j--)
- {
- isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
- }
- isf_buf[i] = isf_q[i];
- }
- }
- } else
- { /* bad frame */
- for (i = 0; i < M; i++)
- {
- L_tmp = (mean_isf[i] << 14);
- for (j = 0; j < L_MEANBUF; j++)
- {
- L_tmp += (isf_buf[j * M + i] << 14);
- }
- ref_isf[i] = vo_round(L_tmp);
- }
-
- /* use the past ISFs slightly shifted towards their mean */
- for (i = 0; i < ORDER; i++)
- {
- isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
- }
-
- /* estimate past quantized residual to be used in next frame */
- for (i = 0; i < ORDER; i++)
- {
- tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU)); /* predicted ISF */
- past_isfq[i] = vo_sub(isf_q[i], tmp);
- past_isfq[i] = past_isfq[i] >> 1; /* past_isfq[i] *= 0.5 */
- }
- }
-
- Reorder_isf(isf_q, ISF_GAP, ORDER);
-
- return;
+ Word16 ref_isf[M], tmp;
+ Word32 i, j, L_tmp;
+
+ if (bfi == 0) /* Good frame */
+ {
+ for (i = 0; i < 9; i++)
+ {
+ isf_q[i] = dico1_isf[indice[0] * 9 + i];
+ }
+ for (i = 0; i < 7; i++)
+ {
+ isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
+ }
+
+ for (i = 0; i < 5; i++)
+ {
+ isf_q[i] = add1(isf_q[i], dico21_isf_36b[indice[2] * 5 + i]);
+ }
+ for (i = 0; i < 4; i++)
+ {
+ isf_q[i + 5] = add1(isf_q[i + 5], dico22_isf_36b[indice[3] * 4 + i]);
+ }
+ for (i = 0; i < 7; i++)
+ {
+ isf_q[i + 9] = add1(isf_q[i + 9], dico23_isf_36b[indice[4] * 7 + i]);
+ }
+
+ for (i = 0; i < ORDER; i++)
+ {
+ tmp = isf_q[i];
+ isf_q[i] = add1(tmp, mean_isf[i]);
+ isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
+ past_isfq[i] = tmp;
+ }
+
+
+ if (enc_dec)
+ {
+ for (i = 0; i < M; i++)
+ {
+ for (j = (L_MEANBUF - 1); j > 0; j--)
+ {
+ isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
+ }
+ isf_buf[i] = isf_q[i];
+ }
+ }
+ } else
+ { /* bad frame */
+ for (i = 0; i < M; i++)
+ {
+ L_tmp = (mean_isf[i] << 14);
+ for (j = 0; j < L_MEANBUF; j++)
+ {
+ L_tmp += (isf_buf[j * M + i] << 14);
+ }
+ ref_isf[i] = vo_round(L_tmp);
+ }
+
+ /* use the past ISFs slightly shifted towards their mean */
+ for (i = 0; i < ORDER; i++)
+ {
+ isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
+ }
+
+ /* estimate past quantized residual to be used in next frame */
+ for (i = 0; i < ORDER; i++)
+ {
+ tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU)); /* predicted ISF */
+ past_isfq[i] = vo_sub(isf_q[i], tmp);
+ past_isfq[i] = past_isfq[i] >> 1; /* past_isfq[i] *= 0.5 */
+ }
+ }
+
+ Reorder_isf(isf_q, ISF_GAP, ORDER);
+
+ return;
}
@@ -419,122 +419,122 @@ void Dpisf_2s_36b(
****************************************************************************/
void Reorder_isf(
- Word16 * isf, /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
- Word16 min_dist, /* (i) Q15 : minimum distance to keep */
- Word16 n /* (i) : number of ISF */
- )
+ Word16 * isf, /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
+ Word16 min_dist, /* (i) Q15 : minimum distance to keep */
+ Word16 n /* (i) : number of ISF */
+ )
{
- Word32 i;
- Word16 isf_min;
-
- isf_min = min_dist;
- for (i = 0; i < n - 1; i++)
- {
- if(isf[i] < isf_min)
- {
- isf[i] = isf_min;
- }
- isf_min = (isf[i] + min_dist);
- }
- return;
+ Word32 i;
+ Word16 isf_min;
+
+ isf_min = min_dist;
+ for (i = 0; i < n - 1; i++)
+ {
+ if(isf[i] < isf_min)
+ {
+ isf[i] = isf_min;
+ }
+ isf_min = (isf[i] + min_dist);
+ }
+ return;
}
Word16 Sub_VQ( /* output: return quantization index */
- Word16 * x, /* input : ISF residual vector */
- Word16 * dico, /* input : quantization codebook */
- Word16 dim, /* input : dimention of vector */
- Word16 dico_size, /* input : size of quantization codebook */
- Word32 * distance /* output: error of quantization */
- )
+ Word16 * x, /* input : ISF residual vector */
+ Word16 * dico, /* input : quantization codebook */
+ Word16 dim, /* input : dimension of vector */
+ Word16 dico_size, /* input : size of quantization codebook */
+ Word32 * distance /* output: error of quantization */
+ )
{
- Word16 temp, *p_dico;
- Word32 i, j, index;
- Word32 dist_min, dist;
-
- dist_min = MAX_32;
- p_dico = dico;
-
- index = 0;
- for (i = 0; i < dico_size; i++)
- {
- dist = 0;
-
- for (j = 0; j < dim; j++)
- {
- temp = x[j] - (*p_dico++);
- dist += (temp * temp)<<1;
- }
-
- if(dist < dist_min)
- {
- dist_min = dist;
- index = i;
- }
- }
-
- *distance = dist_min;
-
- /* Reading the selected vector */
- p_dico = &dico[index * dim];
- for (j = 0; j < dim; j++)
- {
- x[j] = *p_dico++;
- }
-
- return index;
+ Word16 temp, *p_dico;
+ Word32 i, j, index;
+ Word32 dist_min, dist;
+
+ dist_min = MAX_32;
+ p_dico = dico;
+
+ index = 0;
+ for (i = 0; i < dico_size; i++)
+ {
+ dist = 0;
+
+ for (j = 0; j < dim; j++)
+ {
+ temp = x[j] - (*p_dico++);
+ dist += (temp * temp)<<1;
+ }
+
+ if(dist < dist_min)
+ {
+ dist_min = dist;
+ index = i;
+ }
+ }
+
+ *distance = dist_min;
+
+ /* Reading the selected vector */
+ p_dico = &dico[index * dim];
+ for (j = 0; j < dim; j++)
+ {
+ x[j] = *p_dico++;
+ }
+
+ return index;
}
static void VQ_stage1(
- Word16 * x, /* input : ISF residual vector */
- Word16 * dico, /* input : quantization codebook */
- Word16 dim, /* input : dimention of vector */
- Word16 dico_size, /* input : size of quantization codebook */
- Word16 * index, /* output: indices of survivors */
- Word16 surv /* input : number of survivor */
- )
+ Word16 * x, /* input : ISF residual vector */
+ Word16 * dico, /* input : quantization codebook */
+ Word16 dim, /* input : dimension of vector */
+ Word16 dico_size, /* input : size of quantization codebook */
+ Word16 * index, /* output: indices of survivors */
+ Word16 surv /* input : number of survivors */
+ )
{
- Word16 temp, *p_dico;
- Word32 i, j, k, l;
- Word32 dist_min[N_SURV_MAX], dist;
-
- dist_min[0] = MAX_32;
- dist_min[1] = MAX_32;
- dist_min[2] = MAX_32;
- dist_min[3] = MAX_32;
- index[0] = 0;
- index[1] = 1;
- index[2] = 2;
- index[3] = 3;
-
- p_dico = dico;
-
- for (i = 0; i < dico_size; i++)
- {
- dist = 0;
- for (j = 0; j < dim; j++)
- {
- temp = x[j] - (*p_dico++);
- dist += (temp * temp)<<1;
- }
-
- for (k = 0; k < surv; k++)
- {
- if(dist < dist_min[k])
- {
- for (l = surv - 1; l > k; l--)
- {
- dist_min[l] = dist_min[l - 1];
- index[l] = index[l - 1];
- }
- dist_min[k] = dist;
- index[k] = i;
- break;
- }
- }
- }
- return;
+ Word16 temp, *p_dico;
+ Word32 i, j, k, l;
+ Word32 dist_min[N_SURV_MAX], dist;
+
+ dist_min[0] = MAX_32;
+ dist_min[1] = MAX_32;
+ dist_min[2] = MAX_32;
+ dist_min[3] = MAX_32;
+ index[0] = 0;
+ index[1] = 1;
+ index[2] = 2;
+ index[3] = 3;
+
+ p_dico = dico;
+
+ for (i = 0; i < dico_size; i++)
+ {
+ dist = 0;
+ for (j = 0; j < dim; j++)
+ {
+ temp = x[j] - (*p_dico++);
+ dist += (temp * temp)<<1;
+ }
+
+ for (k = 0; k < surv; k++)
+ {
+ if(dist < dist_min[k])
+ {
+ for (l = surv - 1; l > k; l--)
+ {
+ dist_min[l] = dist_min[l - 1];
+ index[l] = index[l - 1];
+ }
+ dist_min[k] = dist;
+ index[k] = i;
+ break;
+ }
+ }
+ }
+ return;
}
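
Reorder_isf above enforces a minimum spacing between consecutive ISFs by pushing each value up to a running floor. The same logic in a self-contained sketch, using plain int in place of the Q15 Word16 values:

    #include <stdio.h>

    /* Walk the vector and lift any ISF that falls within min_dist of the
     * previous one; the last element is left untouched, as in the source. */
    static void reorder_isf_ref(int *isf, int min_dist, int n)
    {
        int floor_val = min_dist;
        for (int i = 0; i < n - 1; i++) {
            if (isf[i] < floor_val)
                isf[i] = floor_val;
            floor_val = isf[i] + min_dist;
        }
    }

    int main(void)
    {
        int isf[5] = { 10, 12, 40, 41, 90 };
        reorder_isf_ref(isf, 5, 5);
        for (int i = 0; i < 5; i++)
            printf("%d ", isf[i]);                  /* 10 15 40 45 90 */
        printf("\n");
        return 0;
    }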
diff --git a/media/libstagefright/codecs/amrwbenc/src/random.c b/media/libstagefright/codecs/amrwbenc/src/random.c
index b896863..758343c 100644
--- a/media/libstagefright/codecs/amrwbenc/src/random.c
+++ b/media/libstagefright/codecs/amrwbenc/src/random.c
@@ -26,8 +26,8 @@
Word16 Random(Word16 * seed)
{
- /* static Word16 seed = 21845; */
- *seed = (Word16)(L_add((L_mult(*seed, 31821) >> 1), 13849L));
- return (*seed);
+ /* static Word16 seed = 21845; */
+ *seed = (Word16)(L_add((L_mult(*seed, 31821) >> 1), 13849L));
+ return (*seed);
}
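
Since L_mult(seed, 31821) >> 1 reduces to seed * 31821, Random() above is the usual 16-bit linear congruential generator seed = seed*31821 + 13849 with wrap-around. A standalone equivalent as a sketch; the narrowing cast relies on the two's-complement wrap the codec code also assumes:

    #include <stdint.h>
    #include <stdio.h>

    static int16_t random_ref(int16_t *seed)
    {
        /* seed*31821 fits in 32 bits for any 16-bit seed, so no saturation. */
        *seed = (int16_t)((int32_t)*seed * 31821 + 13849);
        return *seed;
    }

    int main(void)
    {
        int16_t seed = 21845;                       /* seed quoted in the source comment */
        for (int i = 0; i < 4; i++)
            printf("%d\n", random_ref(&seed));
        return 0;
    }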
diff --git a/media/libstagefright/codecs/amrwbenc/src/residu.c b/media/libstagefright/codecs/amrwbenc/src/residu.c
index b0c04b5..76d0e41 100644
--- a/media/libstagefright/codecs/amrwbenc/src/residu.c
+++ b/media/libstagefright/codecs/amrwbenc/src/residu.c
@@ -26,41 +26,41 @@
#include "basic_op.h"
void Residu(
- Word16 a[], /* (i) Q12 : prediction coefficients */
- Word16 x[], /* (i) : speech (values x[-m..-1] are needed */
- Word16 y[], /* (o) x2 : residual signal */
- Word16 lg /* (i) : size of filtering */
- )
+ Word16 a[], /* (i) Q12 : prediction coefficients */
+ Word16 x[], /* (i) : speech (values x[-m..-1] are needed) */
+ Word16 y[], /* (o) x2 : residual signal */
+ Word16 lg /* (i) : size of filtering */
+ )
{
- Word16 i,*p1, *p2;
- Word32 s;
- for (i = 0; i < lg; i++)
- {
- p1 = a;
- p2 = &x[i];
- s = vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1++), (*p2--));
- s += vo_mult32((*p1), (*p2));
+ Word16 i,*p1, *p2;
+ Word32 s;
+ for (i = 0; i < lg; i++)
+ {
+ p1 = a;
+ p2 = &x[i];
+ s = vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1++), (*p2--));
+ s += vo_mult32((*p1), (*p2));
- s = L_shl2(s, 5);
- y[i] = extract_h(L_add(s, 0x8000));
- }
+ s = L_shl2(s, 5);
+ y[i] = extract_h(L_add(s, 0x8000));
+ }
- return;
+ return;
}
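
Residu() is the LP inverse filter: it passes speech x[] through A(z) (order 16, Q12 coefficients) to produce the residual, with the 17-tap sum fully unrolled. A loop-based sketch of the same computation, assuming vo_mult32 is a plain 32-bit multiply so that L_shl2(s, 5) followed by extract_h(L_add(s, 0x8000)) amounts to a rounded >>11 (residu_ref is an illustrative name; a 64-bit accumulator is used here instead of the codec's 32-bit arithmetic):

    #include <stdint.h>

    #define M 16   /* LP order */

    /* x[] must provide x[-M..-1] history, as noted in the original prototype */
    static void residu_ref(const int16_t a[], const int16_t x[], int16_t y[], int lg)
    {
        for (int i = 0; i < lg; i++) {
            int64_t s = 0;
            for (int k = 0; k <= M; k++)
                s += (int32_t)a[k] * x[i - k];        /* FIR part: A(z) applied to speech */
            y[i] = (int16_t)((s + (1 << 10)) >> 11);  /* round; Q12 coefficients -> residual */
        }
    }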
diff --git a/media/libstagefright/codecs/amrwbenc/src/scale.c b/media/libstagefright/codecs/amrwbenc/src/scale.c
index 418cc06..21458c8 100644
--- a/media/libstagefright/codecs/amrwbenc/src/scale.c
+++ b/media/libstagefright/codecs/amrwbenc/src/scale.c
@@ -25,32 +25,32 @@
#include "basic_op.h"
void Scale_sig(
- Word16 x[], /* (i/o) : signal to scale */
- Word16 lg, /* (i) : size of x[] */
- Word16 exp /* (i) : exponent: x = round(x << exp) */
- )
+ Word16 x[], /* (i/o) : signal to scale */
+ Word16 lg, /* (i) : size of x[] */
+ Word16 exp /* (i) : exponent: x = round(x << exp) */
+ )
{
- Word32 i;
- Word32 L_tmp;
- if(exp > 0)
- {
- for (i = lg - 1 ; i >= 0; i--)
- {
- L_tmp = L_shl2(x[i], 16 + exp);
- x[i] = extract_h(L_add(L_tmp, 0x8000));
- }
- }
- else
- {
- exp = -exp;
- for (i = lg - 1; i >= 0; i--)
- {
- L_tmp = x[i] << 16;
- L_tmp >>= exp;
- x[i] = (L_tmp + 0x8000)>>16;
- }
- }
- return;
+ Word32 i;
+ Word32 L_tmp;
+ if(exp > 0)
+ {
+ for (i = lg - 1 ; i >= 0; i--)
+ {
+ L_tmp = L_shl2(x[i], 16 + exp);
+ x[i] = extract_h(L_add(L_tmp, 0x8000));
+ }
+ }
+ else
+ {
+ exp = -exp;
+ for (i = lg - 1; i >= 0; i--)
+ {
+ L_tmp = x[i] << 16;
+ L_tmp >>= exp;
+ x[i] = (L_tmp + 0x8000)>>16;
+ }
+ }
+ return;
}
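
Scale_sig() multiplies every sample by 2^exp with rounding, relying on L_shl2's 32-bit saturation when scaling up. A compact sketch of the same behaviour (scale_sig_ref is an illustrative name; saturation is applied after rounding here, whereas the basic ops saturate the intermediate shift first):

    #include <stdint.h>

    static void scale_sig_ref(int16_t x[], int lg, int exp)
    {
        for (int i = 0; i < lg; i++) {
            int64_t t;
            if (exp > 0)
                t = (int64_t)x[i] * (1LL << (16 + exp));   /* x << (16 + exp) */
            else
                t = ((int64_t)x[i] * 65536) >> (-exp);     /* x << 16, then >> -exp */
            t = (t + 0x8000) >> 16;                        /* round back to 16 bits */
            if (t >  32767) t =  32767;                    /* saturate like the basic ops */
            if (t < -32768) t = -32768;
            x[i] = (int16_t)t;
        }
    }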
diff --git a/media/libstagefright/codecs/amrwbenc/src/stream.c b/media/libstagefright/codecs/amrwbenc/src/stream.c
index 780f009..a39149e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/stream.c
+++ b/media/libstagefright/codecs/amrwbenc/src/stream.c
@@ -25,34 +25,34 @@
void voAWB_InitFrameBuffer(FrameStream *stream)
{
- stream->set_ptr = NULL;
- stream->frame_ptr_bk = stream->frame_ptr;
- stream->set_len = 0;
- stream->framebuffer_len = 0;
- stream->frame_storelen = 0;
+ stream->set_ptr = NULL;
+ stream->frame_ptr_bk = stream->frame_ptr;
+ stream->set_len = 0;
+ stream->framebuffer_len = 0;
+ stream->frame_storelen = 0;
}
void voAWB_UpdateFrameBuffer(
- FrameStream *stream,
- VO_MEM_OPERATOR *pMemOP
- )
+ FrameStream *stream,
+ VO_MEM_OPERATOR *pMemOP
+ )
{
- int len;
- len = MIN(Frame_Maxsize - stream->frame_storelen, stream->set_len);
- pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk + stream->frame_storelen , stream->set_ptr, len);
- stream->set_len -= len;
- stream->set_ptr += len;
- stream->framebuffer_len = stream->frame_storelen + len;
- stream->frame_ptr = stream->frame_ptr_bk;
- stream->used_len += len;
+ int len;
+ len = MIN(Frame_Maxsize - stream->frame_storelen, stream->set_len);
+ pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk + stream->frame_storelen , stream->set_ptr, len);
+ stream->set_len -= len;
+ stream->set_ptr += len;
+ stream->framebuffer_len = stream->frame_storelen + len;
+ stream->frame_ptr = stream->frame_ptr_bk;
+ stream->used_len += len;
}
void voAWB_FlushFrameBuffer(FrameStream *stream)
{
- stream->set_ptr = NULL;
- stream->frame_ptr_bk = stream->frame_ptr;
- stream->set_len = 0;
- stream->framebuffer_len = 0;
- stream->frame_storelen = 0;
+ stream->set_ptr = NULL;
+ stream->frame_ptr_bk = stream->frame_ptr;
+ stream->set_len = 0;
+ stream->framebuffer_len = 0;
+ stream->frame_storelen = 0;
}
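
The stream.c hunks re-indent the frame-buffer bookkeeping: voAWB_UpdateFrameBuffer tops up the current frame from the pending input chunk, copying at most the free space left in one frame. A simplified, self-contained sketch of that top-up (FrameBuf and FRAME_MAX are illustrative stand-ins for FrameStream and Frame_Maxsize, and memcpy stands in for pMemOP->Copy):

    #include <string.h>

    #define FRAME_MAX 640                     /* assumed frame capacity for the sketch */

    typedef struct {
        unsigned char        frame[FRAME_MAX];/* one frame being assembled        */
        int                  frame_storelen;  /* bytes already stored in frame[]  */
        int                  framebuffer_len; /* bytes valid after the top-up     */
        const unsigned char *set_ptr;         /* pending input chunk              */
        int                  set_len;         /* bytes left in the pending chunk  */
    } FrameBuf;

    static void update_frame_buffer(FrameBuf *s)
    {
        int len = FRAME_MAX - s->frame_storelen;
        if (len > s->set_len)
            len = s->set_len;                 /* MIN(capacity left, input left) */
        memcpy(s->frame + s->frame_storelen, s->set_ptr, (size_t)len);
        s->set_ptr        += len;
        s->set_len        -= len;
        s->framebuffer_len = s->frame_storelen + len;
    }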
diff --git a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
index 961aadc..7eba12f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
+++ b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
@@ -29,134 +29,134 @@
#define UNUSED(x) (void)(x)
void Syn_filt(
- Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
- Word16 x[], /* (i) : input signal */
- Word16 y[], /* (o) : output signal */
- Word16 lg, /* (i) : size of filtering */
- Word16 mem[], /* (i/o) : memory associated with this filtering. */
- Word16 update /* (i) : 0=no update, 1=update of memory. */
- )
+ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
+ Word16 x[], /* (i) : input signal */
+ Word16 y[], /* (o) : output signal */
+ Word16 lg, /* (i) : size of filtering */
+ Word16 mem[], /* (i/o) : memory associated with this filtering. */
+ Word16 update /* (i) : 0=no update, 1=update of memory. */
+ )
{
- Word32 i, a0;
- Word16 y_buf[L_SUBFR16k + M16k];
- Word32 L_tmp;
- Word16 *yy, *p1, *p2;
- yy = &y_buf[0];
- /* copy initial filter states into synthesis buffer */
- for (i = 0; i < 16; i++)
- {
- *yy++ = mem[i];
- }
- a0 = (a[0] >> 1); /* input / 2 */
- /* Do the filtering. */
- for (i = 0; i < lg; i++)
- {
- p1 = &a[1];
- p2 = &yy[i-1];
- L_tmp = vo_mult32(a0, x[i]);
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1++), (*p2--));
- L_tmp -= vo_mult32((*p1), (*p2));
-
- L_tmp = L_shl2(L_tmp, 4);
- y[i] = yy[i] = extract_h(L_add(L_tmp, 0x8000));
- }
- /* Update memory if required */
- if (update)
- for (i = 0; i < 16; i++)
- {
- mem[i] = yy[lg - 16 + i];
- }
- return;
+ Word32 i, a0;
+ Word16 y_buf[L_SUBFR16k + M16k];
+ Word32 L_tmp;
+ Word16 *yy, *p1, *p2;
+ yy = &y_buf[0];
+ /* copy initial filter states into synthesis buffer */
+ for (i = 0; i < 16; i++)
+ {
+ *yy++ = mem[i];
+ }
+ a0 = (a[0] >> 1); /* input / 2 */
+ /* Do the filtering. */
+ for (i = 0; i < lg; i++)
+ {
+ p1 = &a[1];
+ p2 = &yy[i-1];
+ L_tmp = vo_mult32(a0, x[i]);
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1++), (*p2--));
+ L_tmp -= vo_mult32((*p1), (*p2));
+
+ L_tmp = L_shl2(L_tmp, 4);
+ y[i] = yy[i] = extract_h(L_add(L_tmp, 0x8000));
+ }
+ /* Update memory if required */
+ if (update)
+ for (i = 0; i < 16; i++)
+ {
+ mem[i] = yy[lg - 16 + i];
+ }
+ return;
}
void Syn_filt_32(
- Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
- Word16 m, /* (i) : order of LP filter */
- Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
- Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
- Word16 sig_hi[], /* (o) /16 : synthesis high */
- Word16 sig_lo[], /* (o) /16 : synthesis low */
- Word16 lg /* (i) : size of filtering */
- )
+ Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
+ Word16 m, /* (i) : order of LP filter */
+ Word16 exc[], /* (i) Qnew: excitation (exc[i] >> Qnew) */
+ Word16 Qnew, /* (i) : exc scaling = 0(min) to 8(max) */
+ Word16 sig_hi[], /* (o) /16 : synthesis high */
+ Word16 sig_lo[], /* (o) /16 : synthesis low */
+ Word16 lg /* (i) : size of filtering */
+ )
{
- Word32 i,a0;
- Word32 L_tmp, L_tmp1;
- Word16 *p1, *p2, *p3;
+ Word32 i,a0;
+ Word32 L_tmp, L_tmp1;
+ Word16 *p1, *p2, *p3;
UNUSED(m);
- a0 = a[0] >> (4 + Qnew); /* input / 16 and >>Qnew */
- /* Do the filtering. */
- for (i = 0; i < lg; i++)
- {
- L_tmp = 0;
- L_tmp1 = 0;
- p1 = a;
- p2 = &sig_lo[i - 1];
- p3 = &sig_hi[i - 1];
-
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
- L_tmp -= vo_mult32((*p2--), (*p1));
- L_tmp1 -= vo_mult32((*p3--), (*p1++));
-
- L_tmp = L_tmp >> 11;
- L_tmp += vo_L_mult(exc[i], a0);
-
- /* sig_hi = bit16 to bit31 of synthesis */
- L_tmp = L_tmp - (L_tmp1<<1);
-
- L_tmp = L_tmp >> 3; /* ai in Q12 */
- sig_hi[i] = extract_h(L_tmp);
-
- /* sig_lo = bit4 to bit15 of synthesis */
- L_tmp >>= 4; /* 4 : sig_lo[i] >> 4 */
- sig_lo[i] = (Word16)((L_tmp - (sig_hi[i] << 13)));
- }
-
- return;
+ a0 = a[0] >> (4 + Qnew); /* input / 16 and >>Qnew */
+ /* Do the filtering. */
+ for (i = 0; i < lg; i++)
+ {
+ L_tmp = 0;
+ L_tmp1 = 0;
+ p1 = a;
+ p2 = &sig_lo[i - 1];
+ p3 = &sig_hi[i - 1];
+
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+ L_tmp -= vo_mult32((*p2--), (*p1));
+ L_tmp1 -= vo_mult32((*p3--), (*p1++));
+
+ L_tmp = L_tmp >> 11;
+ L_tmp += vo_L_mult(exc[i], a0);
+
+ /* sig_hi = bit16 to bit31 of synthesis */
+ L_tmp = L_tmp - (L_tmp1<<1);
+
+ L_tmp = L_tmp >> 3; /* ai in Q12 */
+ sig_hi[i] = extract_h(L_tmp);
+
+ /* sig_lo = bit4 to bit15 of synthesis */
+ L_tmp >>= 4; /* 4 : sig_lo[i] >> 4 */
+ sig_lo[i] = (Word16)((L_tmp - (sig_hi[i] << 13)));
+ }
+
+ return;
}
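
Syn_filt() is the all-pole synthesis filter 1/A(z): each output sample is (a[0]/2)*x[i] minus the weighted sum of the previous 16 outputs, scaled back and rounded, with mem[] carrying the last 16 outputs between calls. A loop-based sketch of the same recursion (syn_filt_ref and L_SUBFR_MAX are illustrative; a 64-bit accumulator replaces the 32-bit saturating basic ops):

    #include <stdint.h>

    #define M 16
    #define L_SUBFR_MAX 80                     /* assumed upper bound on lg */

    static void syn_filt_ref(const int16_t a[], const int16_t x[], int16_t y[],
                             int lg, int16_t mem[], int update)
    {
        int16_t buf[M + L_SUBFR_MAX];
        int16_t *yy = buf + M;
        for (int i = 0; i < M; i++)
            buf[i] = mem[i];                   /* preload filter state */

        for (int i = 0; i < lg; i++) {
            int64_t acc = (int64_t)(a[0] >> 1) * x[i];   /* input / 2, as in the source */
            for (int k = 1; k <= M; k++)
                acc -= (int64_t)a[k] * yy[i - k];        /* recursive (all-pole) part */
            acc = acc * 16 + 0x8000;           /* L_shl2(., 4) plus rounding offset */
            int64_t r = acc >> 16;
            if (r >  32767) r =  32767;        /* extract_h with saturation */
            if (r < -32768) r = -32768;
            y[i] = yy[i] = (int16_t)r;
        }

        if (update)                            /* carry state to the next subframe */
            for (int i = 0; i < M; i++)
                mem[i] = yy[lg - M + i];
    }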
diff --git a/media/libstagefright/codecs/amrwbenc/src/updt_tar.c b/media/libstagefright/codecs/amrwbenc/src/updt_tar.c
index 96779fd..ba7c2ff 100644
--- a/media/libstagefright/codecs/amrwbenc/src/updt_tar.c
+++ b/media/libstagefright/codecs/amrwbenc/src/updt_tar.c
@@ -25,24 +25,25 @@
#include "basic_op.h"
void Updt_tar(
- Word16 * x, /* (i) Q0 : old target (for pitch search) */
- Word16 * x2, /* (o) Q0 : new target (for codebook search) */
- Word16 * y, /* (i) Q0 : filtered adaptive codebook vector */
- Word16 gain, /* (i) Q14 : adaptive codebook gain */
- Word16 L /* (i) : subframe size */
- )
+ Word16 * x, /* (i) Q0 : old target (for pitch search) */
+ Word16 * x2, /* (o) Q0 : new target (for codebook search) */
+ Word16 * y, /* (i) Q0 : filtered adaptive codebook vector */
+ Word16 gain, /* (i) Q14 : adaptive codebook gain */
+ Word16 L /* (i) : subframe size */
+ )
{
- Word32 i;
- Word32 L_tmp;
-
- for (i = 0; i < L; i++)
- {
- L_tmp = x[i] << 15;
- L_tmp -= (y[i] * gain)<<1;
- x2[i] = extract_h(L_shl2(L_tmp, 1));
- }
-
- return;
+ Word32 i;
+ Word32 L_tmp, L_tmp2;
+
+ for (i = 0; i < L; i++)
+ {
+ L_tmp = x[i] << 15;
+ L_tmp2 = L_mult(y[i], gain);
+ L_tmp = L_sub(L_tmp, L_tmp2);
+ x2[i] = extract_h(L_shl2(L_tmp, 1));
+ }
+
+ return;
}
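
Unlike the surrounding whitespace-only hunks, this one changes the arithmetic: the raw `(y[i] * gain) << 1` subtraction is replaced by the saturating basic ops L_mult/L_sub, so a large adaptive-codebook contribution clips instead of overflowing 32-bit signed arithmetic. A sketch of the same idea with simplified stand-ins for the basic ops (sat32, l_mult, l_sub and updt_tar_ref are illustrative, not the codec's basic_op.h definitions); the DTX energy loop later in this patch gets the same L_mult/L_add treatment:

    #include <stdint.h>

    static int32_t sat32(int64_t v)
    {
        if (v > INT32_MAX) return INT32_MAX;
        if (v < INT32_MIN) return INT32_MIN;
        return (int32_t)v;
    }

    static int32_t l_mult(int16_t a, int16_t b)   /* 2*a*b, saturated to 32 bits */
    {
        return sat32(2 * (int64_t)a * (int64_t)b);
    }

    static int32_t l_sub(int32_t a, int32_t b)    /* a - b, saturated to 32 bits */
    {
        return sat32((int64_t)a - (int64_t)b);
    }

    /* new target x2[] = x[] - gain*y[], as in Updt_tar; gain is Q14 */
    static void updt_tar_ref(const int16_t *x, int16_t *x2, const int16_t *y,
                             int16_t gain, int16_t L)
    {
        for (int i = 0; i < L; i++) {
            int32_t t = (int32_t)x[i] << 15;
            t = l_sub(t, l_mult(y[i], gain));             /* saturating subtract */
            x2[i] = (int16_t)(sat32(2 * (int64_t)t) >> 16); /* extract_h(L_shl2(t, 1)) */
        }
    }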
diff --git a/media/libstagefright/codecs/amrwbenc/src/util.c b/media/libstagefright/codecs/amrwbenc/src/util.c
index 333140d..374245f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/util.c
+++ b/media/libstagefright/codecs/amrwbenc/src/util.c
@@ -30,15 +30,15 @@
************************************************************************/
void Set_zero(
- Word16 x[], /* (o) : vector to clear */
- Word16 L /* (i) : length of vector */
- )
+ Word16 x[], /* (o) : vector to clear */
+ Word16 L /* (i) : length of vector */
+ )
{
- Word32 num = (Word32)L;
- while (num > 0) {
- *x++ = 0;
+ Word32 num = (Word32)L;
+ while (num > 0) {
+ *x++ = 0;
--num;
- }
+ }
}
@@ -49,28 +49,28 @@ void Set_zero(
*********************************************************************/
void Copy(
- Word16 x[], /* (i) : input vector */
- Word16 y[], /* (o) : output vector */
- Word16 L /* (i) : vector length */
- )
+ Word16 x[], /* (i) : input vector */
+ Word16 y[], /* (o) : output vector */
+ Word16 L /* (i) : vector length */
+ )
{
- Word32 temp1,temp2,num;
+ Word32 temp1,temp2,num;
if (L <= 0) {
return;
}
- if(L&1)
- {
- temp1 = *x++;
- *y++ = temp1;
- }
- num = (Word32)(L>>1);
- while (num > 0) {
- temp1 = *x++;
- temp2 = *x++;
- *y++ = temp1;
- *y++ = temp2;
+ if(L&1)
+ {
+ temp1 = *x++;
+ *y++ = temp1;
+ }
+ num = (Word32)(L>>1);
+ while (num > 0) {
+ temp1 = *x++;
+ temp2 = *x++;
+ *y++ = temp1;
+ *y++ = temp2;
--num;
- }
+ }
}
diff --git a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
index df7b9b3..4cafb01 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
@@ -19,8 +19,8 @@
* *
* Description: Performs the main encoder routine *
* Fixed-point C simulation of AMR WB ACELP coding *
-* algorithm with 20 msspeech frames for *
-* wideband speech signals. *
+* algorithm with 20 ms speech frames for                         *
+* wideband speech signals. *
* *
************************************************************************/
@@ -51,95 +51,95 @@ static Word16 interpol_frac[NB_SUBFR] = {14746, 26214, 31457, 32767};
/* isp tables for initialization */
static Word16 isp_init[M] =
{
- 32138, 30274, 27246, 23170, 18205, 12540, 6393, 0,
- -6393, -12540, -18205, -23170, -27246, -30274, -32138, 1475
+ 32138, 30274, 27246, 23170, 18205, 12540, 6393, 0,
+ -6393, -12540, -18205, -23170, -27246, -30274, -32138, 1475
};
static Word16 isf_init[M] =
{
- 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192,
- 9216, 10240, 11264, 12288, 13312, 14336, 15360, 3840
+ 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192,
+ 9216, 10240, 11264, 12288, 13312, 14336, 15360, 3840
};
/* High Band encoding */
static const Word16 HP_gain[16] =
{
- 3624, 4673, 5597, 6479, 7425, 8378, 9324, 10264,
- 11210, 12206, 13391, 14844, 16770, 19655, 24289, 32728
+ 3624, 4673, 5597, 6479, 7425, 8378, 9324, 10264,
+ 11210, 12206, 13391, 14844, 16770, 19655, 24289, 32728
};
/* Private function declaration */
static Word16 synthesis(
- Word16 Aq[], /* A(z) : quantized Az */
- Word16 exc[], /* (i) : excitation at 12kHz */
- Word16 Q_new, /* (i) : scaling performed on exc */
- Word16 synth16k[], /* (o) : 16kHz synthesis signal */
- Coder_State * st /* (i/o) : State structure */
- );
+ Word16 Aq[], /* A(z) : quantized Az */
+ Word16 exc[], /* (i) : excitation at 12kHz */
+ Word16 Q_new, /* (i) : scaling performed on exc */
+ Word16 synth16k[], /* (o) : 16kHz synthesis signal */
+ Coder_State * st /* (i/o) : State structure */
+ );
/* Codec some parameters initialization */
void Reset_encoder(void *st, Word16 reset_all)
{
- Word16 i;
- Coder_State *cod_state;
- cod_state = (Coder_State *) st;
- Set_zero(cod_state->old_exc, PIT_MAX + L_INTERPOL);
- Set_zero(cod_state->mem_syn, M);
- Set_zero(cod_state->past_isfq, M);
- cod_state->mem_w0 = 0;
- cod_state->tilt_code = 0;
- cod_state->first_frame = 1;
- Init_gp_clip(cod_state->gp_clip);
- cod_state->L_gc_thres = 0;
- if (reset_all != 0)
- {
- /* Static vectors to zero */
- Set_zero(cod_state->old_speech, L_TOTAL - L_FRAME);
- Set_zero(cod_state->old_wsp, (PIT_MAX / OPL_DECIM));
- Set_zero(cod_state->mem_decim2, 3);
- /* routines initialization */
- Init_Decim_12k8(cod_state->mem_decim);
- Init_HP50_12k8(cod_state->mem_sig_in);
- Init_Levinson(cod_state->mem_levinson);
- Init_Q_gain2(cod_state->qua_gain);
- Init_Hp_wsp(cod_state->hp_wsp_mem);
- /* isp initialization */
- Copy(isp_init, cod_state->ispold, M);
- Copy(isp_init, cod_state->ispold_q, M);
- /* variable initialization */
- cod_state->mem_preemph = 0;
- cod_state->mem_wsp = 0;
- cod_state->Q_old = 15;
- cod_state->Q_max[0] = 15;
- cod_state->Q_max[1] = 15;
- cod_state->old_wsp_max = 0;
- cod_state->old_wsp_shift = 0;
- /* pitch ol initialization */
- cod_state->old_T0_med = 40;
- cod_state->ol_gain = 0;
- cod_state->ada_w = 0;
- cod_state->ol_wght_flg = 0;
- for (i = 0; i < 5; i++)
- {
- cod_state->old_ol_lag[i] = 40;
- }
- Set_zero(cod_state->old_hp_wsp, (L_FRAME / 2) / OPL_DECIM + (PIT_MAX / OPL_DECIM));
- Set_zero(cod_state->mem_syn_hf, M);
- Set_zero(cod_state->mem_syn_hi, M);
- Set_zero(cod_state->mem_syn_lo, M);
- Init_HP50_12k8(cod_state->mem_sig_out);
- Init_Filt_6k_7k(cod_state->mem_hf);
- Init_HP400_12k8(cod_state->mem_hp400);
- Copy(isf_init, cod_state->isfold, M);
- cod_state->mem_deemph = 0;
- cod_state->seed2 = 21845;
- Init_Filt_6k_7k(cod_state->mem_hf2);
- cod_state->gain_alpha = 32767;
- cod_state->vad_hist = 0;
- wb_vad_reset(cod_state->vadSt);
- dtx_enc_reset(cod_state->dtx_encSt, isf_init);
- }
- return;
+ Word16 i;
+ Coder_State *cod_state;
+ cod_state = (Coder_State *) st;
+ Set_zero(cod_state->old_exc, PIT_MAX + L_INTERPOL);
+ Set_zero(cod_state->mem_syn, M);
+ Set_zero(cod_state->past_isfq, M);
+ cod_state->mem_w0 = 0;
+ cod_state->tilt_code = 0;
+ cod_state->first_frame = 1;
+ Init_gp_clip(cod_state->gp_clip);
+ cod_state->L_gc_thres = 0;
+ if (reset_all != 0)
+ {
+ /* Static vectors to zero */
+ Set_zero(cod_state->old_speech, L_TOTAL - L_FRAME);
+ Set_zero(cod_state->old_wsp, (PIT_MAX / OPL_DECIM));
+ Set_zero(cod_state->mem_decim2, 3);
+ /* routines initialization */
+ Init_Decim_12k8(cod_state->mem_decim);
+ Init_HP50_12k8(cod_state->mem_sig_in);
+ Init_Levinson(cod_state->mem_levinson);
+ Init_Q_gain2(cod_state->qua_gain);
+ Init_Hp_wsp(cod_state->hp_wsp_mem);
+ /* isp initialization */
+ Copy(isp_init, cod_state->ispold, M);
+ Copy(isp_init, cod_state->ispold_q, M);
+ /* variable initialization */
+ cod_state->mem_preemph = 0;
+ cod_state->mem_wsp = 0;
+ cod_state->Q_old = 15;
+ cod_state->Q_max[0] = 15;
+ cod_state->Q_max[1] = 15;
+ cod_state->old_wsp_max = 0;
+ cod_state->old_wsp_shift = 0;
+ /* pitch ol initialization */
+ cod_state->old_T0_med = 40;
+ cod_state->ol_gain = 0;
+ cod_state->ada_w = 0;
+ cod_state->ol_wght_flg = 0;
+ for (i = 0; i < 5; i++)
+ {
+ cod_state->old_ol_lag[i] = 40;
+ }
+ Set_zero(cod_state->old_hp_wsp, (L_FRAME / 2) / OPL_DECIM + (PIT_MAX / OPL_DECIM));
+ Set_zero(cod_state->mem_syn_hf, M);
+ Set_zero(cod_state->mem_syn_hi, M);
+ Set_zero(cod_state->mem_syn_lo, M);
+ Init_HP50_12k8(cod_state->mem_sig_out);
+ Init_Filt_6k_7k(cod_state->mem_hf);
+ Init_HP400_12k8(cod_state->mem_hp400);
+ Copy(isf_init, cod_state->isfold, M);
+ cod_state->mem_deemph = 0;
+ cod_state->seed2 = 21845;
+ Init_Filt_6k_7k(cod_state->mem_hf2);
+ cod_state->gain_alpha = 32767;
+ cod_state->vad_hist = 0;
+ wb_vad_reset(cod_state->vadSt);
+ dtx_enc_reset(cod_state->dtx_encSt, isf_init);
+ }
+ return;
}
/*-----------------------------------------------------------------*
@@ -149,1176 +149,1180 @@ void Reset_encoder(void *st, Word16 reset_all)
* *
*-----------------------------------------------------------------*/
void coder(
- Word16 * mode, /* input : used mode */
- Word16 speech16k[], /* input : 320 new speech samples (at 16 kHz) */
- Word16 prms[], /* output: output parameters */
- Word16 * ser_size, /* output: bit rate of the used mode */
- void *spe_state, /* i/o : State structure */
- Word16 allow_dtx /* input : DTX ON/OFF */
- )
+ Word16 * mode, /* input : used mode */
+ Word16 speech16k[], /* input : 320 new speech samples (at 16 kHz) */
+ Word16 prms[], /* output: output parameters */
+ Word16 * ser_size, /* output: bit rate of the used mode */
+ void *spe_state, /* i/o : State structure */
+ Word16 allow_dtx /* input : DTX ON/OFF */
+ )
{
- /* Coder states */
- Coder_State *st;
- /* Speech vector */
- Word16 old_speech[L_TOTAL];
- Word16 *new_speech, *speech, *p_window;
-
- /* Weighted speech vector */
- Word16 old_wsp[L_FRAME + (PIT_MAX / OPL_DECIM)];
- Word16 *wsp;
-
- /* Excitation vector */
- Word16 old_exc[(L_FRAME + 1) + PIT_MAX + L_INTERPOL];
- Word16 *exc;
-
- /* LPC coefficients */
- Word16 r_h[M + 1], r_l[M + 1]; /* Autocorrelations of windowed speech */
- Word16 rc[M]; /* Reflection coefficients. */
- Word16 Ap[M + 1]; /* A(z) with spectral expansion */
- Word16 ispnew[M]; /* immittance spectral pairs at 4nd sfr */
- Word16 ispnew_q[M]; /* quantized ISPs at 4nd subframe */
- Word16 isf[M]; /* ISF (frequency domain) at 4nd sfr */
- Word16 *p_A, *p_Aq; /* ptr to A(z) for the 4 subframes */
- Word16 A[NB_SUBFR * (M + 1)]; /* A(z) unquantized for the 4 subframes */
- Word16 Aq[NB_SUBFR * (M + 1)]; /* A(z) quantized for the 4 subframes */
-
- /* Other vectors */
- Word16 xn[L_SUBFR]; /* Target vector for pitch search */
- Word16 xn2[L_SUBFR]; /* Target vector for codebook search */
- Word16 dn[L_SUBFR]; /* Correlation between xn2 and h1 */
- Word16 cn[L_SUBFR]; /* Target vector in residual domain */
- Word16 h1[L_SUBFR]; /* Impulse response vector */
- Word16 h2[L_SUBFR]; /* Impulse response vector */
- Word16 code[L_SUBFR]; /* Fixed codebook excitation */
- Word16 y1[L_SUBFR]; /* Filtered adaptive excitation */
- Word16 y2[L_SUBFR]; /* Filtered adaptive excitation */
- Word16 error[M + L_SUBFR]; /* error of quantization */
- Word16 synth[L_SUBFR]; /* 12.8kHz synthesis vector */
- Word16 exc2[L_FRAME]; /* excitation vector */
- Word16 buf[L_FRAME]; /* VAD buffer */
-
- /* Scalars */
- Word32 i, j, i_subfr, select, pit_flag, clip_gain, vad_flag;
- Word16 codec_mode;
- Word16 T_op, T_op2, T0, T0_min, T0_max, T0_frac, index;
- Word16 gain_pit, gain_code, g_coeff[4], g_coeff2[4];
- Word16 tmp, gain1, gain2, exp, Q_new, mu, shift, max;
- Word16 voice_fac;
- Word16 indice[8];
- Word32 L_tmp, L_gain_code, L_max, L_tmp1;
- Word16 code2[L_SUBFR]; /* Fixed codebook excitation */
- Word16 stab_fac, fac, gain_code_lo;
-
- Word16 corr_gain;
- Word16 *vo_p0, *vo_p1, *vo_p2, *vo_p3;
-
- st = (Coder_State *) spe_state;
-
- *ser_size = nb_of_bits[*mode];
- codec_mode = *mode;
-
- /*--------------------------------------------------------------------------*
- * Initialize pointers to speech vector. *
- * *
- * *
- * |-------|-------|-------|-------|-------|-------| *
- * past sp sf1 sf2 sf3 sf4 L_NEXT *
- * <------- Total speech buffer (L_TOTAL) ------> *
- * old_speech *
- * <------- LPC analysis window (L_WINDOW) ------> *
- * | <-- present frame (L_FRAME) ----> *
- * p_window | <----- new speech (L_FRAME) ----> *
- * | | *
- * speech | *
- * new_speech *
- *--------------------------------------------------------------------------*/
-
- new_speech = old_speech + L_TOTAL - L_FRAME - L_FILT; /* New speech */
- speech = old_speech + L_TOTAL - L_FRAME - L_NEXT; /* Present frame */
- p_window = old_speech + L_TOTAL - L_WINDOW;
-
- exc = old_exc + PIT_MAX + L_INTERPOL;
- wsp = old_wsp + (PIT_MAX / OPL_DECIM);
-
- /* copy coder memory state into working space */
- Copy(st->old_speech, old_speech, L_TOTAL - L_FRAME);
- Copy(st->old_wsp, old_wsp, PIT_MAX / OPL_DECIM);
- Copy(st->old_exc, old_exc, PIT_MAX + L_INTERPOL);
-
- /*---------------------------------------------------------------*
- * Down sampling signal from 16kHz to 12.8kHz *
- * -> The signal is extended by L_FILT samples (padded to zero) *
- * to avoid additional delay (L_FILT samples) in the coder. *
- * The last L_FILT samples are approximated after decimation and *
- * are used (and windowed) only in autocorrelations. *
- *---------------------------------------------------------------*/
-
- Decim_12k8(speech16k, L_FRAME16k, new_speech, st->mem_decim);
-
- /* last L_FILT samples for autocorrelation window */
- Copy(st->mem_decim, code, 2 * L_FILT16k);
- Set_zero(error, L_FILT16k); /* set next sample to zero */
- Decim_12k8(error, L_FILT16k, new_speech + L_FRAME, code);
-
- /*---------------------------------------------------------------*
- * Perform 50Hz HP filtering of input signal. *
- *---------------------------------------------------------------*/
-
- HP50_12k8(new_speech, L_FRAME, st->mem_sig_in);
-
- /* last L_FILT samples for autocorrelation window */
- Copy(st->mem_sig_in, code, 6);
- HP50_12k8(new_speech + L_FRAME, L_FILT, code);
-
- /*---------------------------------------------------------------*
- * Perform fixed preemphasis through 1 - g z^-1 *
- * Scale signal to get maximum of precision in filtering *
- *---------------------------------------------------------------*/
-
- mu = PREEMPH_FAC >> 1; /* Q15 --> Q14 */
-
- /* get max of new preemphased samples (L_FRAME+L_FILT) */
- L_tmp = new_speech[0] << 15;
- L_tmp -= (st->mem_preemph * mu)<<1;
- L_max = L_abs(L_tmp);
-
- for (i = 1; i < L_FRAME + L_FILT; i++)
- {
- L_tmp = new_speech[i] << 15;
- L_tmp -= (new_speech[i - 1] * mu)<<1;
- L_tmp = L_abs(L_tmp);
- if(L_tmp > L_max)
- {
- L_max = L_tmp;
- }
- }
-
- /* get scaling factor for new and previous samples */
- /* limit scaling to Q_MAX to keep dynamic for ringing in low signal */
- /* limit scaling to Q_MAX also to avoid a[0]<1 in syn_filt_32 */
- tmp = extract_h(L_max);
- if (tmp == 0)
- {
- shift = Q_MAX;
- } else
- {
- shift = norm_s(tmp) - 1;
- if (shift < 0)
- {
- shift = 0;
- }
- if (shift > Q_MAX)
- {
- shift = Q_MAX;
- }
- }
- Q_new = shift;
- if (Q_new > st->Q_max[0])
- {
- Q_new = st->Q_max[0];
- }
- if (Q_new > st->Q_max[1])
- {
- Q_new = st->Q_max[1];
- }
- exp = (Q_new - st->Q_old);
- st->Q_old = Q_new;
- st->Q_max[1] = st->Q_max[0];
- st->Q_max[0] = shift;
-
- /* preemphasis with scaling (L_FRAME+L_FILT) */
- tmp = new_speech[L_FRAME - 1];
-
- for (i = L_FRAME + L_FILT - 1; i > 0; i--)
- {
- L_tmp = new_speech[i] << 15;
- L_tmp -= (new_speech[i - 1] * mu)<<1;
- L_tmp = (L_tmp << Q_new);
- new_speech[i] = vo_round(L_tmp);
- }
-
- L_tmp = new_speech[0] << 15;
- L_tmp -= (st->mem_preemph * mu)<<1;
- L_tmp = (L_tmp << Q_new);
- new_speech[0] = vo_round(L_tmp);
-
- st->mem_preemph = tmp;
-
- /* scale previous samples and memory */
-
- Scale_sig(old_speech, L_TOTAL - L_FRAME - L_FILT, exp);
- Scale_sig(old_exc, PIT_MAX + L_INTERPOL, exp);
- Scale_sig(st->mem_syn, M, exp);
- Scale_sig(st->mem_decim2, 3, exp);
- Scale_sig(&(st->mem_wsp), 1, exp);
- Scale_sig(&(st->mem_w0), 1, exp);
-
- /*------------------------------------------------------------------------*
- * Call VAD *
- * Preemphesis scale down signal in low frequency and keep dynamic in HF.*
- * Vad work slightly in futur (new_speech = speech + L_NEXT - L_FILT). *
- *------------------------------------------------------------------------*/
- Copy(new_speech, buf, L_FRAME);
+ /* Coder states */
+ Coder_State *st;
+ /* Speech vector */
+ Word16 old_speech[L_TOTAL];
+ Word16 *new_speech, *speech, *p_window;
+
+ /* Weighted speech vector */
+ Word16 old_wsp[L_FRAME + (PIT_MAX / OPL_DECIM)];
+ Word16 *wsp;
+
+ /* Excitation vector */
+ Word16 old_exc[(L_FRAME + 1) + PIT_MAX + L_INTERPOL];
+ Word16 *exc;
+
+ /* LPC coefficients */
+ Word16 r_h[M + 1], r_l[M + 1]; /* Autocorrelations of windowed speech */
+ Word16 rc[M]; /* Reflection coefficients. */
+ Word16 Ap[M + 1]; /* A(z) with spectral expansion */
+    Word16 ispnew[M];                      /* immittance spectral pairs at 4th sfr */
+    Word16 ispnew_q[M];                    /* quantized ISPs at 4th subframe       */
+    Word16 isf[M];                         /* ISF (frequency domain) at 4th sfr    */
+ Word16 *p_A, *p_Aq; /* ptr to A(z) for the 4 subframes */
+ Word16 A[NB_SUBFR * (M + 1)]; /* A(z) unquantized for the 4 subframes */
+ Word16 Aq[NB_SUBFR * (M + 1)]; /* A(z) quantized for the 4 subframes */
+
+ /* Other vectors */
+ Word16 xn[L_SUBFR]; /* Target vector for pitch search */
+ Word16 xn2[L_SUBFR]; /* Target vector for codebook search */
+ Word16 dn[L_SUBFR]; /* Correlation between xn2 and h1 */
+ Word16 cn[L_SUBFR]; /* Target vector in residual domain */
+ Word16 h1[L_SUBFR]; /* Impulse response vector */
+ Word16 h2[L_SUBFR]; /* Impulse response vector */
+ Word16 code[L_SUBFR]; /* Fixed codebook excitation */
+ Word16 y1[L_SUBFR]; /* Filtered adaptive excitation */
+ Word16 y2[L_SUBFR]; /* Filtered adaptive excitation */
+ Word16 error[M + L_SUBFR]; /* error of quantization */
+ Word16 synth[L_SUBFR]; /* 12.8kHz synthesis vector */
+ Word16 exc2[L_FRAME]; /* excitation vector */
+ Word16 buf[L_FRAME]; /* VAD buffer */
+
+ /* Scalars */
+ Word32 i, j, i_subfr, select, pit_flag, clip_gain, vad_flag;
+ Word16 codec_mode;
+ Word16 T_op, T_op2, T0, T0_min, T0_max, T0_frac, index;
+ Word16 gain_pit, gain_code, g_coeff[4], g_coeff2[4];
+ Word16 tmp, gain1, gain2, exp, Q_new, mu, shift, max;
+ Word16 voice_fac;
+ Word16 indice[8];
+ Word32 L_tmp, L_gain_code, L_max, L_tmp1;
+ Word16 code2[L_SUBFR]; /* Fixed codebook excitation */
+ Word16 stab_fac, fac, gain_code_lo;
+
+ Word16 corr_gain;
+ Word16 *vo_p0, *vo_p1, *vo_p2, *vo_p3;
+
+ st = (Coder_State *) spe_state;
+
+ *ser_size = nb_of_bits[*mode];
+ codec_mode = *mode;
+
+ /*--------------------------------------------------------------------------*
+ * Initialize pointers to speech vector. *
+ * *
+ * *
+ * |-------|-------|-------|-------|-------|-------| *
+ * past sp sf1 sf2 sf3 sf4 L_NEXT *
+ * <------- Total speech buffer (L_TOTAL) ------> *
+ * old_speech *
+ * <------- LPC analysis window (L_WINDOW) ------> *
+ * | <-- present frame (L_FRAME) ----> *
+ * p_window | <----- new speech (L_FRAME) ----> *
+ * | | *
+ * speech | *
+ * new_speech *
+ *--------------------------------------------------------------------------*/
+
+ new_speech = old_speech + L_TOTAL - L_FRAME - L_FILT; /* New speech */
+ speech = old_speech + L_TOTAL - L_FRAME - L_NEXT; /* Present frame */
+ p_window = old_speech + L_TOTAL - L_WINDOW;
+
+ exc = old_exc + PIT_MAX + L_INTERPOL;
+ wsp = old_wsp + (PIT_MAX / OPL_DECIM);
+
+ /* copy coder memory state into working space */
+ Copy(st->old_speech, old_speech, L_TOTAL - L_FRAME);
+ Copy(st->old_wsp, old_wsp, PIT_MAX / OPL_DECIM);
+ Copy(st->old_exc, old_exc, PIT_MAX + L_INTERPOL);
+
+ /*---------------------------------------------------------------*
+ * Down sampling signal from 16kHz to 12.8kHz *
+ * -> The signal is extended by L_FILT samples (padded to zero) *
+ * to avoid additional delay (L_FILT samples) in the coder. *
+ * The last L_FILT samples are approximated after decimation and *
+ * are used (and windowed) only in autocorrelations. *
+ *---------------------------------------------------------------*/
+
+ Decim_12k8(speech16k, L_FRAME16k, new_speech, st->mem_decim);
+
+ /* last L_FILT samples for autocorrelation window */
+ Copy(st->mem_decim, code, 2 * L_FILT16k);
+ Set_zero(error, L_FILT16k); /* set next sample to zero */
+ Decim_12k8(error, L_FILT16k, new_speech + L_FRAME, code);
+
+ /*---------------------------------------------------------------*
+ * Perform 50Hz HP filtering of input signal. *
+ *---------------------------------------------------------------*/
+
+ HP50_12k8(new_speech, L_FRAME, st->mem_sig_in);
+
+ /* last L_FILT samples for autocorrelation window */
+ Copy(st->mem_sig_in, code, 6);
+ HP50_12k8(new_speech + L_FRAME, L_FILT, code);
+
+ /*---------------------------------------------------------------*
+ * Perform fixed preemphasis through 1 - g z^-1 *
+ * Scale signal to get maximum of precision in filtering *
+ *---------------------------------------------------------------*/
+
+ mu = PREEMPH_FAC >> 1; /* Q15 --> Q14 */
+
+ /* get max of new preemphased samples (L_FRAME+L_FILT) */
+ L_tmp = new_speech[0] << 15;
+ L_tmp -= (st->mem_preemph * mu)<<1;
+ L_max = L_abs(L_tmp);
+
+ for (i = 1; i < L_FRAME + L_FILT; i++)
+ {
+ L_tmp = new_speech[i] << 15;
+ L_tmp -= (new_speech[i - 1] * mu)<<1;
+ L_tmp = L_abs(L_tmp);
+ if(L_tmp > L_max)
+ {
+ L_max = L_tmp;
+ }
+ }
+
+ /* get scaling factor for new and previous samples */
+ /* limit scaling to Q_MAX to keep dynamic for ringing in low signal */
+ /* limit scaling to Q_MAX also to avoid a[0]<1 in syn_filt_32 */
+ tmp = extract_h(L_max);
+ if (tmp == 0)
+ {
+ shift = Q_MAX;
+ } else
+ {
+ shift = norm_s(tmp) - 1;
+ if (shift < 0)
+ {
+ shift = 0;
+ }
+ if (shift > Q_MAX)
+ {
+ shift = Q_MAX;
+ }
+ }
+ Q_new = shift;
+ if (Q_new > st->Q_max[0])
+ {
+ Q_new = st->Q_max[0];
+ }
+ if (Q_new > st->Q_max[1])
+ {
+ Q_new = st->Q_max[1];
+ }
+ exp = (Q_new - st->Q_old);
+ st->Q_old = Q_new;
+ st->Q_max[1] = st->Q_max[0];
+ st->Q_max[0] = shift;
+
+ /* preemphasis with scaling (L_FRAME+L_FILT) */
+ tmp = new_speech[L_FRAME - 1];
+
+ for (i = L_FRAME + L_FILT - 1; i > 0; i--)
+ {
+ L_tmp = new_speech[i] << 15;
+ L_tmp -= (new_speech[i - 1] * mu)<<1;
+ L_tmp = (L_tmp << Q_new);
+ new_speech[i] = vo_round(L_tmp);
+ }
+
+ L_tmp = new_speech[0] << 15;
+ L_tmp -= (st->mem_preemph * mu)<<1;
+ L_tmp = (L_tmp << Q_new);
+ new_speech[0] = vo_round(L_tmp);
+
+ st->mem_preemph = tmp;
+
+ /* scale previous samples and memory */
+
+ Scale_sig(old_speech, L_TOTAL - L_FRAME - L_FILT, exp);
+ Scale_sig(old_exc, PIT_MAX + L_INTERPOL, exp);
+ Scale_sig(st->mem_syn, M, exp);
+ Scale_sig(st->mem_decim2, 3, exp);
+ Scale_sig(&(st->mem_wsp), 1, exp);
+ Scale_sig(&(st->mem_w0), 1, exp);
+
+ /*------------------------------------------------------------------------*
+ * Call VAD *
+     * Preemphasis scales the signal down at low frequencies and keeps the dynamics in HF. *
+     * VAD works slightly in the future (new_speech = speech + L_NEXT - L_FILT).           *
+ *------------------------------------------------------------------------*/
+ Copy(new_speech, buf, L_FRAME);
#ifdef ASM_OPT /* asm optimization branch */
- Scale_sig_opt(buf, L_FRAME, 1 - Q_new);
+ Scale_sig_opt(buf, L_FRAME, 1 - Q_new);
#else
- Scale_sig(buf, L_FRAME, 1 - Q_new);
+ Scale_sig(buf, L_FRAME, 1 - Q_new);
#endif
- vad_flag = wb_vad(st->vadSt, buf); /* Voice Activity Detection */
- if (vad_flag == 0)
- {
- st->vad_hist = (st->vad_hist + 1);
- } else
- {
- st->vad_hist = 0;
- }
-
- /* DTX processing */
- if (allow_dtx != 0)
- {
- /* Note that mode may change here */
- tx_dtx_handler(st->dtx_encSt, vad_flag, mode);
- *ser_size = nb_of_bits[*mode];
- }
-
- if(*mode != MRDTX)
- {
- Parm_serial(vad_flag, 1, &prms);
- }
- /*------------------------------------------------------------------------*
- * Perform LPC analysis *
- * ~~~~~~~~~~~~~~~~~~~~ *
- * - autocorrelation + lag windowing *
- * - Levinson-durbin algorithm to find a[] *
- * - convert a[] to isp[] *
- * - convert isp[] to isf[] for quantization *
- * - quantize and code the isf[] *
- * - convert isf[] to isp[] for interpolation *
- * - find the interpolated ISPs and convert to a[] for the 4 subframes *
- *------------------------------------------------------------------------*/
-
- /* LP analysis centered at 4nd subframe */
- Autocorr(p_window, M, r_h, r_l); /* Autocorrelations */
- Lag_window(r_h, r_l); /* Lag windowing */
- Levinson(r_h, r_l, A, rc, st->mem_levinson); /* Levinson Durbin */
- Az_isp(A, ispnew, st->ispold); /* From A(z) to ISP */
-
- /* Find the interpolated ISPs and convert to a[] for all subframes */
- Int_isp(st->ispold, ispnew, interpol_frac, A);
-
- /* update ispold[] for the next frame */
- Copy(ispnew, st->ispold, M);
-
- /* Convert ISPs to frequency domain 0..6400 */
- Isp_isf(ispnew, isf, M);
-
- /* check resonance for pitch clipping algorithm */
- Gp_clip_test_isf(isf, st->gp_clip);
-
- /*----------------------------------------------------------------------*
- * Perform PITCH_OL analysis *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~ *
- * - Find the residual res[] for the whole speech frame *
- * - Find the weighted input speech wsp[] for the whole speech frame *
- * - scale wsp[] to avoid overflow in pitch estimation *
- * - Find open loop pitch lag for whole speech frame *
- *----------------------------------------------------------------------*/
- p_A = A;
- for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
- {
- /* Weighting of LPC coefficients */
- Weight_a(p_A, Ap, GAMMA1, M);
+ vad_flag = wb_vad(st->vadSt, buf); /* Voice Activity Detection */
+ if (vad_flag == 0)
+ {
+ st->vad_hist = (st->vad_hist + 1);
+ } else
+ {
+ st->vad_hist = 0;
+ }
+
+ /* DTX processing */
+ if (allow_dtx != 0)
+ {
+ /* Note that mode may change here */
+ tx_dtx_handler(st->dtx_encSt, vad_flag, mode);
+ *ser_size = nb_of_bits[*mode];
+ }
+
+ if(*mode != MRDTX)
+ {
+ Parm_serial(vad_flag, 1, &prms);
+ }
+ /*------------------------------------------------------------------------*
+ * Perform LPC analysis *
+ * ~~~~~~~~~~~~~~~~~~~~ *
+ * - autocorrelation + lag windowing *
+ * - Levinson-durbin algorithm to find a[] *
+ * - convert a[] to isp[] *
+ * - convert isp[] to isf[] for quantization *
+ * - quantize and code the isf[] *
+ * - convert isf[] to isp[] for interpolation *
+ * - find the interpolated ISPs and convert to a[] for the 4 subframes *
+ *------------------------------------------------------------------------*/
+
+    /* LP analysis centered at 4th subframe */
+ Autocorr(p_window, M, r_h, r_l); /* Autocorrelations */
+ Lag_window(r_h, r_l); /* Lag windowing */
+ Levinson(r_h, r_l, A, rc, st->mem_levinson); /* Levinson Durbin */
+ Az_isp(A, ispnew, st->ispold); /* From A(z) to ISP */
+
+ /* Find the interpolated ISPs and convert to a[] for all subframes */
+ Int_isp(st->ispold, ispnew, interpol_frac, A);
+
+ /* update ispold[] for the next frame */
+ Copy(ispnew, st->ispold, M);
+
+ /* Convert ISPs to frequency domain 0..6400 */
+ Isp_isf(ispnew, isf, M);
+
+ /* check resonance for pitch clipping algorithm */
+ Gp_clip_test_isf(isf, st->gp_clip);
+
+ /*----------------------------------------------------------------------*
+ * Perform PITCH_OL analysis *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~ *
+ * - Find the residual res[] for the whole speech frame *
+ * - Find the weighted input speech wsp[] for the whole speech frame *
+ * - scale wsp[] to avoid overflow in pitch estimation *
+ * - Find open loop pitch lag for whole speech frame *
+ *----------------------------------------------------------------------*/
+ p_A = A;
+ for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+ {
+ /* Weighting of LPC coefficients */
+ Weight_a(p_A, Ap, GAMMA1, M);
#ifdef ASM_OPT /* asm optimization branch */
- Residu_opt(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
+ Residu_opt(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
#else
- Residu(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
+ Residu(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
#endif
- p_A += (M + 1);
- }
-
- Deemph2(wsp, TILT_FAC, L_FRAME, &(st->mem_wsp));
-
- /* find maximum value on wsp[] for 12 bits scaling */
- max = 0;
- for (i = 0; i < L_FRAME; i++)
- {
- tmp = abs_s(wsp[i]);
- if(tmp > max)
- {
- max = tmp;
- }
- }
- tmp = st->old_wsp_max;
- if(max > tmp)
- {
- tmp = max; /* tmp = max(wsp_max, old_wsp_max) */
- }
- st->old_wsp_max = max;
-
- shift = norm_s(tmp) - 3;
- if (shift > 0)
- {
- shift = 0; /* shift = 0..-3 */
- }
- /* decimation of wsp[] to search pitch in LF and to reduce complexity */
- LP_Decim2(wsp, L_FRAME, st->mem_decim2);
-
- /* scale wsp[] in 12 bits to avoid overflow */
+ p_A += (M + 1);
+ }
+
+ Deemph2(wsp, TILT_FAC, L_FRAME, &(st->mem_wsp));
+
+ /* find maximum value on wsp[] for 12 bits scaling */
+ max = 0;
+ for (i = 0; i < L_FRAME; i++)
+ {
+ tmp = abs_s(wsp[i]);
+ if(tmp > max)
+ {
+ max = tmp;
+ }
+ }
+ tmp = st->old_wsp_max;
+ if(max > tmp)
+ {
+ tmp = max; /* tmp = max(wsp_max, old_wsp_max) */
+ }
+ st->old_wsp_max = max;
+
+ shift = norm_s(tmp) - 3;
+ if (shift > 0)
+ {
+ shift = 0; /* shift = 0..-3 */
+ }
+ /* decimation of wsp[] to search pitch in LF and to reduce complexity */
+ LP_Decim2(wsp, L_FRAME, st->mem_decim2);
+
+ /* scale wsp[] in 12 bits to avoid overflow */
#ifdef ASM_OPT /* asm optimization branch */
- Scale_sig_opt(wsp, L_FRAME / OPL_DECIM, shift);
+ Scale_sig_opt(wsp, L_FRAME / OPL_DECIM, shift);
#else
- Scale_sig(wsp, L_FRAME / OPL_DECIM, shift);
+ Scale_sig(wsp, L_FRAME / OPL_DECIM, shift);
#endif
- /* scale old_wsp (warning: exp must be Q_new-Q_old) */
- exp = exp + (shift - st->old_wsp_shift);
- st->old_wsp_shift = shift;
-
- Scale_sig(old_wsp, PIT_MAX / OPL_DECIM, exp);
- Scale_sig(st->old_hp_wsp, PIT_MAX / OPL_DECIM, exp);
-
- scale_mem_Hp_wsp(st->hp_wsp_mem, exp);
-
- /* Find open loop pitch lag for whole speech frame */
-
- if(*ser_size == NBBITS_7k)
- {
- /* Find open loop pitch lag for whole speech frame */
- T_op = Pitch_med_ol(wsp, st, L_FRAME / OPL_DECIM);
- } else
- {
- /* Find open loop pitch lag for first 1/2 frame */
- T_op = Pitch_med_ol(wsp, st, (L_FRAME/2) / OPL_DECIM);
- }
-
- if(st->ol_gain > 19661) /* 0.6 in Q15 */
- {
- st->old_T0_med = Med_olag(T_op, st->old_ol_lag);
- st->ada_w = 32767;
- } else
- {
- st->ada_w = vo_mult(st->ada_w, 29491);
- }
-
- if(st->ada_w < 26214)
- st->ol_wght_flg = 0;
- else
- st->ol_wght_flg = 1;
-
- wb_vad_tone_detection(st->vadSt, st->ol_gain);
- T_op *= OPL_DECIM;
-
- if(*ser_size != NBBITS_7k)
- {
- /* Find open loop pitch lag for second 1/2 frame */
- T_op2 = Pitch_med_ol(wsp + ((L_FRAME / 2) / OPL_DECIM), st, (L_FRAME/2) / OPL_DECIM);
-
- if(st->ol_gain > 19661) /* 0.6 in Q15 */
- {
- st->old_T0_med = Med_olag(T_op2, st->old_ol_lag);
- st->ada_w = 32767;
- } else
- {
- st->ada_w = mult(st->ada_w, 29491);
- }
-
- if(st->ada_w < 26214)
- st->ol_wght_flg = 0;
- else
- st->ol_wght_flg = 1;
-
- wb_vad_tone_detection(st->vadSt, st->ol_gain);
-
- T_op2 *= OPL_DECIM;
-
- } else
- {
- T_op2 = T_op;
- }
- /*----------------------------------------------------------------------*
- * DTX-CNG *
- *----------------------------------------------------------------------*/
- if(*mode == MRDTX) /* CNG mode */
- {
- /* Buffer isf's and energy */
+ /* scale old_wsp (warning: exp must be Q_new-Q_old) */
+ exp = exp + (shift - st->old_wsp_shift);
+ st->old_wsp_shift = shift;
+
+ Scale_sig(old_wsp, PIT_MAX / OPL_DECIM, exp);
+ Scale_sig(st->old_hp_wsp, PIT_MAX / OPL_DECIM, exp);
+
+ scale_mem_Hp_wsp(st->hp_wsp_mem, exp);
+
+ /* Find open loop pitch lag for whole speech frame */
+
+ if(*ser_size == NBBITS_7k)
+ {
+ /* Find open loop pitch lag for whole speech frame */
+ T_op = Pitch_med_ol(wsp, st, L_FRAME / OPL_DECIM);
+ } else
+ {
+ /* Find open loop pitch lag for first 1/2 frame */
+ T_op = Pitch_med_ol(wsp, st, (L_FRAME/2) / OPL_DECIM);
+ }
+
+ if(st->ol_gain > 19661) /* 0.6 in Q15 */
+ {
+ st->old_T0_med = Med_olag(T_op, st->old_ol_lag);
+ st->ada_w = 32767;
+ } else
+ {
+ st->ada_w = vo_mult(st->ada_w, 29491);
+ }
+
+ if(st->ada_w < 26214)
+ st->ol_wght_flg = 0;
+ else
+ st->ol_wght_flg = 1;
+
+ wb_vad_tone_detection(st->vadSt, st->ol_gain);
+ T_op *= OPL_DECIM;
+
+ if(*ser_size != NBBITS_7k)
+ {
+ /* Find open loop pitch lag for second 1/2 frame */
+ T_op2 = Pitch_med_ol(wsp + ((L_FRAME / 2) / OPL_DECIM), st, (L_FRAME/2) / OPL_DECIM);
+
+ if(st->ol_gain > 19661) /* 0.6 in Q15 */
+ {
+ st->old_T0_med = Med_olag(T_op2, st->old_ol_lag);
+ st->ada_w = 32767;
+ } else
+ {
+ st->ada_w = mult(st->ada_w, 29491);
+ }
+
+ if(st->ada_w < 26214)
+ st->ol_wght_flg = 0;
+ else
+ st->ol_wght_flg = 1;
+
+ wb_vad_tone_detection(st->vadSt, st->ol_gain);
+
+ T_op2 *= OPL_DECIM;
+
+ } else
+ {
+ T_op2 = T_op;
+ }
+ /*----------------------------------------------------------------------*
+ * DTX-CNG *
+ *----------------------------------------------------------------------*/
+ if(*mode == MRDTX) /* CNG mode */
+ {
+ /* Buffer isf's and energy */
#ifdef ASM_OPT /* asm optimization branch */
- Residu_opt(&A[3 * (M + 1)], speech, exc, L_FRAME);
+ Residu_opt(&A[3 * (M + 1)], speech, exc, L_FRAME);
#else
- Residu(&A[3 * (M + 1)], speech, exc, L_FRAME);
+ Residu(&A[3 * (M + 1)], speech, exc, L_FRAME);
#endif
- for (i = 0; i < L_FRAME; i++)
- {
- exc2[i] = shr(exc[i], Q_new);
- }
+ for (i = 0; i < L_FRAME; i++)
+ {
+ exc2[i] = shr(exc[i], Q_new);
+ }
- L_tmp = 0;
- for (i = 0; i < L_FRAME; i++)
- L_tmp += (exc2[i] * exc2[i])<<1;
-
- L_tmp >>= 1;
-
- dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
-
- /* Quantize and code the ISFs */
- dtx_enc(st->dtx_encSt, isf, exc2, &prms);
-
- /* Convert ISFs to the cosine domain */
- Isf_isp(isf, ispnew_q, M);
- Isp_Az(ispnew_q, Aq, M, 0);
-
- for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
- {
- corr_gain = synthesis(Aq, &exc2[i_subfr], 0, &speech16k[i_subfr * 5 / 4], st);
- }
- Copy(isf, st->isfold, M);
-
- /* reset speech coder memories */
- Reset_encoder(st, 0);
-
- /*--------------------------------------------------*
- * Update signal for next frame. *
- * -> save past of speech[] and wsp[]. *
- *--------------------------------------------------*/
-
- Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
- Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
-
- return;
- }
- /*----------------------------------------------------------------------*
- * ACELP *
- *----------------------------------------------------------------------*/
-
- /* Quantize and code the ISFs */
-
- if (*ser_size <= NBBITS_7k)
- {
- Qpisf_2s_36b(isf, isf, st->past_isfq, indice, 4);
-
- Parm_serial(indice[0], 8, &prms);
- Parm_serial(indice[1], 8, &prms);
- Parm_serial(indice[2], 7, &prms);
- Parm_serial(indice[3], 7, &prms);
- Parm_serial(indice[4], 6, &prms);
- } else
- {
- Qpisf_2s_46b(isf, isf, st->past_isfq, indice, 4);
-
- Parm_serial(indice[0], 8, &prms);
- Parm_serial(indice[1], 8, &prms);
- Parm_serial(indice[2], 6, &prms);
- Parm_serial(indice[3], 7, &prms);
- Parm_serial(indice[4], 7, &prms);
- Parm_serial(indice[5], 5, &prms);
- Parm_serial(indice[6], 5, &prms);
- }
-
- /* Check stability on isf : distance between old isf and current isf */
-
- L_tmp = 0;
- for (i = 0; i < M - 1; i++)
- {
- tmp = vo_sub(isf[i], st->isfold[i]);
- L_tmp += (tmp * tmp)<<1;
- }
-
- tmp = extract_h(L_shl2(L_tmp, 8));
-
- tmp = vo_mult(tmp, 26214); /* tmp = L_tmp*0.8/256 */
- tmp = vo_sub(20480, tmp); /* 1.25 - tmp (in Q14) */
-
- stab_fac = shl(tmp, 1);
-
- if (stab_fac < 0)
- {
- stab_fac = 0;
- }
- Copy(isf, st->isfold, M);
-
- /* Convert ISFs to the cosine domain */
- Isf_isp(isf, ispnew_q, M);
-
- if (st->first_frame != 0)
- {
- st->first_frame = 0;
- Copy(ispnew_q, st->ispold_q, M);
- }
- /* Find the interpolated ISPs and convert to a[] for all subframes */
-
- Int_isp(st->ispold_q, ispnew_q, interpol_frac, Aq);
-
- /* update ispold[] for the next frame */
- Copy(ispnew_q, st->ispold_q, M);
-
- p_Aq = Aq;
- for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
- {
+ L_tmp = 0;
+ for (i = 0; i < L_FRAME; i++)
+ L_tmp += (exc2[i] * exc2[i])<<1;
+
+ L_tmp >>= 1;
+
+ dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
+
+ /* Quantize and code the ISFs */
+ dtx_enc(st->dtx_encSt, isf, exc2, &prms);
+
+ /* Convert ISFs to the cosine domain */
+ Isf_isp(isf, ispnew_q, M);
+ Isp_Az(ispnew_q, Aq, M, 0);
+
+ for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+ {
+ corr_gain = synthesis(Aq, &exc2[i_subfr], 0, &speech16k[i_subfr * 5 / 4], st);
+ }
+ Copy(isf, st->isfold, M);
+
+ /* reset speech coder memories */
+ Reset_encoder(st, 0);
+
+ /*--------------------------------------------------*
+ * Update signal for next frame. *
+ * -> save past of speech[] and wsp[]. *
+ *--------------------------------------------------*/
+
+ Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
+ Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
+
+ return;
+ }
+ /*----------------------------------------------------------------------*
+ * ACELP *
+ *----------------------------------------------------------------------*/
+
+ /* Quantize and code the ISFs */
+
+ if (*ser_size <= NBBITS_7k)
+ {
+ Qpisf_2s_36b(isf, isf, st->past_isfq, indice, 4);
+
+ Parm_serial(indice[0], 8, &prms);
+ Parm_serial(indice[1], 8, &prms);
+ Parm_serial(indice[2], 7, &prms);
+ Parm_serial(indice[3], 7, &prms);
+ Parm_serial(indice[4], 6, &prms);
+ } else
+ {
+ Qpisf_2s_46b(isf, isf, st->past_isfq, indice, 4);
+
+ Parm_serial(indice[0], 8, &prms);
+ Parm_serial(indice[1], 8, &prms);
+ Parm_serial(indice[2], 6, &prms);
+ Parm_serial(indice[3], 7, &prms);
+ Parm_serial(indice[4], 7, &prms);
+ Parm_serial(indice[5], 5, &prms);
+ Parm_serial(indice[6], 5, &prms);
+ }
+
+ /* Check stability on isf : distance between old isf and current isf */
+
+ L_tmp = 0;
+ for (i = 0; i < M - 1; i++)
+ {
+ tmp = vo_sub(isf[i], st->isfold[i]);
+ L_tmp += (tmp * tmp)<<1;
+ }
+
+ tmp = extract_h(L_shl2(L_tmp, 8));
+
+ tmp = vo_mult(tmp, 26214); /* tmp = L_tmp*0.8/256 */
+ tmp = vo_sub(20480, tmp); /* 1.25 - tmp (in Q14) */
+
+ stab_fac = shl(tmp, 1);
+
+ if (stab_fac < 0)
+ {
+ stab_fac = 0;
+ }
+ Copy(isf, st->isfold, M);
+
+ /* Convert ISFs to the cosine domain */
+ Isf_isp(isf, ispnew_q, M);
+
+ if (st->first_frame != 0)
+ {
+ st->first_frame = 0;
+ Copy(ispnew_q, st->ispold_q, M);
+ }
+ /* Find the interpolated ISPs and convert to a[] for all subframes */
+
+ Int_isp(st->ispold_q, ispnew_q, interpol_frac, Aq);
+
+ /* update ispold[] for the next frame */
+ Copy(ispnew_q, st->ispold_q, M);
+
+ p_Aq = Aq;
+ for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+ {
#ifdef ASM_OPT /* asm optimization branch */
- Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+ Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
#else
- Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+ Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
#endif
- p_Aq += (M + 1);
- }
-
- /* Buffer isf's and energy for dtx on non-speech frame */
- if (vad_flag == 0)
- {
- for (i = 0; i < L_FRAME; i++)
- {
- exc2[i] = exc[i] >> Q_new;
- }
- L_tmp = 0;
- for (i = 0; i < L_FRAME; i++)
- L_tmp += (exc2[i] * exc2[i])<<1;
- L_tmp >>= 1;
-
- dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
- }
- /* range for closed loop pitch search in 1st subframe */
-
- T0_min = T_op - 8;
- if (T0_min < PIT_MIN)
- {
- T0_min = PIT_MIN;
- }
- T0_max = (T0_min + 15);
-
- if(T0_max > PIT_MAX)
- {
- T0_max = PIT_MAX;
- T0_min = T0_max - 15;
- }
- /*------------------------------------------------------------------------*
- * Loop for every subframe in the analysis frame *
- *------------------------------------------------------------------------*
- * To find the pitch and innovation parameters. The subframe size is *
- * L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times. *
- * - compute the target signal for pitch search *
- * - compute impulse response of weighted synthesis filter (h1[]) *
- * - find the closed-loop pitch parameters *
- * - encode the pitch dealy *
- * - find 2 lt prediction (with / without LP filter for lt pred) *
- * - find 2 pitch gains and choose the best lt prediction. *
- * - find target vector for codebook search *
- * - update the impulse response h1[] for codebook search *
- * - correlation between target vector and impulse response *
- * - codebook search and encoding *
- * - VQ of pitch and codebook gains *
- * - find voicing factor and tilt of code for next subframe. *
- * - update states of weighting filter *
- * - find excitation and synthesis speech *
- *------------------------------------------------------------------------*/
- p_A = A;
- p_Aq = Aq;
- for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
- {
- pit_flag = i_subfr;
- if ((i_subfr == 2 * L_SUBFR) && (*ser_size > NBBITS_7k))
- {
- pit_flag = 0;
- /* range for closed loop pitch search in 3rd subframe */
- T0_min = (T_op2 - 8);
-
- if (T0_min < PIT_MIN)
- {
- T0_min = PIT_MIN;
- }
- T0_max = (T0_min + 15);
- if (T0_max > PIT_MAX)
- {
- T0_max = PIT_MAX;
- T0_min = (T0_max - 15);
- }
- }
- /*-----------------------------------------------------------------------*
- * *
- * Find the target vector for pitch search: *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *
- * *
- * |------| res[n] *
- * speech[n]---| A(z) |-------- *
- * |------| | |--------| error[n] |------| *
- * zero -- (-)--| 1/A(z) |-----------| W(z) |-- target *
- * exc |--------| |------| *
- * *
- * Instead of subtracting the zero-input response of filters from *
- * the weighted input speech, the above configuration is used to *
- * compute the target vector. *
- * *
- *-----------------------------------------------------------------------*/
-
- for (i = 0; i < M; i++)
- {
- error[i] = vo_sub(speech[i + i_subfr - M], st->mem_syn[i]);
- }
+ p_Aq += (M + 1);
+ }
+
+ /* Buffer isf's and energy for dtx on non-speech frame */
+ if (vad_flag == 0)
+ {
+ for (i = 0; i < L_FRAME; i++)
+ {
+ exc2[i] = exc[i] >> Q_new;
+ }
+ L_tmp = 0;
+ for (i = 0; i < L_FRAME; i++) {
+ Word32 tmp = L_mult(exc2[i], exc2[i]); // (exc2[i] * exc2[i])<<1;
+ L_tmp = L_add(L_tmp, tmp);
+ }
+ L_tmp >>= 1;
+
+ dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
+ }
+ /* range for closed loop pitch search in 1st subframe */
+
+ T0_min = T_op - 8;
+ if (T0_min < PIT_MIN)
+ {
+ T0_min = PIT_MIN;
+ }
+ T0_max = (T0_min + 15);
+
+ if(T0_max > PIT_MAX)
+ {
+ T0_max = PIT_MAX;
+ T0_min = T0_max - 15;
+ }
+ /*------------------------------------------------------------------------*
+ * Loop for every subframe in the analysis frame *
+ *------------------------------------------------------------------------*
+ * To find the pitch and innovation parameters. The subframe size is *
+ * L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times. *
+ * - compute the target signal for pitch search *
+ * - compute impulse response of weighted synthesis filter (h1[]) *
+ * - find the closed-loop pitch parameters *
+     * - encode the pitch delay                                              *
+ * - find 2 lt prediction (with / without LP filter for lt pred) *
+ * - find 2 pitch gains and choose the best lt prediction. *
+ * - find target vector for codebook search *
+ * - update the impulse response h1[] for codebook search *
+ * - correlation between target vector and impulse response *
+ * - codebook search and encoding *
+ * - VQ of pitch and codebook gains *
+ * - find voicing factor and tilt of code for next subframe. *
+ * - update states of weighting filter *
+ * - find excitation and synthesis speech *
+ *------------------------------------------------------------------------*/
+ p_A = A;
+ p_Aq = Aq;
+ for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+ {
+ pit_flag = i_subfr;
+ if ((i_subfr == 2 * L_SUBFR) && (*ser_size > NBBITS_7k))
+ {
+ pit_flag = 0;
+ /* range for closed loop pitch search in 3rd subframe */
+ T0_min = (T_op2 - 8);
+
+ if (T0_min < PIT_MIN)
+ {
+ T0_min = PIT_MIN;
+ }
+ T0_max = (T0_min + 15);
+ if (T0_max > PIT_MAX)
+ {
+ T0_max = PIT_MAX;
+ T0_min = (T0_max - 15);
+ }
+ }
+ /*-----------------------------------------------------------------------*
+ * *
+ * Find the target vector for pitch search: *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *
+ * *
+ * |------| res[n] *
+ * speech[n]---| A(z) |-------- *
+ * |------| | |--------| error[n] |------| *
+ * zero -- (-)--| 1/A(z) |-----------| W(z) |-- target *
+ * exc |--------| |------| *
+ * *
+ * Instead of subtracting the zero-input response of filters from *
+ * the weighted input speech, the above configuration is used to *
+ * compute the target vector. *
+ * *
+ *-----------------------------------------------------------------------*/
+
+ for (i = 0; i < M; i++)
+ {
+ error[i] = vo_sub(speech[i + i_subfr - M], st->mem_syn[i]);
+ }
#ifdef ASM_OPT /* asm optimization branch */
- Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+ Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
#else
- Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+ Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
#endif
- Syn_filt(p_Aq, &exc[i_subfr], error + M, L_SUBFR, error, 0);
- Weight_a(p_A, Ap, GAMMA1, M);
+ Syn_filt(p_Aq, &exc[i_subfr], error + M, L_SUBFR, error, 0);
+ Weight_a(p_A, Ap, GAMMA1, M);
#ifdef ASM_OPT /* asm optimization branch */
- Residu_opt(Ap, error + M, xn, L_SUBFR);
+ Residu_opt(Ap, error + M, xn, L_SUBFR);
#else
- Residu(Ap, error + M, xn, L_SUBFR);
+ Residu(Ap, error + M, xn, L_SUBFR);
#endif
- Deemph2(xn, TILT_FAC, L_SUBFR, &(st->mem_w0));
-
- /*----------------------------------------------------------------------*
- * Find approx. target in residual domain "cn[]" for inovation search. *
- *----------------------------------------------------------------------*/
- /* first half: xn[] --> cn[] */
- Set_zero(code, M);
- Copy(xn, code + M, L_SUBFR / 2);
- tmp = 0;
- Preemph2(code + M, TILT_FAC, L_SUBFR / 2, &tmp);
- Weight_a(p_A, Ap, GAMMA1, M);
- Syn_filt(Ap,code + M, code + M, L_SUBFR / 2, code, 0);
+ Deemph2(xn, TILT_FAC, L_SUBFR, &(st->mem_w0));
+
+ /*----------------------------------------------------------------------*
+         * Find approx. target in residual domain "cn[]" for innovation search. *
+ *----------------------------------------------------------------------*/
+ /* first half: xn[] --> cn[] */
+ Set_zero(code, M);
+ Copy(xn, code + M, L_SUBFR / 2);
+ tmp = 0;
+ Preemph2(code + M, TILT_FAC, L_SUBFR / 2, &tmp);
+ Weight_a(p_A, Ap, GAMMA1, M);
+ Syn_filt(Ap,code + M, code + M, L_SUBFR / 2, code, 0);
#ifdef ASM_OPT /* asm optimization branch */
- Residu_opt(p_Aq,code + M, cn, L_SUBFR / 2);
+ Residu_opt(p_Aq,code + M, cn, L_SUBFR / 2);
#else
- Residu(p_Aq,code + M, cn, L_SUBFR / 2);
+ Residu(p_Aq,code + M, cn, L_SUBFR / 2);
#endif
- /* second half: res[] --> cn[] (approximated and faster) */
- Copy(&exc[i_subfr + (L_SUBFR / 2)], cn + (L_SUBFR / 2), L_SUBFR / 2);
-
- /*---------------------------------------------------------------*
- * Compute impulse response, h1[], of weighted synthesis filter *
- *---------------------------------------------------------------*/
-
- Set_zero(error, M + L_SUBFR);
- Weight_a(p_A, error + M, GAMMA1, M);
-
- vo_p0 = error+M;
- vo_p3 = h1;
- for (i = 0; i < L_SUBFR; i++)
- {
- L_tmp = *vo_p0 << 14; /* x4 (Q12 to Q14) */
- vo_p1 = p_Aq + 1;
- vo_p2 = vo_p0-1;
- for (j = 1; j <= M/4; j++)
- {
- L_tmp -= *vo_p1++ * *vo_p2--;
- L_tmp -= *vo_p1++ * *vo_p2--;
- L_tmp -= *vo_p1++ * *vo_p2--;
- L_tmp -= *vo_p1++ * *vo_p2--;
- }
- *vo_p3++ = *vo_p0++ = vo_round((L_tmp <<4));
- }
- /* deemph without division by 2 -> Q14 to Q15 */
- tmp = 0;
- Deemph2(h1, TILT_FAC, L_SUBFR, &tmp); /* h1 in Q14 */
-
- /* h2 in Q12 for codebook search */
- Copy(h1, h2, L_SUBFR);
-
- /*---------------------------------------------------------------*
- * scale xn[] and h1[] to avoid overflow in dot_product12() *
- *---------------------------------------------------------------*/
+ /* second half: res[] --> cn[] (approximated and faster) */
+ Copy(&exc[i_subfr + (L_SUBFR / 2)], cn + (L_SUBFR / 2), L_SUBFR / 2);
+
+ /*---------------------------------------------------------------*
+ * Compute impulse response, h1[], of weighted synthesis filter *
+ *---------------------------------------------------------------*/
+
+ Set_zero(error, M + L_SUBFR);
+ Weight_a(p_A, error + M, GAMMA1, M);
+
+ vo_p0 = error+M;
+ vo_p3 = h1;
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ L_tmp = *vo_p0 << 14; /* x4 (Q12 to Q14) */
+ vo_p1 = p_Aq + 1;
+ vo_p2 = vo_p0-1;
+ for (j = 1; j <= M/4; j++)
+ {
+ L_tmp -= *vo_p1++ * *vo_p2--;
+ L_tmp -= *vo_p1++ * *vo_p2--;
+ L_tmp -= *vo_p1++ * *vo_p2--;
+ L_tmp -= *vo_p1++ * *vo_p2--;
+ }
+ *vo_p3++ = *vo_p0++ = vo_round((L_tmp <<4));
+ }
+ /* deemph without division by 2 -> Q14 to Q15 */
+ tmp = 0;
+ Deemph2(h1, TILT_FAC, L_SUBFR, &tmp); /* h1 in Q14 */
+
+ /* h2 in Q12 for codebook search */
+ Copy(h1, h2, L_SUBFR);
+
+ /*---------------------------------------------------------------*
+ * scale xn[] and h1[] to avoid overflow in dot_product12() *
+ *---------------------------------------------------------------*/
#ifdef ASM_OPT /* asm optimization branch */
- Scale_sig_opt(h2, L_SUBFR, -2);
- Scale_sig_opt(xn, L_SUBFR, shift); /* scaling of xn[] to limit dynamic at 12 bits */
- Scale_sig_opt(h1, L_SUBFR, 1 + shift); /* set h1[] in Q15 with scaling for convolution */
+ Scale_sig_opt(h2, L_SUBFR, -2);
+ Scale_sig_opt(xn, L_SUBFR, shift); /* scaling of xn[] to limit dynamic at 12 bits */
+ Scale_sig_opt(h1, L_SUBFR, 1 + shift); /* set h1[] in Q15 with scaling for convolution */
#else
- Scale_sig(h2, L_SUBFR, -2);
- Scale_sig(xn, L_SUBFR, shift); /* scaling of xn[] to limit dynamic at 12 bits */
- Scale_sig(h1, L_SUBFR, 1 + shift); /* set h1[] in Q15 with scaling for convolution */
+ Scale_sig(h2, L_SUBFR, -2);
+ Scale_sig(xn, L_SUBFR, shift); /* scaling of xn[] to limit dynamic at 12 bits */
+ Scale_sig(h1, L_SUBFR, 1 + shift); /* set h1[] in Q15 with scaling for convolution */
#endif
- /*----------------------------------------------------------------------*
- * Closed-loop fractional pitch search *
- *----------------------------------------------------------------------*/
- /* find closed loop fractional pitch lag */
- if(*ser_size <= NBBITS_9k)
- {
- T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
- pit_flag, PIT_MIN, PIT_FR1_8b, L_SUBFR);
-
- /* encode pitch lag */
- if (pit_flag == 0) /* if 1st/3rd subframe */
- {
- /*--------------------------------------------------------------*
- * The pitch range for the 1st/3rd subframe is encoded with *
- * 8 bits and is divided as follows: *
- * PIT_MIN to PIT_FR1-1 resolution 1/2 (frac = 0 or 2) *
- * PIT_FR1 to PIT_MAX resolution 1 (frac = 0) *
- *--------------------------------------------------------------*/
- if (T0 < PIT_FR1_8b)
- {
- index = ((T0 << 1) + (T0_frac >> 1) - (PIT_MIN<<1));
- } else
- {
- index = ((T0 - PIT_FR1_8b) + ((PIT_FR1_8b - PIT_MIN)*2));
- }
-
- Parm_serial(index, 8, &prms);
-
- /* find T0_min and T0_max for subframe 2 and 4 */
- T0_min = (T0 - 8);
- if (T0_min < PIT_MIN)
- {
- T0_min = PIT_MIN;
- }
- T0_max = T0_min + 15;
- if (T0_max > PIT_MAX)
- {
- T0_max = PIT_MAX;
- T0_min = (T0_max - 15);
- }
- } else
- { /* if subframe 2 or 4 */
- /*--------------------------------------------------------------*
- * The pitch range for subframe 2 or 4 is encoded with 5 bits: *
- * T0_min to T0_max resolution 1/2 (frac = 0 or 2) *
- *--------------------------------------------------------------*/
- i = (T0 - T0_min);
- index = (i << 1) + (T0_frac >> 1);
-
- Parm_serial(index, 5, &prms);
- }
- } else
- {
- T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
- pit_flag, PIT_FR2, PIT_FR1_9b, L_SUBFR);
-
- /* encode pitch lag */
- if (pit_flag == 0) /* if 1st/3rd subframe */
- {
- /*--------------------------------------------------------------*
- * The pitch range for the 1st/3rd subframe is encoded with *
- * 9 bits and is divided as follows: *
- * PIT_MIN to PIT_FR2-1 resolution 1/4 (frac = 0,1,2 or 3) *
- * PIT_FR2 to PIT_FR1-1 resolution 1/2 (frac = 0 or 1) *
- * PIT_FR1 to PIT_MAX resolution 1 (frac = 0) *
- *--------------------------------------------------------------*/
-
- if (T0 < PIT_FR2)
- {
- index = ((T0 << 2) + T0_frac) - (PIT_MIN << 2);
- } else if(T0 < PIT_FR1_9b)
- {
- index = ((((T0 << 1) + (T0_frac >> 1)) - (PIT_FR2<<1)) + ((PIT_FR2 - PIT_MIN)<<2));
- } else
- {
- index = (((T0 - PIT_FR1_9b) + ((PIT_FR2 - PIT_MIN)<<2)) + ((PIT_FR1_9b - PIT_FR2)<<1));
- }
-
- Parm_serial(index, 9, &prms);
-
- /* find T0_min and T0_max for subframe 2 and 4 */
-
- T0_min = (T0 - 8);
- if (T0_min < PIT_MIN)
- {
- T0_min = PIT_MIN;
- }
- T0_max = T0_min + 15;
-
- if (T0_max > PIT_MAX)
- {
- T0_max = PIT_MAX;
- T0_min = (T0_max - 15);
- }
- } else
- { /* if subframe 2 or 4 */
- /*--------------------------------------------------------------*
- * The pitch range for subframe 2 or 4 is encoded with 6 bits: *
- * T0_min to T0_max resolution 1/4 (frac = 0,1,2 or 3) *
- *--------------------------------------------------------------*/
- i = (T0 - T0_min);
- index = (i << 2) + T0_frac;
- Parm_serial(index, 6, &prms);
- }
- }
-
- /*-----------------------------------------------------------------*
- * Gain clipping test to avoid unstable synthesis on frame erasure *
- *-----------------------------------------------------------------*/
-
- clip_gain = 0;
- if((st->gp_clip[0] < 154) && (st->gp_clip[1] > 14746))
- clip_gain = 1;
-
- /*-----------------------------------------------------------------*
- * - find unity gain pitch excitation (adaptive codebook entry) *
- * with fractional interpolation. *
- * - find filtered pitch exc. y1[]=exc[] convolved with h1[]) *
- * - compute pitch gain1 *
- *-----------------------------------------------------------------*/
- /* find pitch exitation */
+ /*----------------------------------------------------------------------*
+ * Closed-loop fractional pitch search *
+ *----------------------------------------------------------------------*/
+ /* find closed loop fractional pitch lag */
+ if(*ser_size <= NBBITS_9k)
+ {
+ T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
+ pit_flag, PIT_MIN, PIT_FR1_8b, L_SUBFR);
+
+ /* encode pitch lag */
+ if (pit_flag == 0) /* if 1st/3rd subframe */
+ {
+ /*--------------------------------------------------------------*
+ * The pitch range for the 1st/3rd subframe is encoded with *
+ * 8 bits and is divided as follows: *
+ * PIT_MIN to PIT_FR1-1 resolution 1/2 (frac = 0 or 2) *
+ * PIT_FR1 to PIT_MAX resolution 1 (frac = 0) *
+ *--------------------------------------------------------------*/
+ if (T0 < PIT_FR1_8b)
+ {
+ index = ((T0 << 1) + (T0_frac >> 1) - (PIT_MIN<<1));
+ } else
+ {
+ index = ((T0 - PIT_FR1_8b) + ((PIT_FR1_8b - PIT_MIN)*2));
+ }
+
+ Parm_serial(index, 8, &prms);
+
+ /* find T0_min and T0_max for subframe 2 and 4 */
+ T0_min = (T0 - 8);
+ if (T0_min < PIT_MIN)
+ {
+ T0_min = PIT_MIN;
+ }
+ T0_max = T0_min + 15;
+ if (T0_max > PIT_MAX)
+ {
+ T0_max = PIT_MAX;
+ T0_min = (T0_max - 15);
+ }
+ } else
+ { /* if subframe 2 or 4 */
+ /*--------------------------------------------------------------*
+ * The pitch range for subframe 2 or 4 is encoded with 5 bits: *
+ * T0_min to T0_max resolution 1/2 (frac = 0 or 2) *
+ *--------------------------------------------------------------*/
+ i = (T0 - T0_min);
+ index = (i << 1) + (T0_frac >> 1);
+
+ Parm_serial(index, 5, &prms);
+ }
+ } else
+ {
+ T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
+ pit_flag, PIT_FR2, PIT_FR1_9b, L_SUBFR);
+
+ /* encode pitch lag */
+ if (pit_flag == 0) /* if 1st/3rd subframe */
+ {
+ /*--------------------------------------------------------------*
+ * The pitch range for the 1st/3rd subframe is encoded with *
+ * 9 bits and is divided as follows: *
+ * PIT_MIN to PIT_FR2-1 resolution 1/4 (frac = 0,1,2 or 3) *
+                 *   PIT_FR2 to PIT_FR1-1  resolution 1/2 (frac = 0 or 2)      *
+ * PIT_FR1 to PIT_MAX resolution 1 (frac = 0) *
+ *--------------------------------------------------------------*/
+
+ if (T0 < PIT_FR2)
+ {
+ index = ((T0 << 2) + T0_frac) - (PIT_MIN << 2);
+ } else if(T0 < PIT_FR1_9b)
+ {
+ index = ((((T0 << 1) + (T0_frac >> 1)) - (PIT_FR2<<1)) + ((PIT_FR2 - PIT_MIN)<<2));
+ } else
+ {
+ index = (((T0 - PIT_FR1_9b) + ((PIT_FR2 - PIT_MIN)<<2)) + ((PIT_FR1_9b - PIT_FR2)<<1));
+ }
+
+ Parm_serial(index, 9, &prms);
+
+ /* find T0_min and T0_max for subframe 2 and 4 */
+
+ T0_min = (T0 - 8);
+ if (T0_min < PIT_MIN)
+ {
+ T0_min = PIT_MIN;
+ }
+ T0_max = T0_min + 15;
+
+ if (T0_max > PIT_MAX)
+ {
+ T0_max = PIT_MAX;
+ T0_min = (T0_max - 15);
+ }
+ } else
+ { /* if subframe 2 or 4 */
+ /*--------------------------------------------------------------*
+ * The pitch range for subframe 2 or 4 is encoded with 6 bits: *
+ * T0_min to T0_max resolution 1/4 (frac = 0,1,2 or 3) *
+ *--------------------------------------------------------------*/
+ i = (T0 - T0_min);
+ index = (i << 2) + T0_frac;
+ Parm_serial(index, 6, &prms);
+ }
+ }
+
+ /*-----------------------------------------------------------------*
+ * Gain clipping test to avoid unstable synthesis on frame erasure *
+ *-----------------------------------------------------------------*/
+
+ clip_gain = 0;
+ if((st->gp_clip[0] < 154) && (st->gp_clip[1] > 14746))
+ clip_gain = 1;
+
+ /*-----------------------------------------------------------------*
+ * - find unity gain pitch excitation (adaptive codebook entry) *
+ * with fractional interpolation. *
+         * - find filtered pitch exc. (y1[]=exc[] convolved with h1[])     *
+ * - compute pitch gain1 *
+ *-----------------------------------------------------------------*/
+        /* find pitch excitation */
#ifdef ASM_OPT /* asm optimization branch */
- pred_lt4_asm(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
+ pred_lt4_asm(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
#else
- Pred_lt4(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
+ Pred_lt4(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
#endif
- if (*ser_size > NBBITS_9k)
- {
+ if (*ser_size > NBBITS_9k)
+ {
#ifdef ASM_OPT /* asm optimization branch */
- Convolve_asm(&exc[i_subfr], h1, y1, L_SUBFR);
+ Convolve_asm(&exc[i_subfr], h1, y1, L_SUBFR);
#else
- Convolve(&exc[i_subfr], h1, y1, L_SUBFR);
+ Convolve(&exc[i_subfr], h1, y1, L_SUBFR);
#endif
- gain1 = G_pitch(xn, y1, g_coeff, L_SUBFR);
- /* clip gain if necessary to avoid problem at decoder */
- if ((clip_gain != 0) && (gain1 > GP_CLIP))
- {
- gain1 = GP_CLIP;
- }
- /* find energy of new target xn2[] */
- Updt_tar(xn, dn, y1, gain1, L_SUBFR); /* dn used temporary */
- } else
- {
- gain1 = 0;
- }
- /*-----------------------------------------------------------------*
- * - find pitch excitation filtered by 1st order LP filter. *
- * - find filtered pitch exc. y2[]=exc[] convolved with h1[]) *
- * - compute pitch gain2 *
- *-----------------------------------------------------------------*/
- /* find pitch excitation with lp filter */
- vo_p0 = exc + i_subfr-1;
- vo_p1 = code;
- /* find pitch excitation with lp filter */
- for (i = 0; i < L_SUBFR/2; i++)
- {
- L_tmp = 5898 * *vo_p0++;
- L_tmp1 = 5898 * *vo_p0;
- L_tmp += 20972 * *vo_p0++;
- L_tmp1 += 20972 * *vo_p0++;
- L_tmp1 += 5898 * *vo_p0--;
- L_tmp += 5898 * *vo_p0;
- *vo_p1++ = (L_tmp + 0x4000)>>15;
- *vo_p1++ = (L_tmp1 + 0x4000)>>15;
- }
+ gain1 = G_pitch(xn, y1, g_coeff, L_SUBFR);
+ /* clip gain if necessary to avoid problem at decoder */
+ if ((clip_gain != 0) && (gain1 > GP_CLIP))
+ {
+ gain1 = GP_CLIP;
+ }
+ /* find energy of new target xn2[] */
+            Updt_tar(xn, dn, y1, gain1, L_SUBFR);       /* dn used temporarily */
+ } else
+ {
+ gain1 = 0;
+ }
+ /*-----------------------------------------------------------------*
+ * - find pitch excitation filtered by 1st order LP filter. *
+         * - find filtered pitch exc. (y2[]=exc[] convolved with h1[])     *
+ * - compute pitch gain2 *
+ *-----------------------------------------------------------------*/
+ /* find pitch excitation with lp filter */
+ vo_p0 = exc + i_subfr-1;
+ vo_p1 = code;
+ /* find pitch excitation with lp filter */
+ for (i = 0; i < L_SUBFR/2; i++)
+ {
+ L_tmp = 5898 * *vo_p0++;
+ L_tmp1 = 5898 * *vo_p0;
+ L_tmp += 20972 * *vo_p0++;
+ L_tmp1 += 20972 * *vo_p0++;
+ L_tmp1 += 5898 * *vo_p0--;
+ L_tmp += 5898 * *vo_p0;
+ *vo_p1++ = (L_tmp + 0x4000)>>15;
+ *vo_p1++ = (L_tmp1 + 0x4000)>>15;
+ }
#ifdef ASM_OPT /* asm optimization branch */
- Convolve_asm(code, h1, y2, L_SUBFR);
+ Convolve_asm(code, h1, y2, L_SUBFR);
#else
- Convolve(code, h1, y2, L_SUBFR);
+ Convolve(code, h1, y2, L_SUBFR);
#endif
- gain2 = G_pitch(xn, y2, g_coeff2, L_SUBFR);
-
- /* clip gain if necessary to avoid problem at decoder */
- if ((clip_gain != 0) && (gain2 > GP_CLIP))
- {
- gain2 = GP_CLIP;
- }
- /* find energy of new target xn2[] */
- Updt_tar(xn, xn2, y2, gain2, L_SUBFR);
- /*-----------------------------------------------------------------*
- * use the best prediction (minimise quadratic error). *
- *-----------------------------------------------------------------*/
- select = 0;
- if(*ser_size > NBBITS_9k)
- {
- L_tmp = 0L;
- vo_p0 = dn;
- vo_p1 = xn2;
- for (i = 0; i < L_SUBFR/2; i++)
- {
- L_tmp += *vo_p0 * *vo_p0;
- vo_p0++;
- L_tmp -= *vo_p1 * *vo_p1;
- vo_p1++;
- L_tmp += *vo_p0 * *vo_p0;
- vo_p0++;
- L_tmp -= *vo_p1 * *vo_p1;
- vo_p1++;
- }
-
- if (L_tmp <= 0)
- {
- select = 1;
- }
- Parm_serial(select, 1, &prms);
- }
- if (select == 0)
- {
- /* use the lp filter for pitch excitation prediction */
- gain_pit = gain2;
- Copy(code, &exc[i_subfr], L_SUBFR);
- Copy(y2, y1, L_SUBFR);
- Copy(g_coeff2, g_coeff, 4);
- } else
- {
- /* no filter used for pitch excitation prediction */
- gain_pit = gain1;
- Copy(dn, xn2, L_SUBFR); /* target vector for codebook search */
- }
- /*-----------------------------------------------------------------*
- * - update cn[] for codebook search *
- *-----------------------------------------------------------------*/
- Updt_tar(cn, cn, &exc[i_subfr], gain_pit, L_SUBFR);
+ gain2 = G_pitch(xn, y2, g_coeff2, L_SUBFR);
+
+ /* clip gain if necessary to avoid problem at decoder */
+ if ((clip_gain != 0) && (gain2 > GP_CLIP))
+ {
+ gain2 = GP_CLIP;
+ }
+ /* find energy of new target xn2[] */
+ Updt_tar(xn, xn2, y2, gain2, L_SUBFR);
+ /*-----------------------------------------------------------------*
+ * use the best prediction (minimise quadratic error). *
+ *-----------------------------------------------------------------*/
+ select = 0;
+ if(*ser_size > NBBITS_9k)
+ {
+ L_tmp = 0L;
+ vo_p0 = dn;
+ vo_p1 = xn2;
+ for (i = 0; i < L_SUBFR/2; i++)
+ {
+ L_tmp += *vo_p0 * *vo_p0;
+ vo_p0++;
+ L_tmp -= *vo_p1 * *vo_p1;
+ vo_p1++;
+ L_tmp += *vo_p0 * *vo_p0;
+ vo_p0++;
+ L_tmp -= *vo_p1 * *vo_p1;
+ vo_p1++;
+ }
+
+ if (L_tmp <= 0)
+ {
+ select = 1;
+ }
+ Parm_serial(select, 1, &prms);
+ }
+ if (select == 0)
+ {
+ /* use the lp filter for pitch excitation prediction */
+ gain_pit = gain2;
+ Copy(code, &exc[i_subfr], L_SUBFR);
+ Copy(y2, y1, L_SUBFR);
+ Copy(g_coeff2, g_coeff, 4);
+ } else
+ {
+ /* no filter used for pitch excitation prediction */
+ gain_pit = gain1;
+ Copy(dn, xn2, L_SUBFR); /* target vector for codebook search */
+ }
+ /*-----------------------------------------------------------------*
+ * - update cn[] for codebook search *
+ *-----------------------------------------------------------------*/
+ Updt_tar(cn, cn, &exc[i_subfr], gain_pit, L_SUBFR);
#ifdef ASM_OPT /* asm optimization branch */
- Scale_sig_opt(cn, L_SUBFR, shift); /* scaling of cn[] to limit dynamic at 12 bits */
+ Scale_sig_opt(cn, L_SUBFR, shift); /* scaling of cn[] to limit dynamic at 12 bits */
#else
- Scale_sig(cn, L_SUBFR, shift); /* scaling of cn[] to limit dynamic at 12 bits */
+ Scale_sig(cn, L_SUBFR, shift); /* scaling of cn[] to limit dynamic at 12 bits */
#endif
- /*-----------------------------------------------------------------*
- * - include fixed-gain pitch contribution into impulse resp. h1[] *
- *-----------------------------------------------------------------*/
- tmp = 0;
- Preemph(h2, st->tilt_code, L_SUBFR, &tmp);
-
- if (T0_frac > 2)
- T0 = (T0 + 1);
- Pit_shrp(h2, T0, PIT_SHARP, L_SUBFR);
- /*-----------------------------------------------------------------*
- * - Correlation between target xn2[] and impulse response h1[] *
- * - Innovative codebook search *
- *-----------------------------------------------------------------*/
- cor_h_x(h2, xn2, dn);
- if (*ser_size <= NBBITS_7k)
- {
- ACELP_2t64_fx(dn, cn, h2, code, y2, indice);
-
- Parm_serial(indice[0], 12, &prms);
- } else if(*ser_size <= NBBITS_9k)
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 20, *ser_size, indice);
-
- Parm_serial(indice[0], 5, &prms);
- Parm_serial(indice[1], 5, &prms);
- Parm_serial(indice[2], 5, &prms);
- Parm_serial(indice[3], 5, &prms);
- } else if(*ser_size <= NBBITS_12k)
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 36, *ser_size, indice);
-
- Parm_serial(indice[0], 9, &prms);
- Parm_serial(indice[1], 9, &prms);
- Parm_serial(indice[2], 9, &prms);
- Parm_serial(indice[3], 9, &prms);
- } else if(*ser_size <= NBBITS_14k)
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 44, *ser_size, indice);
-
- Parm_serial(indice[0], 13, &prms);
- Parm_serial(indice[1], 13, &prms);
- Parm_serial(indice[2], 9, &prms);
- Parm_serial(indice[3], 9, &prms);
- } else if(*ser_size <= NBBITS_16k)
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 52, *ser_size, indice);
-
- Parm_serial(indice[0], 13, &prms);
- Parm_serial(indice[1], 13, &prms);
- Parm_serial(indice[2], 13, &prms);
- Parm_serial(indice[3], 13, &prms);
- } else if(*ser_size <= NBBITS_18k)
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 64, *ser_size, indice);
-
- Parm_serial(indice[0], 2, &prms);
- Parm_serial(indice[1], 2, &prms);
- Parm_serial(indice[2], 2, &prms);
- Parm_serial(indice[3], 2, &prms);
- Parm_serial(indice[4], 14, &prms);
- Parm_serial(indice[5], 14, &prms);
- Parm_serial(indice[6], 14, &prms);
- Parm_serial(indice[7], 14, &prms);
- } else if(*ser_size <= NBBITS_20k)
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 72, *ser_size, indice);
-
- Parm_serial(indice[0], 10, &prms);
- Parm_serial(indice[1], 10, &prms);
- Parm_serial(indice[2], 2, &prms);
- Parm_serial(indice[3], 2, &prms);
- Parm_serial(indice[4], 10, &prms);
- Parm_serial(indice[5], 10, &prms);
- Parm_serial(indice[6], 14, &prms);
- Parm_serial(indice[7], 14, &prms);
- } else
- {
- ACELP_4t64_fx(dn, cn, h2, code, y2, 88, *ser_size, indice);
-
- Parm_serial(indice[0], 11, &prms);
- Parm_serial(indice[1], 11, &prms);
- Parm_serial(indice[2], 11, &prms);
- Parm_serial(indice[3], 11, &prms);
- Parm_serial(indice[4], 11, &prms);
- Parm_serial(indice[5], 11, &prms);
- Parm_serial(indice[6], 11, &prms);
- Parm_serial(indice[7], 11, &prms);
- }
- /*-------------------------------------------------------*
- * - Add the fixed-gain pitch contribution to code[]. *
- *-------------------------------------------------------*/
- tmp = 0;
- Preemph(code, st->tilt_code, L_SUBFR, &tmp);
- Pit_shrp(code, T0, PIT_SHARP, L_SUBFR);
- /*----------------------------------------------------------*
- * - Compute the fixed codebook gain *
- * - quantize fixed codebook gain *
- *----------------------------------------------------------*/
- if(*ser_size <= NBBITS_9k)
- {
- index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 6,
- &gain_pit, &L_gain_code, clip_gain, st->qua_gain);
- Parm_serial(index, 6, &prms);
- } else
- {
- index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 7,
- &gain_pit, &L_gain_code, clip_gain, st->qua_gain);
- Parm_serial(index, 7, &prms);
- }
- /* test quantized gain of pitch for pitch clipping algorithm */
- Gp_clip_test_gain_pit(gain_pit, st->gp_clip);
-
- L_tmp = L_shl(L_gain_code, Q_new);
- gain_code = extract_h(L_add(L_tmp, 0x8000));
-
- /*----------------------------------------------------------*
- * Update parameters for the next subframe. *
- * - tilt of code: 0.0 (unvoiced) to 0.5 (voiced) *
- *----------------------------------------------------------*/
- /* find voice factor in Q15 (1=voiced, -1=unvoiced) */
- Copy(&exc[i_subfr], exc2, L_SUBFR);
+ /*-----------------------------------------------------------------*
+ * - include fixed-gain pitch contribution into impulse resp. h1[] *
+ *-----------------------------------------------------------------*/
+ tmp = 0;
+ Preemph(h2, st->tilt_code, L_SUBFR, &tmp);
+
+ if (T0_frac > 2)
+ T0 = (T0 + 1);
+ Pit_shrp(h2, T0, PIT_SHARP, L_SUBFR);
+ /*-----------------------------------------------------------------*
+ * - Correlation between target xn2[] and impulse response h1[] *
+ * - Innovative codebook search *
+ *-----------------------------------------------------------------*/
+ cor_h_x(h2, xn2, dn);
+ if (*ser_size <= NBBITS_7k)
+ {
+ ACELP_2t64_fx(dn, cn, h2, code, y2, indice);
+
+ Parm_serial(indice[0], 12, &prms);
+ } else if(*ser_size <= NBBITS_9k)
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 20, *ser_size, indice);
+
+ Parm_serial(indice[0], 5, &prms);
+ Parm_serial(indice[1], 5, &prms);
+ Parm_serial(indice[2], 5, &prms);
+ Parm_serial(indice[3], 5, &prms);
+ } else if(*ser_size <= NBBITS_12k)
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 36, *ser_size, indice);
+
+ Parm_serial(indice[0], 9, &prms);
+ Parm_serial(indice[1], 9, &prms);
+ Parm_serial(indice[2], 9, &prms);
+ Parm_serial(indice[3], 9, &prms);
+ } else if(*ser_size <= NBBITS_14k)
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 44, *ser_size, indice);
+
+ Parm_serial(indice[0], 13, &prms);
+ Parm_serial(indice[1], 13, &prms);
+ Parm_serial(indice[2], 9, &prms);
+ Parm_serial(indice[3], 9, &prms);
+ } else if(*ser_size <= NBBITS_16k)
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 52, *ser_size, indice);
+
+ Parm_serial(indice[0], 13, &prms);
+ Parm_serial(indice[1], 13, &prms);
+ Parm_serial(indice[2], 13, &prms);
+ Parm_serial(indice[3], 13, &prms);
+ } else if(*ser_size <= NBBITS_18k)
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 64, *ser_size, indice);
+
+ Parm_serial(indice[0], 2, &prms);
+ Parm_serial(indice[1], 2, &prms);
+ Parm_serial(indice[2], 2, &prms);
+ Parm_serial(indice[3], 2, &prms);
+ Parm_serial(indice[4], 14, &prms);
+ Parm_serial(indice[5], 14, &prms);
+ Parm_serial(indice[6], 14, &prms);
+ Parm_serial(indice[7], 14, &prms);
+ } else if(*ser_size <= NBBITS_20k)
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 72, *ser_size, indice);
+
+ Parm_serial(indice[0], 10, &prms);
+ Parm_serial(indice[1], 10, &prms);
+ Parm_serial(indice[2], 2, &prms);
+ Parm_serial(indice[3], 2, &prms);
+ Parm_serial(indice[4], 10, &prms);
+ Parm_serial(indice[5], 10, &prms);
+ Parm_serial(indice[6], 14, &prms);
+ Parm_serial(indice[7], 14, &prms);
+ } else
+ {
+ ACELP_4t64_fx(dn, cn, h2, code, y2, 88, *ser_size, indice);
+
+ Parm_serial(indice[0], 11, &prms);
+ Parm_serial(indice[1], 11, &prms);
+ Parm_serial(indice[2], 11, &prms);
+ Parm_serial(indice[3], 11, &prms);
+ Parm_serial(indice[4], 11, &prms);
+ Parm_serial(indice[5], 11, &prms);
+ Parm_serial(indice[6], 11, &prms);
+ Parm_serial(indice[7], 11, &prms);
+ }
+ /*-------------------------------------------------------*
+ * - Add the fixed-gain pitch contribution to code[]. *
+ *-------------------------------------------------------*/
+ tmp = 0;
+ Preemph(code, st->tilt_code, L_SUBFR, &tmp);
+ Pit_shrp(code, T0, PIT_SHARP, L_SUBFR);
+ /*----------------------------------------------------------*
+ * - Compute the fixed codebook gain *
+ * - quantize fixed codebook gain *
+ *----------------------------------------------------------*/
+ if(*ser_size <= NBBITS_9k)
+ {
+ index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 6,
+ &gain_pit, &L_gain_code, clip_gain, st->qua_gain);
+ Parm_serial(index, 6, &prms);
+ } else
+ {
+ index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 7,
+ &gain_pit, &L_gain_code, clip_gain, st->qua_gain);
+ Parm_serial(index, 7, &prms);
+ }
+ /* test quantized gain of pitch for pitch clipping algorithm */
+ Gp_clip_test_gain_pit(gain_pit, st->gp_clip);
+
+ L_tmp = L_shl(L_gain_code, Q_new);
+ gain_code = extract_h(L_add(L_tmp, 0x8000));
+
+ /*----------------------------------------------------------*
+ * Update parameters for the next subframe. *
+ * - tilt of code: 0.0 (unvoiced) to 0.5 (voiced) *
+ *----------------------------------------------------------*/
+ /* find voice factor in Q15 (1=voiced, -1=unvoiced) */
+ Copy(&exc[i_subfr], exc2, L_SUBFR);
#ifdef ASM_OPT /* asm optimization branch */
- Scale_sig_opt(exc2, L_SUBFR, shift);
+ Scale_sig_opt(exc2, L_SUBFR, shift);
#else
- Scale_sig(exc2, L_SUBFR, shift);
+ Scale_sig(exc2, L_SUBFR, shift);
#endif
- voice_fac = voice_factor(exc2, shift, gain_pit, code, gain_code, L_SUBFR);
- /* tilt of code for next subframe: 0.5=voiced, 0=unvoiced */
- st->tilt_code = ((voice_fac >> 2) + 8192);
- /*------------------------------------------------------*
- * - Update filter's memory "mem_w0" for finding the *
- * target vector in the next subframe. *
- * - Find the total excitation *
- * - Find synthesis speech to update mem_syn[]. *
- *------------------------------------------------------*/
-
- /* y2 in Q9, gain_pit in Q14 */
- L_tmp = (gain_code * y2[L_SUBFR - 1])<<1;
- L_tmp = L_shl(L_tmp, (5 + shift));
- L_tmp = L_negate(L_tmp);
- L_tmp += (xn[L_SUBFR - 1] * 16384)<<1;
- L_tmp -= (y1[L_SUBFR - 1] * gain_pit)<<1;
- L_tmp = L_shl(L_tmp, (1 - shift));
- st->mem_w0 = extract_h(L_add(L_tmp, 0x8000));
-
- if (*ser_size >= NBBITS_24k)
- Copy(&exc[i_subfr], exc2, L_SUBFR);
-
- for (i = 0; i < L_SUBFR; i++)
- {
- /* code in Q9, gain_pit in Q14 */
- L_tmp = (gain_code * code[i])<<1;
- L_tmp = (L_tmp << 5);
- L_tmp += (exc[i + i_subfr] * gain_pit)<<1;
- L_tmp = L_shl2(L_tmp, 1);
- exc[i + i_subfr] = extract_h(L_add(L_tmp, 0x8000));
- }
-
- Syn_filt(p_Aq,&exc[i_subfr], synth, L_SUBFR, st->mem_syn, 1);
-
- if(*ser_size >= NBBITS_24k)
- {
- /*------------------------------------------------------------*
- * phase dispersion to enhance noise in low bit rate *
- *------------------------------------------------------------*/
- /* L_gain_code in Q16 */
- VO_L_Extract(L_gain_code, &gain_code, &gain_code_lo);
-
- /*------------------------------------------------------------*
- * noise enhancer *
- * ~~~~~~~~~~~~~~ *
- * - Enhance excitation on noise. (modify gain of code) *
- * If signal is noisy and LPC filter is stable, move gain *
- * of code 1.5 dB toward gain of code threshold. *
- * This decrease by 3 dB noise energy variation. *
- *------------------------------------------------------------*/
- tmp = (16384 - (voice_fac >> 1)); /* 1=unvoiced, 0=voiced */
- fac = vo_mult(stab_fac, tmp);
- L_tmp = L_gain_code;
- if(L_tmp < st->L_gc_thres)
- {
- L_tmp = vo_L_add(L_tmp, Mpy_32_16(gain_code, gain_code_lo, 6226));
- if(L_tmp > st->L_gc_thres)
- {
- L_tmp = st->L_gc_thres;
- }
- } else
- {
- L_tmp = Mpy_32_16(gain_code, gain_code_lo, 27536);
- if(L_tmp < st->L_gc_thres)
- {
- L_tmp = st->L_gc_thres;
- }
- }
- st->L_gc_thres = L_tmp;
-
- L_gain_code = Mpy_32_16(gain_code, gain_code_lo, (32767 - fac));
- VO_L_Extract(L_tmp, &gain_code, &gain_code_lo);
- L_gain_code = vo_L_add(L_gain_code, Mpy_32_16(gain_code, gain_code_lo, fac));
-
- /*------------------------------------------------------------*
- * pitch enhancer *
- * ~~~~~~~~~~~~~~ *
- * - Enhance excitation on voice. (HP filtering of code) *
- * On voiced signal, filtering of code by a smooth fir HP *
- * filter to decrease energy of code in low frequency. *
- *------------------------------------------------------------*/
-
- tmp = ((voice_fac >> 3) + 4096); /* 0.25=voiced, 0=unvoiced */
-
- L_tmp = L_deposit_h(code[0]);
- L_tmp -= (code[1] * tmp)<<1;
- code2[0] = vo_round(L_tmp);
-
- for (i = 1; i < L_SUBFR - 1; i++)
- {
- L_tmp = L_deposit_h(code[i]);
- L_tmp -= (code[i + 1] * tmp)<<1;
- L_tmp -= (code[i - 1] * tmp)<<1;
- code2[i] = vo_round(L_tmp);
- }
-
- L_tmp = L_deposit_h(code[L_SUBFR - 1]);
- L_tmp -= (code[L_SUBFR - 2] * tmp)<<1;
- code2[L_SUBFR - 1] = vo_round(L_tmp);
-
- /* build excitation */
- gain_code = vo_round(L_shl(L_gain_code, Q_new));
-
- for (i = 0; i < L_SUBFR; i++)
- {
- L_tmp = (code2[i] * gain_code)<<1;
- L_tmp = (L_tmp << 5);
- L_tmp += (exc2[i] * gain_pit)<<1;
- L_tmp = (L_tmp << 1);
- exc2[i] = vo_round(L_tmp);
- }
-
- corr_gain = synthesis(p_Aq, exc2, Q_new, &speech16k[i_subfr * 5 / 4], st);
- Parm_serial(corr_gain, 4, &prms);
- }
- p_A += (M + 1);
- p_Aq += (M + 1);
- } /* end of subframe loop */
-
- /*--------------------------------------------------*
- * Update signal for next frame. *
- * -> save past of speech[], wsp[] and exc[]. *
- *--------------------------------------------------*/
- Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
- Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
- Copy(&old_exc[L_FRAME], st->old_exc, PIT_MAX + L_INTERPOL);
- return;
+ voice_fac = voice_factor(exc2, shift, gain_pit, code, gain_code, L_SUBFR);
+ /* tilt of code for next subframe: 0.5=voiced, 0=unvoiced */
+ st->tilt_code = ((voice_fac >> 2) + 8192);
+ /*------------------------------------------------------*
+ * - Update filter's memory "mem_w0" for finding the *
+ * target vector in the next subframe. *
+ * - Find the total excitation *
+ * - Find synthesis speech to update mem_syn[]. *
+ *------------------------------------------------------*/
+
+ /* y2 in Q9, gain_pit in Q14 */
+ L_tmp = (gain_code * y2[L_SUBFR - 1])<<1;
+ L_tmp = L_shl(L_tmp, (5 + shift));
+ L_tmp = L_negate(L_tmp);
+ L_tmp += (xn[L_SUBFR - 1] * 16384)<<1;
+ L_tmp -= (y1[L_SUBFR - 1] * gain_pit)<<1;
+ L_tmp = L_shl(L_tmp, (1 - shift));
+ st->mem_w0 = extract_h(L_add(L_tmp, 0x8000));
+
+ if (*ser_size >= NBBITS_24k)
+ Copy(&exc[i_subfr], exc2, L_SUBFR);
+
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ Word32 tmp;
+ /* code in Q9, gain_pit in Q14 */
+ L_tmp = (gain_code * code[i])<<1;
+ L_tmp = (L_tmp << 5);
+            tmp = L_mult(exc[i + i_subfr], gain_pit);  /* (exc[i + i_subfr] * gain_pit) << 1 */
+ L_tmp = L_add(L_tmp, tmp);
+ L_tmp = L_shl2(L_tmp, 1);
+ exc[i + i_subfr] = extract_h(L_add(L_tmp, 0x8000));
+ }
+
+ Syn_filt(p_Aq,&exc[i_subfr], synth, L_SUBFR, st->mem_syn, 1);
+
+ if(*ser_size >= NBBITS_24k)
+ {
+ /*------------------------------------------------------------*
+ * phase dispersion to enhance noise in low bit rate *
+ *------------------------------------------------------------*/
+ /* L_gain_code in Q16 */
+ VO_L_Extract(L_gain_code, &gain_code, &gain_code_lo);
+
+ /*------------------------------------------------------------*
+ * noise enhancer *
+ * ~~~~~~~~~~~~~~ *
+ * - Enhance excitation on noise. (modify gain of code) *
+ * If signal is noisy and LPC filter is stable, move gain *
+ * of code 1.5 dB toward gain of code threshold. *
+             *   This decreases noise energy variation by 3 dB.          *
+ *------------------------------------------------------------*/
+ tmp = (16384 - (voice_fac >> 1)); /* 1=unvoiced, 0=voiced */
+ fac = vo_mult(stab_fac, tmp);
+ L_tmp = L_gain_code;
+ if(L_tmp < st->L_gc_thres)
+ {
+ L_tmp = vo_L_add(L_tmp, Mpy_32_16(gain_code, gain_code_lo, 6226));
+ if(L_tmp > st->L_gc_thres)
+ {
+ L_tmp = st->L_gc_thres;
+ }
+ } else
+ {
+ L_tmp = Mpy_32_16(gain_code, gain_code_lo, 27536);
+ if(L_tmp < st->L_gc_thres)
+ {
+ L_tmp = st->L_gc_thres;
+ }
+ }
+ st->L_gc_thres = L_tmp;
+
+ L_gain_code = Mpy_32_16(gain_code, gain_code_lo, (32767 - fac));
+ VO_L_Extract(L_tmp, &gain_code, &gain_code_lo);
+ L_gain_code = vo_L_add(L_gain_code, Mpy_32_16(gain_code, gain_code_lo, fac));
+
+ /*------------------------------------------------------------*
+ * pitch enhancer *
+ * ~~~~~~~~~~~~~~ *
+ * - Enhance excitation on voice. (HP filtering of code) *
+ * On voiced signal, filtering of code by a smooth fir HP *
+ * filter to decrease energy of code in low frequency. *
+ *------------------------------------------------------------*/
+
+ tmp = ((voice_fac >> 3) + 4096); /* 0.25=voiced, 0=unvoiced */
+
+ L_tmp = L_deposit_h(code[0]);
+ L_tmp -= (code[1] * tmp)<<1;
+ code2[0] = vo_round(L_tmp);
+
+ for (i = 1; i < L_SUBFR - 1; i++)
+ {
+ L_tmp = L_deposit_h(code[i]);
+ L_tmp -= (code[i + 1] * tmp)<<1;
+ L_tmp -= (code[i - 1] * tmp)<<1;
+ code2[i] = vo_round(L_tmp);
+ }
+
+ L_tmp = L_deposit_h(code[L_SUBFR - 1]);
+ L_tmp -= (code[L_SUBFR - 2] * tmp)<<1;
+ code2[L_SUBFR - 1] = vo_round(L_tmp);
+
+ /* build excitation */
+ gain_code = vo_round(L_shl(L_gain_code, Q_new));
+
+ for (i = 0; i < L_SUBFR; i++)
+ {
+ L_tmp = (code2[i] * gain_code)<<1;
+ L_tmp = (L_tmp << 5);
+ L_tmp += (exc2[i] * gain_pit)<<1;
+ L_tmp = (L_tmp << 1);
+                exc2[i] = vo_round(L_tmp);
+ }
+
+ corr_gain = synthesis(p_Aq, exc2, Q_new, &speech16k[i_subfr * 5 / 4], st);
+ Parm_serial(corr_gain, 4, &prms);
+ }
+ p_A += (M + 1);
+ p_Aq += (M + 1);
+ } /* end of subframe loop */
+
+ /*--------------------------------------------------*
+ * Update signal for next frame. *
+ * -> save past of speech[], wsp[] and exc[]. *
+ *--------------------------------------------------*/
+ Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
+ Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
+ Copy(&old_exc[L_FRAME], st->old_exc, PIT_MAX + L_INTERPOL);
+ return;
}
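/* Illustration only (not part of the patch): the 8-bit absolute pitch-lag
 * index used for the 1st/3rd subframe inside coder() above, written as a
 * stand-alone helper.  T0 is the integer lag, frac the lag fraction in
 * 1/4-sample units (0 or 2 at half-sample resolution); pit_min and
 * pit_fr1 stand in for PIT_MIN and PIT_FR1_8b. */
static int pack_pitch_index_8b(int T0, int frac, int pit_min, int pit_fr1)
{
    if (T0 < pit_fr1)
        return (T0 << 1) + (frac >> 1) - (pit_min << 1);   /* 1/2-sample steps */
    return (T0 - pit_fr1) + ((pit_fr1 - pit_min) << 1);    /* integer steps */
}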
/*-----------------------------------------------------*
@@ -1329,225 +1333,225 @@ void coder(
*-----------------------------------------------------*/
static Word16 synthesis(
- Word16 Aq[], /* A(z) : quantized Az */
- Word16 exc[], /* (i) : excitation at 12kHz */
- Word16 Q_new, /* (i) : scaling performed on exc */
- Word16 synth16k[], /* (o) : 16kHz synthesis signal */
- Coder_State * st /* (i/o) : State structure */
- )
+ Word16 Aq[], /* A(z) : quantized Az */
+ Word16 exc[], /* (i) : excitation at 12kHz */
+ Word16 Q_new, /* (i) : scaling performed on exc */
+ Word16 synth16k[], /* (o) : 16kHz synthesis signal */
+ Coder_State * st /* (i/o) : State structure */
+ )
{
- Word16 fac, tmp, exp;
- Word16 ener, exp_ener;
- Word32 L_tmp, i;
-
- Word16 synth_hi[M + L_SUBFR], synth_lo[M + L_SUBFR];
- Word16 synth[L_SUBFR];
- Word16 HF[L_SUBFR16k]; /* High Frequency vector */
- Word16 Ap[M + 1];
-
- Word16 HF_SP[L_SUBFR16k]; /* High Frequency vector (from original signal) */
-
- Word16 HP_est_gain, HP_calc_gain, HP_corr_gain;
- Word16 dist_min, dist;
- Word16 HP_gain_ind = 0;
- Word16 gain1, gain2;
- Word16 weight1, weight2;
-
- /*------------------------------------------------------------*
- * speech synthesis *
- * ~~~~~~~~~~~~~~~~ *
- * - Find synthesis speech corresponding to exc2[]. *
- * - Perform fixed deemphasis and hp 50hz filtering. *
- * - Oversampling from 12.8kHz to 16kHz. *
- *------------------------------------------------------------*/
- Copy(st->mem_syn_hi, synth_hi, M);
- Copy(st->mem_syn_lo, synth_lo, M);
+ Word16 fac, tmp, exp;
+ Word16 ener, exp_ener;
+ Word32 L_tmp, i;
+
+ Word16 synth_hi[M + L_SUBFR], synth_lo[M + L_SUBFR];
+ Word16 synth[L_SUBFR];
+ Word16 HF[L_SUBFR16k]; /* High Frequency vector */
+ Word16 Ap[M + 1];
+
+ Word16 HF_SP[L_SUBFR16k]; /* High Frequency vector (from original signal) */
+
+ Word16 HP_est_gain, HP_calc_gain, HP_corr_gain;
+ Word16 dist_min, dist;
+ Word16 HP_gain_ind = 0;
+ Word16 gain1, gain2;
+ Word16 weight1, weight2;
+
+ /*------------------------------------------------------------*
+ * speech synthesis *
+ * ~~~~~~~~~~~~~~~~ *
+ * - Find synthesis speech corresponding to exc2[]. *
+ * - Perform fixed deemphasis and hp 50hz filtering. *
+ * - Oversampling from 12.8kHz to 16kHz. *
+ *------------------------------------------------------------*/
+ Copy(st->mem_syn_hi, synth_hi, M);
+ Copy(st->mem_syn_lo, synth_lo, M);
#ifdef ASM_OPT /* asm optimization branch */
- Syn_filt_32_asm(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
+ Syn_filt_32_asm(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
#else
- Syn_filt_32(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
+ Syn_filt_32(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
#endif
- Copy(synth_hi + L_SUBFR, st->mem_syn_hi, M);
- Copy(synth_lo + L_SUBFR, st->mem_syn_lo, M);
+ Copy(synth_hi + L_SUBFR, st->mem_syn_hi, M);
+ Copy(synth_lo + L_SUBFR, st->mem_syn_lo, M);
#ifdef ASM_OPT /* asm optimization branch */
- Deemph_32_asm(synth_hi + M, synth_lo + M, synth, &(st->mem_deemph));
+ Deemph_32_asm(synth_hi + M, synth_lo + M, synth, &(st->mem_deemph));
#else
- Deemph_32(synth_hi + M, synth_lo + M, synth, PREEMPH_FAC, L_SUBFR, &(st->mem_deemph));
+ Deemph_32(synth_hi + M, synth_lo + M, synth, PREEMPH_FAC, L_SUBFR, &(st->mem_deemph));
#endif
- HP50_12k8(synth, L_SUBFR, st->mem_sig_out);
-
- /* Original speech signal as reference for high band gain quantisation */
- for (i = 0; i < L_SUBFR16k; i++)
- {
- HF_SP[i] = synth16k[i];
- }
-
- /*------------------------------------------------------*
- * HF noise synthesis *
- * ~~~~~~~~~~~~~~~~~~ *
- * - Generate HF noise between 5.5 and 7.5 kHz. *
- * - Set energy of noise according to synthesis tilt. *
- * tilt > 0.8 ==> - 14 dB (voiced) *
- * tilt 0.5 ==> - 6 dB (voiced or noise) *
- * tilt < 0.0 ==> 0 dB (noise) *
- *------------------------------------------------------*/
- /* generate white noise vector */
- for (i = 0; i < L_SUBFR16k; i++)
- {
- HF[i] = Random(&(st->seed2))>>3;
- }
- /* energy of excitation */
+ HP50_12k8(synth, L_SUBFR, st->mem_sig_out);
+
+ /* Original speech signal as reference for high band gain quantisation */
+ for (i = 0; i < L_SUBFR16k; i++)
+ {
+ HF_SP[i] = synth16k[i];
+ }
+
+ /*------------------------------------------------------*
+ * HF noise synthesis *
+ * ~~~~~~~~~~~~~~~~~~ *
+ * - Generate HF noise between 5.5 and 7.5 kHz. *
+ * - Set energy of noise according to synthesis tilt. *
+ * tilt > 0.8 ==> - 14 dB (voiced) *
+ * tilt 0.5 ==> - 6 dB (voiced or noise) *
+ * tilt < 0.0 ==> 0 dB (noise) *
+ *------------------------------------------------------*/
+ /* generate white noise vector */
+ for (i = 0; i < L_SUBFR16k; i++)
+ {
+ HF[i] = Random(&(st->seed2))>>3;
+ }
+ /* energy of excitation */
#ifdef ASM_OPT /* asm optimization branch */
- Scale_sig_opt(exc, L_SUBFR, -3);
- Q_new = Q_new - 3;
- ener = extract_h(Dot_product12_asm(exc, exc, L_SUBFR, &exp_ener));
+ Scale_sig_opt(exc, L_SUBFR, -3);
+ Q_new = Q_new - 3;
+ ener = extract_h(Dot_product12_asm(exc, exc, L_SUBFR, &exp_ener));
#else
- Scale_sig(exc, L_SUBFR, -3);
- Q_new = Q_new - 3;
- ener = extract_h(Dot_product12(exc, exc, L_SUBFR, &exp_ener));
+ Scale_sig(exc, L_SUBFR, -3);
+ Q_new = Q_new - 3;
+ ener = extract_h(Dot_product12(exc, exc, L_SUBFR, &exp_ener));
#endif
- exp_ener = exp_ener - (Q_new + Q_new);
- /* set energy of white noise to energy of excitation */
+ exp_ener = exp_ener - (Q_new + Q_new);
+ /* set energy of white noise to energy of excitation */
#ifdef ASM_OPT /* asm optimization branch */
- tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
+ tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
#else
- tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
+ tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
#endif
- if(tmp > ener)
- {
- tmp = (tmp >> 1); /* Be sure tmp < ener */
- exp = (exp + 1);
- }
- L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
- exp = (exp - exp_ener);
- Isqrt_n(&L_tmp, &exp);
- L_tmp = L_shl(L_tmp, (exp + 1)); /* L_tmp x 2, L_tmp in Q31 */
- tmp = extract_h(L_tmp); /* tmp = 2 x sqrt(ener_exc/ener_hf) */
-
- for (i = 0; i < L_SUBFR16k; i++)
- {
- HF[i] = vo_mult(HF[i], tmp);
- }
-
- /* find tilt of synthesis speech (tilt: 1=voiced, -1=unvoiced) */
- HP400_12k8(synth, L_SUBFR, st->mem_hp400);
-
- L_tmp = 1L;
- for (i = 0; i < L_SUBFR; i++)
- L_tmp += (synth[i] * synth[i])<<1;
-
- exp = norm_l(L_tmp);
- ener = extract_h(L_tmp << exp); /* ener = r[0] */
-
- L_tmp = 1L;
- for (i = 1; i < L_SUBFR; i++)
- L_tmp +=(synth[i] * synth[i - 1])<<1;
-
- tmp = extract_h(L_tmp << exp); /* tmp = r[1] */
-
- if (tmp > 0)
- {
- fac = div_s(tmp, ener);
- } else
- {
- fac = 0;
- }
-
- /* modify energy of white noise according to synthesis tilt */
- gain1 = 32767 - fac;
- gain2 = vo_mult(gain1, 20480);
- gain2 = shl(gain2, 1);
-
- if (st->vad_hist > 0)
- {
- weight1 = 0;
- weight2 = 32767;
- } else
- {
- weight1 = 32767;
- weight2 = 0;
- }
- tmp = vo_mult(weight1, gain1);
- tmp = add1(tmp, vo_mult(weight2, gain2));
-
- if (tmp != 0)
- {
- tmp = (tmp + 1);
- }
- HP_est_gain = tmp;
-
- if(HP_est_gain < 3277)
- {
- HP_est_gain = 3277; /* 0.1 in Q15 */
- }
- /* synthesis of noise: 4.8kHz..5.6kHz --> 6kHz..7kHz */
- Weight_a(Aq, Ap, 19661, M); /* fac=0.6 */
+ if(tmp > ener)
+ {
+ tmp = (tmp >> 1); /* Be sure tmp < ener */
+ exp = (exp + 1);
+ }
+ L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
+ exp = (exp - exp_ener);
+ Isqrt_n(&L_tmp, &exp);
+ L_tmp = L_shl(L_tmp, (exp + 1)); /* L_tmp x 2, L_tmp in Q31 */
+ tmp = extract_h(L_tmp); /* tmp = 2 x sqrt(ener_exc/ener_hf) */
+
+ for (i = 0; i < L_SUBFR16k; i++)
+ {
+ HF[i] = vo_mult(HF[i], tmp);
+ }
+
+ /* find tilt of synthesis speech (tilt: 1=voiced, -1=unvoiced) */
+ HP400_12k8(synth, L_SUBFR, st->mem_hp400);
+
+ L_tmp = 1L;
+ for (i = 0; i < L_SUBFR; i++)
+ L_tmp += (synth[i] * synth[i])<<1;
+
+ exp = norm_l(L_tmp);
+ ener = extract_h(L_tmp << exp); /* ener = r[0] */
+
+ L_tmp = 1L;
+ for (i = 1; i < L_SUBFR; i++)
+ L_tmp +=(synth[i] * synth[i - 1])<<1;
+
+ tmp = extract_h(L_tmp << exp); /* tmp = r[1] */
+
+ if (tmp > 0)
+ {
+ fac = div_s(tmp, ener);
+ } else
+ {
+ fac = 0;
+ }
+
+ /* modify energy of white noise according to synthesis tilt */
+ gain1 = 32767 - fac;
+ gain2 = vo_mult(gain1, 20480);
+ gain2 = shl(gain2, 1);
+
+ if (st->vad_hist > 0)
+ {
+ weight1 = 0;
+ weight2 = 32767;
+ } else
+ {
+ weight1 = 32767;
+ weight2 = 0;
+ }
+ tmp = vo_mult(weight1, gain1);
+ tmp = add1(tmp, vo_mult(weight2, gain2));
+
+ if (tmp != 0)
+ {
+ tmp = (tmp + 1);
+ }
+ HP_est_gain = tmp;
+
+ if(HP_est_gain < 3277)
+ {
+ HP_est_gain = 3277; /* 0.1 in Q15 */
+ }
+ /* synthesis of noise: 4.8kHz..5.6kHz --> 6kHz..7kHz */
+ Weight_a(Aq, Ap, 19661, M); /* fac=0.6 */
#ifdef ASM_OPT /* asm optimization branch */
- Syn_filt_asm(Ap, HF, HF, st->mem_syn_hf);
- /* noise High Pass filtering (1ms of delay) */
- Filt_6k_7k_asm(HF, L_SUBFR16k, st->mem_hf);
- /* filtering of the original signal */
- Filt_6k_7k_asm(HF_SP, L_SUBFR16k, st->mem_hf2);
-
- /* check the gain difference */
- Scale_sig_opt(HF_SP, L_SUBFR16k, -1);
- ener = extract_h(Dot_product12_asm(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
- /* set energy of white noise to energy of excitation */
- tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
+ Syn_filt_asm(Ap, HF, HF, st->mem_syn_hf);
+ /* noise High Pass filtering (1ms of delay) */
+ Filt_6k_7k_asm(HF, L_SUBFR16k, st->mem_hf);
+ /* filtering of the original signal */
+ Filt_6k_7k_asm(HF_SP, L_SUBFR16k, st->mem_hf2);
+
+ /* check the gain difference */
+ Scale_sig_opt(HF_SP, L_SUBFR16k, -1);
+ ener = extract_h(Dot_product12_asm(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
+ /* set energy of white noise to energy of excitation */
+ tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
#else
- Syn_filt(Ap, HF, HF, L_SUBFR16k, st->mem_syn_hf, 1);
- /* noise High Pass filtering (1ms of delay) */
- Filt_6k_7k(HF, L_SUBFR16k, st->mem_hf);
- /* filtering of the original signal */
- Filt_6k_7k(HF_SP, L_SUBFR16k, st->mem_hf2);
- /* check the gain difference */
- Scale_sig(HF_SP, L_SUBFR16k, -1);
- ener = extract_h(Dot_product12(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
- /* set energy of white noise to energy of excitation */
- tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
+ Syn_filt(Ap, HF, HF, L_SUBFR16k, st->mem_syn_hf, 1);
+ /* noise High Pass filtering (1ms of delay) */
+ Filt_6k_7k(HF, L_SUBFR16k, st->mem_hf);
+ /* filtering of the original signal */
+ Filt_6k_7k(HF_SP, L_SUBFR16k, st->mem_hf2);
+ /* check the gain difference */
+ Scale_sig(HF_SP, L_SUBFR16k, -1);
+ ener = extract_h(Dot_product12(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
+ /* set energy of white noise to energy of excitation */
+ tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
#endif
- if (tmp > ener)
- {
- tmp = (tmp >> 1); /* Be sure tmp < ener */
- exp = (exp + 1);
- }
- L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
- exp = vo_sub(exp, exp_ener);
- Isqrt_n(&L_tmp, &exp);
- L_tmp = L_shl(L_tmp, exp); /* L_tmp, L_tmp in Q31 */
- HP_calc_gain = extract_h(L_tmp); /* tmp = sqrt(ener_input/ener_hf) */
-
- /* st->gain_alpha *= st->dtx_encSt->dtxHangoverCount/7 */
- L_tmp = (vo_L_mult(st->dtx_encSt->dtxHangoverCount, 4681) << 15);
- st->gain_alpha = vo_mult(st->gain_alpha, extract_h(L_tmp));
-
- if(st->dtx_encSt->dtxHangoverCount > 6)
- st->gain_alpha = 32767;
- HP_est_gain = HP_est_gain >> 1; /* From Q15 to Q14 */
- HP_corr_gain = add1(vo_mult(HP_calc_gain, st->gain_alpha), vo_mult((32767 - st->gain_alpha), HP_est_gain));
-
- /* Quantise the correction gain */
- dist_min = 32767;
- for (i = 0; i < 16; i++)
- {
- dist = vo_mult((HP_corr_gain - HP_gain[i]), (HP_corr_gain - HP_gain[i]));
- if (dist_min > dist)
- {
- dist_min = dist;
- HP_gain_ind = i;
- }
- }
- HP_corr_gain = HP_gain[HP_gain_ind];
- /* return the quantised gain index when using the highest mode, otherwise zero */
- return (HP_gain_ind);
+ if (tmp > ener)
+ {
+ tmp = (tmp >> 1); /* Be sure tmp < ener */
+ exp = (exp + 1);
+ }
+ L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
+ exp = vo_sub(exp, exp_ener);
+ Isqrt_n(&L_tmp, &exp);
+ L_tmp = L_shl(L_tmp, exp); /* L_tmp, L_tmp in Q31 */
+ HP_calc_gain = extract_h(L_tmp); /* tmp = sqrt(ener_input/ener_hf) */
+
+ /* st->gain_alpha *= st->dtx_encSt->dtxHangoverCount/7 */
+ L_tmp = (vo_L_mult(st->dtx_encSt->dtxHangoverCount, 4681) << 15);
+ st->gain_alpha = vo_mult(st->gain_alpha, extract_h(L_tmp));
+
+ if(st->dtx_encSt->dtxHangoverCount > 6)
+ st->gain_alpha = 32767;
+ HP_est_gain = HP_est_gain >> 1; /* From Q15 to Q14 */
+ HP_corr_gain = add1(vo_mult(HP_calc_gain, st->gain_alpha), vo_mult((32767 - st->gain_alpha), HP_est_gain));
+
+ /* Quantise the correction gain */
+ dist_min = 32767;
+ for (i = 0; i < 16; i++)
+ {
+ dist = vo_mult((HP_corr_gain - HP_gain[i]), (HP_corr_gain - HP_gain[i]));
+ if (dist_min > dist)
+ {
+ dist_min = dist;
+ HP_gain_ind = i;
+ }
+ }
+ HP_corr_gain = HP_gain[HP_gain_ind];
+ /* return the quantised gain index when using the highest mode, otherwise zero */
+ return (HP_gain_ind);
}
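/* Minimal sketch (not part of the patch) of the correction-gain quantiser
 * at the end of synthesis(): a nearest-neighbour search over a small
 * codebook.  "table" stands in for HP_gain[]; plain short/long types are
 * used so the example is self-contained. */
static int quantise_gain(short gain, const short *table, int size)
{
    int i, best = 0;
    long dist, dist_min = 0x7fffffffL;

    for (i = 0; i < size; i++)
    {
        dist = (long)(gain - table[i]) * (gain - table[i]);
        if (dist < dist_min)
        {
            dist_min = dist;
            best = i;                 /* remember the closest entry */
        }
    }
    return best;                      /* quantiser index */
}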
/*************************************************
@@ -1558,33 +1562,33 @@ static Word16 synthesis(
int AMR_Enc_Encode(HAMRENC hCodec)
{
- Word32 i;
- Coder_State *gData = (Coder_State*)hCodec;
- Word16 *signal;
- Word16 packed_size = 0;
- Word16 prms[NB_BITS_MAX];
- Word16 coding_mode = 0, nb_bits, allow_dtx, mode, reset_flag;
- mode = gData->mode;
- coding_mode = gData->mode;
- nb_bits = nb_of_bits[mode];
- signal = (Word16 *)gData->inputStream;
- allow_dtx = gData->allow_dtx;
-
- /* check for homing frame */
- reset_flag = encoder_homing_frame_test(signal);
-
- for (i = 0; i < L_FRAME16k; i++) /* Delete the 2 LSBs (14-bit input) */
- {
- *(signal + i) = (Word16) (*(signal + i) & 0xfffC);
- }
-
- coder(&coding_mode, signal, prms, &nb_bits, gData, allow_dtx);
- packed_size = PackBits(prms, coding_mode, mode, gData);
- if (reset_flag != 0)
- {
- Reset_encoder(gData, 1);
- }
- return packed_size;
+ Word32 i;
+ Coder_State *gData = (Coder_State*)hCodec;
+ Word16 *signal;
+ Word16 packed_size = 0;
+ Word16 prms[NB_BITS_MAX];
+ Word16 coding_mode = 0, nb_bits, allow_dtx, mode, reset_flag;
+ mode = gData->mode;
+ coding_mode = gData->mode;
+ nb_bits = nb_of_bits[mode];
+ signal = (Word16 *)gData->inputStream;
+ allow_dtx = gData->allow_dtx;
+
+ /* check for homing frame */
+ reset_flag = encoder_homing_frame_test(signal);
+
+ for (i = 0; i < L_FRAME16k; i++) /* Delete the 2 LSBs (14-bit input) */
+ {
+ *(signal + i) = (Word16) (*(signal + i) & 0xfffC);
+ }
+
+ coder(&coding_mode, signal, prms, &nb_bits, gData, allow_dtx);
+ packed_size = PackBits(prms, coding_mode, mode, gData);
+ if (reset_flag != 0)
+ {
+ Reset_encoder(gData, 1);
+ }
+ return packed_size;
}
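/* Sketch only (not part of the patch): the 2-LSB masking that
 * AMR_Enc_Encode applies to the 16 kHz input frame before calling
 * coder(), as a stand-alone helper; n would be L_FRAME16k (320). */
static void mask_to_14_bits(short *pcm, int n)
{
    int i;
    for (i = 0; i < n; i++)
    {
        pcm[i] = (short)(pcm[i] & 0xfffC);   /* delete the 2 LSBs (14-bit input) */
    }
}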
/***************************************************************************
@@ -1594,94 +1598,94 @@ int AMR_Enc_Encode(HAMRENC hCodec)
***************************************************************************/
VO_U32 VO_API voAMRWB_Init(VO_HANDLE * phCodec, /* o: the audio codec handle */
- VO_AUDIO_CODINGTYPE vType, /* i: Codec Type ID */
- VO_CODEC_INIT_USERDATA * pUserData /* i: init Parameters */
- )
+ VO_AUDIO_CODINGTYPE vType, /* i: Codec Type ID */
+ VO_CODEC_INIT_USERDATA * pUserData /* i: init Parameters */
+ )
{
- Coder_State *st;
- FrameStream *stream;
+ Coder_State *st;
+ FrameStream *stream;
#ifdef USE_DEAULT_MEM
- VO_MEM_OPERATOR voMemoprator;
+ VO_MEM_OPERATOR voMemoprator;
#endif
- VO_MEM_OPERATOR *pMemOP;
+ VO_MEM_OPERATOR *pMemOP;
UNUSED(vType);
- int interMem = 0;
+ int interMem = 0;
- if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
- {
+ if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
+ {
#ifdef USE_DEAULT_MEM
- voMemoprator.Alloc = cmnMemAlloc;
- voMemoprator.Copy = cmnMemCopy;
- voMemoprator.Free = cmnMemFree;
- voMemoprator.Set = cmnMemSet;
- voMemoprator.Check = cmnMemCheck;
- interMem = 1;
- pMemOP = &voMemoprator;
+ voMemoprator.Alloc = cmnMemAlloc;
+ voMemoprator.Copy = cmnMemCopy;
+ voMemoprator.Free = cmnMemFree;
+ voMemoprator.Set = cmnMemSet;
+ voMemoprator.Check = cmnMemCheck;
+ interMem = 1;
+ pMemOP = &voMemoprator;
#else
- *phCodec = NULL;
- return VO_ERR_INVALID_ARG;
+ *phCodec = NULL;
+ return VO_ERR_INVALID_ARG;
#endif
- }
- else
- {
- pMemOP = (VO_MEM_OPERATOR *)pUserData->memData;
- }
- /*-------------------------------------------------------------------------*
- * Memory allocation for coder state. *
- *-------------------------------------------------------------------------*/
- if ((st = (Coder_State *)mem_malloc(pMemOP, sizeof(Coder_State), 32, VO_INDEX_ENC_AMRWB)) == NULL)
- {
- return VO_ERR_OUTOF_MEMORY;
- }
-
- st->vadSt = NULL;
- st->dtx_encSt = NULL;
- st->sid_update_counter = 3;
- st->sid_handover_debt = 0;
- st->prev_ft = TX_SPEECH;
- st->inputStream = NULL;
- st->inputSize = 0;
-
- /* Default setting */
- st->mode = VOAMRWB_MD2385; /* bit rate 23.85kbps */
- st->frameType = VOAMRWB_RFC3267; /* frame type: RFC3267 */
- st->allow_dtx = 0; /* disable DTX mode */
-
- st->outputStream = NULL;
- st->outputSize = 0;
-
- st->stream = (FrameStream *)mem_malloc(pMemOP, sizeof(FrameStream), 32, VO_INDEX_ENC_AMRWB);
- if(st->stream == NULL)
- return VO_ERR_OUTOF_MEMORY;
-
- st->stream->frame_ptr = (unsigned char *)mem_malloc(pMemOP, Frame_Maxsize, 32, VO_INDEX_ENC_AMRWB);
- if(st->stream->frame_ptr == NULL)
- return VO_ERR_OUTOF_MEMORY;
-
- stream = st->stream;
- voAWB_InitFrameBuffer(stream);
-
- wb_vad_init(&(st->vadSt), pMemOP);
- dtx_enc_init(&(st->dtx_encSt), isf_init, pMemOP);
-
- Reset_encoder((void *) st, 1);
-
- if(interMem)
- {
- st->voMemoprator.Alloc = cmnMemAlloc;
- st->voMemoprator.Copy = cmnMemCopy;
- st->voMemoprator.Free = cmnMemFree;
- st->voMemoprator.Set = cmnMemSet;
- st->voMemoprator.Check = cmnMemCheck;
- pMemOP = &st->voMemoprator;
- }
-
- st->pvoMemop = pMemOP;
-
- *phCodec = (void *) st;
-
- return VO_ERR_NONE;
+ }
+ else
+ {
+ pMemOP = (VO_MEM_OPERATOR *)pUserData->memData;
+ }
+ /*-------------------------------------------------------------------------*
+ * Memory allocation for coder state. *
+ *-------------------------------------------------------------------------*/
+ if ((st = (Coder_State *)mem_malloc(pMemOP, sizeof(Coder_State), 32, VO_INDEX_ENC_AMRWB)) == NULL)
+ {
+ return VO_ERR_OUTOF_MEMORY;
+ }
+
+ st->vadSt = NULL;
+ st->dtx_encSt = NULL;
+ st->sid_update_counter = 3;
+ st->sid_handover_debt = 0;
+ st->prev_ft = TX_SPEECH;
+ st->inputStream = NULL;
+ st->inputSize = 0;
+
+ /* Default setting */
+ st->mode = VOAMRWB_MD2385; /* bit rate 23.85kbps */
+ st->frameType = VOAMRWB_RFC3267; /* frame type: RFC3267 */
+ st->allow_dtx = 0; /* disable DTX mode */
+
+ st->outputStream = NULL;
+ st->outputSize = 0;
+
+ st->stream = (FrameStream *)mem_malloc(pMemOP, sizeof(FrameStream), 32, VO_INDEX_ENC_AMRWB);
+ if(st->stream == NULL)
+ return VO_ERR_OUTOF_MEMORY;
+
+ st->stream->frame_ptr = (unsigned char *)mem_malloc(pMemOP, Frame_Maxsize, 32, VO_INDEX_ENC_AMRWB);
+ if(st->stream->frame_ptr == NULL)
+ return VO_ERR_OUTOF_MEMORY;
+
+ stream = st->stream;
+ voAWB_InitFrameBuffer(stream);
+
+ wb_vad_init(&(st->vadSt), pMemOP);
+ dtx_enc_init(&(st->dtx_encSt), isf_init, pMemOP);
+
+ Reset_encoder((void *) st, 1);
+
+ if(interMem)
+ {
+ st->voMemoprator.Alloc = cmnMemAlloc;
+ st->voMemoprator.Copy = cmnMemCopy;
+ st->voMemoprator.Free = cmnMemFree;
+ st->voMemoprator.Set = cmnMemSet;
+ st->voMemoprator.Check = cmnMemCheck;
+ pMemOP = &st->voMemoprator;
+ }
+
+ st->pvoMemop = pMemOP;
+
+ *phCodec = (void *) st;
+
+ return VO_ERR_NONE;
}
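/* Hedged usage sketch (not part of the patch): initialising the encoder
 * with a caller-supplied memory operator, which voAMRWB_Init requires
 * unless USE_DEAULT_MEM is defined.  memOp is assumed to be filled with
 * the caller's own Alloc/Copy/Free/Set/Check implementations. */
static VO_U32 init_amrwb_encoder(VO_HANDLE *phCodec,
                                 VO_AUDIO_CODINGTYPE vType,
                                 VO_MEM_OPERATOR *memOp)
{
    VO_CODEC_INIT_USERDATA userData;

    userData.memflag = VO_IMF_USERMEMOPERATOR;   /* user memory operator */
    userData.memData = (VO_PTR)memOp;

    return voAMRWB_Init(phCodec, vType, &userData);   /* VO_ERR_NONE on success */
}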
/**********************************************************************************
@@ -1691,32 +1695,32 @@ VO_U32 VO_API voAMRWB_Init(VO_HANDLE * phCodec, /* o: the audi
***********************************************************************************/
VO_U32 VO_API voAMRWB_SetInputData(
- VO_HANDLE hCodec, /* i/o: The codec handle which was created by Init function */
- VO_CODECBUFFER * pInput /* i: The input buffer parameter */
- )
+ VO_HANDLE hCodec, /* i/o: The codec handle which was created by Init function */
+ VO_CODECBUFFER * pInput /* i: The input buffer parameter */
+ )
{
- Coder_State *gData;
- FrameStream *stream;
+ Coder_State *gData;
+ FrameStream *stream;
- if(NULL == hCodec)
- {
- return VO_ERR_INVALID_ARG;
- }
+ if(NULL == hCodec)
+ {
+ return VO_ERR_INVALID_ARG;
+ }
- gData = (Coder_State *)hCodec;
- stream = gData->stream;
+ gData = (Coder_State *)hCodec;
+ stream = gData->stream;
- if(NULL == pInput || NULL == pInput->Buffer)
- {
- return VO_ERR_INVALID_ARG;
- }
+ if(NULL == pInput || NULL == pInput->Buffer)
+ {
+ return VO_ERR_INVALID_ARG;
+ }
- stream->set_ptr = pInput->Buffer;
- stream->set_len = pInput->Length;
- stream->frame_ptr = stream->frame_ptr_bk;
- stream->used_len = 0;
+ stream->set_ptr = pInput->Buffer;
+ stream->set_len = pInput->Length;
+ stream->frame_ptr = stream->frame_ptr_bk;
+ stream->used_len = 0;
- return VO_ERR_NONE;
+ return VO_ERR_NONE;
}
/**************************************************************************************
@@ -1726,52 +1730,52 @@ VO_U32 VO_API voAMRWB_SetInputData(
***************************************************************************************/
VO_U32 VO_API voAMRWB_GetOutputData(
- VO_HANDLE hCodec, /* i: The Codec Handle which was created by Init function*/
- VO_CODECBUFFER * pOutput, /* o: The output audio data */
- VO_AUDIO_OUTPUTINFO * pAudioFormat /* o: The encoder module filled audio format and used the input size*/
- )
+ VO_HANDLE hCodec, /* i: The Codec Handle which was created by Init function*/
+ VO_CODECBUFFER * pOutput, /* o: The output audio data */
+ VO_AUDIO_OUTPUTINFO * pAudioFormat /* o: The encoder module filled audio format and used the input size*/
+ )
{
- Coder_State* gData = (Coder_State*)hCodec;
- VO_MEM_OPERATOR *pMemOP;
- FrameStream *stream = (FrameStream *)gData->stream;
- pMemOP = (VO_MEM_OPERATOR *)gData->pvoMemop;
-
- if(stream->framebuffer_len < Frame_MaxByte) /* check the work buffer len */
- {
- stream->frame_storelen = stream->framebuffer_len;
- if(stream->frame_storelen)
- {
- pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk , stream->frame_ptr , stream->frame_storelen);
- }
- if(stream->set_len > 0)
- {
- voAWB_UpdateFrameBuffer(stream, pMemOP);
- }
- if(stream->framebuffer_len < Frame_MaxByte)
- {
- if(pAudioFormat)
- pAudioFormat->InputUsed = stream->used_len;
- return VO_ERR_INPUT_BUFFER_SMALL;
- }
- }
-
- gData->inputStream = stream->frame_ptr;
- gData->outputStream = (unsigned short*)pOutput->Buffer;
-
- gData->outputSize = AMR_Enc_Encode(gData); /* encoder main function */
-
- pOutput->Length = gData->outputSize; /* get the output buffer length */
- stream->frame_ptr += 640; /* update the work buffer ptr */
- stream->framebuffer_len -= 640;
-
- if(pAudioFormat) /* return output audio information */
- {
- pAudioFormat->Format.Channels = 1;
- pAudioFormat->Format.SampleRate = 8000;
- pAudioFormat->Format.SampleBits = 16;
- pAudioFormat->InputUsed = stream->used_len;
- }
- return VO_ERR_NONE;
+ Coder_State* gData = (Coder_State*)hCodec;
+ VO_MEM_OPERATOR *pMemOP;
+ FrameStream *stream = (FrameStream *)gData->stream;
+ pMemOP = (VO_MEM_OPERATOR *)gData->pvoMemop;
+
+ if(stream->framebuffer_len < Frame_MaxByte) /* check the work buffer len */
+ {
+ stream->frame_storelen = stream->framebuffer_len;
+ if(stream->frame_storelen)
+ {
+ pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk , stream->frame_ptr , stream->frame_storelen);
+ }
+ if(stream->set_len > 0)
+ {
+ voAWB_UpdateFrameBuffer(stream, pMemOP);
+ }
+ if(stream->framebuffer_len < Frame_MaxByte)
+ {
+ if(pAudioFormat)
+ pAudioFormat->InputUsed = stream->used_len;
+ return VO_ERR_INPUT_BUFFER_SMALL;
+ }
+ }
+
+ gData->inputStream = stream->frame_ptr;
+ gData->outputStream = (unsigned short*)pOutput->Buffer;
+
+ gData->outputSize = AMR_Enc_Encode(gData); /* encoder main function */
+
+ pOutput->Length = gData->outputSize; /* get the output buffer length */
+ stream->frame_ptr += 640; /* update the work buffer ptr */
+ stream->framebuffer_len -= 640;
+
+ if(pAudioFormat) /* return output audio information */
+ {
+ pAudioFormat->Format.Channels = 1;
+ pAudioFormat->Format.SampleRate = 8000;
+ pAudioFormat->Format.SampleBits = 16;
+ pAudioFormat->InputUsed = stream->used_len;
+ }
+ return VO_ERR_NONE;
}
/*************************************************************************
@@ -1782,50 +1786,50 @@ VO_U32 VO_API voAMRWB_GetOutputData(
VO_U32 VO_API voAMRWB_SetParam(
- VO_HANDLE hCodec, /* i/o: The Codec Handle which was created by Init function */
- VO_S32 uParamID, /* i: The param ID */
- VO_PTR pData /* i: The param value depend on the ID */
- )
+ VO_HANDLE hCodec, /* i/o: The Codec Handle which was created by Init function */
+ VO_S32 uParamID, /* i: The param ID */
+ VO_PTR pData /* i: The param value depend on the ID */
+ )
{
- Coder_State* gData = (Coder_State*)hCodec;
- FrameStream *stream = (FrameStream *)(gData->stream);
- int *lValue = (int*)pData;
-
- switch(uParamID)
- {
- /* setting AMR-WB frame type*/
- case VO_PID_AMRWB_FRAMETYPE:
- if(*lValue < VOAMRWB_DEFAULT || *lValue > VOAMRWB_RFC3267)
- return VO_ERR_WRONG_PARAM_ID;
- gData->frameType = *lValue;
- break;
- /* setting AMR-WB bit rate */
- case VO_PID_AMRWB_MODE:
- {
- if(*lValue < VOAMRWB_MD66 || *lValue > VOAMRWB_MD2385)
- return VO_ERR_WRONG_PARAM_ID;
- gData->mode = *lValue;
- }
- break;
- /* enable or disable DTX mode */
- case VO_PID_AMRWB_DTX:
- gData->allow_dtx = (Word16)(*lValue);
- break;
-
- case VO_PID_COMMON_HEADDATA:
- break;
+ Coder_State* gData = (Coder_State*)hCodec;
+ FrameStream *stream = (FrameStream *)(gData->stream);
+ int *lValue = (int*)pData;
+
+ switch(uParamID)
+ {
+ /* setting AMR-WB frame type*/
+ case VO_PID_AMRWB_FRAMETYPE:
+ if(*lValue < VOAMRWB_DEFAULT || *lValue > VOAMRWB_RFC3267)
+ return VO_ERR_WRONG_PARAM_ID;
+ gData->frameType = *lValue;
+ break;
+ /* setting AMR-WB bit rate */
+ case VO_PID_AMRWB_MODE:
+ {
+ if(*lValue < VOAMRWB_MD66 || *lValue > VOAMRWB_MD2385)
+ return VO_ERR_WRONG_PARAM_ID;
+ gData->mode = *lValue;
+ }
+ break;
+ /* enable or disable DTX mode */
+ case VO_PID_AMRWB_DTX:
+ gData->allow_dtx = (Word16)(*lValue);
+ break;
+
+ case VO_PID_COMMON_HEADDATA:
+ break;
/* flush the work buffer */
- case VO_PID_COMMON_FLUSH:
- stream->set_ptr = NULL;
- stream->frame_storelen = 0;
- stream->framebuffer_len = 0;
- stream->set_len = 0;
- break;
-
- default:
- return VO_ERR_WRONG_PARAM_ID;
- }
- return VO_ERR_NONE;
+ case VO_PID_COMMON_FLUSH:
+ stream->set_ptr = NULL;
+ stream->frame_storelen = 0;
+ stream->framebuffer_len = 0;
+ stream->set_len = 0;
+ break;
+
+ default:
+ return VO_ERR_WRONG_PARAM_ID;
+ }
+ return VO_ERR_NONE;
}
/**************************************************************************
@@ -1835,52 +1839,52 @@ VO_U32 VO_API voAMRWB_SetParam(
***************************************************************************/
VO_U32 VO_API voAMRWB_GetParam(
- VO_HANDLE hCodec, /* i: The Codec Handle which was created by Init function */
- VO_S32 uParamID, /* i: The param ID */
- VO_PTR pData /* o: The param value depend on the ID */
- )
+ VO_HANDLE hCodec, /* i: The Codec Handle which was created by Init function */
+ VO_S32 uParamID, /* i: The param ID */
+ VO_PTR pData /* o: The param value depend on the ID */
+ )
{
- int temp;
- Coder_State* gData = (Coder_State*)hCodec;
-
- if (gData==NULL)
- return VO_ERR_INVALID_ARG;
- switch(uParamID)
- {
- /* output audio format */
- case VO_PID_AMRWB_FORMAT:
- {
- VO_AUDIO_FORMAT* fmt = (VO_AUDIO_FORMAT*)pData;
- fmt->Channels = 1;
- fmt->SampleRate = 16000;
- fmt->SampleBits = 16;
- break;
- }
+ int temp;
+ Coder_State* gData = (Coder_State*)hCodec;
+
+ if (gData==NULL)
+ return VO_ERR_INVALID_ARG;
+ switch(uParamID)
+ {
+ /* output audio format */
+ case VO_PID_AMRWB_FORMAT:
+ {
+ VO_AUDIO_FORMAT* fmt = (VO_AUDIO_FORMAT*)pData;
+ fmt->Channels = 1;
+ fmt->SampleRate = 16000;
+ fmt->SampleBits = 16;
+ break;
+ }
/* output audio channel number */
- case VO_PID_AMRWB_CHANNELS:
- temp = 1;
- pData = (void *)(&temp);
- break;
+ case VO_PID_AMRWB_CHANNELS:
+ temp = 1;
+ pData = (void *)(&temp);
+ break;
/* output audio sample rate */
- case VO_PID_AMRWB_SAMPLERATE:
- temp = 16000;
- pData = (void *)(&temp);
- break;
- /* output audio frame type */
- case VO_PID_AMRWB_FRAMETYPE:
- temp = gData->frameType;
- pData = (void *)(&temp);
- break;
- /* output audio bit rate */
- case VO_PID_AMRWB_MODE:
- temp = gData->mode;
- pData = (void *)(&temp);
- break;
- default:
- return VO_ERR_WRONG_PARAM_ID;
- }
-
- return VO_ERR_NONE;
+ case VO_PID_AMRWB_SAMPLERATE:
+ temp = 16000;
+ pData = (void *)(&temp);
+ break;
+ /* output audio frame type */
+ case VO_PID_AMRWB_FRAMETYPE:
+ temp = gData->frameType;
+ pData = (void *)(&temp);
+ break;
+ /* output audio bit rate */
+ case VO_PID_AMRWB_MODE:
+ temp = gData->mode;
+ pData = (void *)(&temp);
+ break;
+ default:
+ return VO_ERR_WRONG_PARAM_ID;
+ }
+
+ return VO_ERR_NONE;
}
/***********************************************************************************
@@ -1890,32 +1894,32 @@ VO_U32 VO_API voAMRWB_GetParam(
*************************************************************************************/
VO_U32 VO_API voAMRWB_Uninit(VO_HANDLE hCodec /* i/o: Codec handle pointer */
- )
+ )
{
- Coder_State* gData = (Coder_State*)hCodec;
- VO_MEM_OPERATOR *pMemOP;
- pMemOP = gData->pvoMemop;
-
- if(hCodec)
- {
- if(gData->stream)
- {
- if(gData->stream->frame_ptr_bk)
- {
- mem_free(pMemOP, gData->stream->frame_ptr_bk, VO_INDEX_ENC_AMRWB);
- gData->stream->frame_ptr_bk = NULL;
- }
- mem_free(pMemOP, gData->stream, VO_INDEX_ENC_AMRWB);
- gData->stream = NULL;
- }
- wb_vad_exit(&(((Coder_State *) gData)->vadSt), pMemOP);
- dtx_enc_exit(&(((Coder_State *) gData)->dtx_encSt), pMemOP);
-
- mem_free(pMemOP, hCodec, VO_INDEX_ENC_AMRWB);
- hCodec = NULL;
- }
-
- return VO_ERR_NONE;
+ Coder_State* gData = (Coder_State*)hCodec;
+ VO_MEM_OPERATOR *pMemOP;
+ pMemOP = gData->pvoMemop;
+
+ if(hCodec)
+ {
+ if(gData->stream)
+ {
+ if(gData->stream->frame_ptr_bk)
+ {
+ mem_free(pMemOP, gData->stream->frame_ptr_bk, VO_INDEX_ENC_AMRWB);
+ gData->stream->frame_ptr_bk = NULL;
+ }
+ mem_free(pMemOP, gData->stream, VO_INDEX_ENC_AMRWB);
+ gData->stream = NULL;
+ }
+ wb_vad_exit(&(((Coder_State *) gData)->vadSt), pMemOP);
+ dtx_enc_exit(&(((Coder_State *) gData)->dtx_encSt), pMemOP);
+
+ mem_free(pMemOP, hCodec, VO_INDEX_ENC_AMRWB);
+ hCodec = NULL;
+ }
+
+ return VO_ERR_NONE;
}
/********************************************************************************
@@ -1925,19 +1929,19 @@ VO_U32 VO_API voAMRWB_Uninit(VO_HANDLE hCodec /* i/o: Codec handle poi
********************************************************************************/
VO_S32 VO_API voGetAMRWBEncAPI(
- VO_AUDIO_CODECAPI * pEncHandle /* i/o: Codec handle pointer */
- )
+ VO_AUDIO_CODECAPI * pEncHandle /* i/o: Codec handle pointer */
+ )
{
- if(NULL == pEncHandle)
- return VO_ERR_INVALID_ARG;
- pEncHandle->Init = voAMRWB_Init;
- pEncHandle->SetInputData = voAMRWB_SetInputData;
- pEncHandle->GetOutputData = voAMRWB_GetOutputData;
- pEncHandle->SetParam = voAMRWB_SetParam;
- pEncHandle->GetParam = voAMRWB_GetParam;
- pEncHandle->Uninit = voAMRWB_Uninit;
-
- return VO_ERR_NONE;
+ if(NULL == pEncHandle)
+ return VO_ERR_INVALID_ARG;
+ pEncHandle->Init = voAMRWB_Init;
+ pEncHandle->SetInputData = voAMRWB_SetInputData;
+ pEncHandle->GetOutputData = voAMRWB_GetOutputData;
+ pEncHandle->SetParam = voAMRWB_SetParam;
+ pEncHandle->GetParam = voAMRWB_GetParam;
+ pEncHandle->Uninit = voAMRWB_Uninit;
+
+ return VO_ERR_NONE;
}
#ifdef __cplusplus
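For reference, the wrapper above is driven entirely through the function table filled in by voGetAMRWBEncAPI(). A minimal encode loop looks roughly like the sketch below; the header names, the VO_AUDIO_CodingAMRWB/VO_IMF_USERMEMOPERATOR constants and the memflag field name are assumptions based on the surrounding library and its sample harness (error handling omitted, PCM comes from any 16 kHz mono source):

    #include <stdio.h>
    #include "voAMRWB.h"      /* assumed header names; adjust to the local tree */
    #include "cmnMemory.h"

    static void encode_stream(FILE *pcm_in, FILE *bits_out)
    {
        VO_AUDIO_CODECAPI api;
        VO_MEM_OPERATOR memOp;
        VO_CODEC_INIT_USERDATA userData;
        VO_HANDLE handle = NULL;
        VO_CODECBUFFER in, out;
        VO_AUDIO_OUTPUTINFO info;
        unsigned char pcm[640];       /* 320 samples = one 20 ms frame at 16 kHz */
        unsigned char bits[128];      /* large enough for one coded frame */

        memOp.Alloc = cmnMemAlloc;  memOp.Copy  = cmnMemCopy;  memOp.Free = cmnMemFree;
        memOp.Set   = cmnMemSet;    memOp.Check = cmnMemCheck;
        userData.memflag = VO_IMF_USERMEMOPERATOR;   /* assumed flag/field names */
        userData.memData = (VO_PTR)&memOp;

        voGetAMRWBEncAPI(&api);
        api.Init(&handle, VO_AUDIO_CodingAMRWB, &userData);

        while (fread(pcm, 1, sizeof(pcm), pcm_in) == sizeof(pcm)) {
            in.Buffer = pcm;   in.Length = sizeof(pcm);
            api.SetInputData(handle, &in);
            out.Buffer = bits;
            /* drain complete frames; VO_ERR_INPUT_BUFFER_SMALL means "feed more PCM" */
            while (api.GetOutputData(handle, &out, &info) == VO_ERR_NONE)
                fwrite(bits, 1, out.Length, bits_out);
        }
        api.Uninit(handle);
    }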
diff --git a/media/libstagefright/codecs/amrwbenc/src/voicefac.c b/media/libstagefright/codecs/amrwbenc/src/voicefac.c
index d890044..c9f48c2 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voicefac.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voicefac.c
@@ -26,65 +26,65 @@
#include "math_op.h"
Word16 voice_factor( /* (o) Q15 : factor (-1=unvoiced to 1=voiced) */
- Word16 exc[], /* (i) Q_exc : pitch excitation */
- Word16 Q_exc, /* (i) : exc format */
- Word16 gain_pit, /* (i) Q14 : gain of pitch */
- Word16 code[], /* (i) Q9 : Fixed codebook excitation */
- Word16 gain_code, /* (i) Q0 : gain of code */
- Word16 L_subfr /* (i) : subframe length */
- )
+ Word16 exc[], /* (i) Q_exc : pitch excitation */
+ Word16 Q_exc, /* (i) : exc format */
+ Word16 gain_pit, /* (i) Q14 : gain of pitch */
+ Word16 code[], /* (i) Q9 : Fixed codebook excitation */
+ Word16 gain_code, /* (i) Q0 : gain of code */
+ Word16 L_subfr /* (i) : subframe length */
+ )
{
- Word16 tmp, exp, ener1, exp1, ener2, exp2;
- Word32 i, L_tmp;
+ Word16 tmp, exp, ener1, exp1, ener2, exp2;
+ Word32 i, L_tmp;
#ifdef ASM_OPT /* asm optimization branch */
- ener1 = extract_h(Dot_product12_asm(exc, exc, L_subfr, &exp1));
+ ener1 = extract_h(Dot_product12_asm(exc, exc, L_subfr, &exp1));
#else
- ener1 = extract_h(Dot_product12(exc, exc, L_subfr, &exp1));
+ ener1 = extract_h(Dot_product12(exc, exc, L_subfr, &exp1));
#endif
- exp1 = exp1 - (Q_exc + Q_exc);
- L_tmp = vo_L_mult(gain_pit, gain_pit);
- exp = norm_l(L_tmp);
- tmp = extract_h(L_tmp << exp);
- ener1 = vo_mult(ener1, tmp);
- exp1 = exp1 - exp - 10; /* 10 -> gain_pit Q14 to Q9 */
+ exp1 = exp1 - (Q_exc + Q_exc);
+ L_tmp = vo_L_mult(gain_pit, gain_pit);
+ exp = norm_l(L_tmp);
+ tmp = extract_h(L_tmp << exp);
+ ener1 = vo_mult(ener1, tmp);
+ exp1 = exp1 - exp - 10; /* 10 -> gain_pit Q14 to Q9 */
#ifdef ASM_OPT /* asm optimization branch */
- ener2 = extract_h(Dot_product12_asm(code, code, L_subfr, &exp2));
+ ener2 = extract_h(Dot_product12_asm(code, code, L_subfr, &exp2));
#else
- ener2 = extract_h(Dot_product12(code, code, L_subfr, &exp2));
+ ener2 = extract_h(Dot_product12(code, code, L_subfr, &exp2));
#endif
- exp = norm_s(gain_code);
- tmp = gain_code << exp;
- tmp = vo_mult(tmp, tmp);
- ener2 = vo_mult(ener2, tmp);
- exp2 = exp2 - (exp + exp);
+ exp = norm_s(gain_code);
+ tmp = gain_code << exp;
+ tmp = vo_mult(tmp, tmp);
+ ener2 = vo_mult(ener2, tmp);
+ exp2 = exp2 - (exp + exp);
- i = exp1 - exp2;
+ i = exp1 - exp2;
- if (i >= 0)
- {
- ener1 = ener1 >> 1;
- ener2 = ener2 >> (i + 1);
- } else
- {
- ener1 = ener1 >> (1 - i);
- ener2 = ener2 >> 1;
- }
+ if (i >= 0)
+ {
+ ener1 = ener1 >> 1;
+ ener2 = ener2 >> (i + 1);
+ } else
+ {
+ ener1 = ener1 >> (1 - i);
+ ener2 = ener2 >> 1;
+ }
- tmp = vo_sub(ener1, ener2);
- ener1 = add1(add1(ener1, ener2), 1);
+ tmp = vo_sub(ener1, ener2);
+ ener1 = add1(add1(ener1, ener2), 1);
- if (tmp >= 0)
- {
- tmp = div_s(tmp, ener1);
- } else
- {
- tmp = vo_negate(div_s(vo_negate(tmp), ener1));
- }
+ if (tmp >= 0)
+ {
+ tmp = div_s(tmp, ener1);
+ } else
+ {
+ tmp = vo_negate(div_s(vo_negate(tmp), ener1));
+ }
- return (tmp);
+ return (tmp);
}
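In floating point the routine above reduces to the usual normalized energy difference: the adaptive (pitch) and fixed (codebook) excitations are weighted by their gains and the factor is (E_pitch - E_code) / (E_pitch + E_code) in [-1, 1]; the norm_s/shift bookkeeping only aligns the Q-formats. A plain-C reference, as a sketch rather than the bit-exact Q15 path:

    /* Floating-point reference for voice_factor() above (sketch only). */
    static double voice_factor_ref(const double *exc, double gain_pit,
                                   const double *code, double gain_code, int n)
    {
        double e_pit = 0.0, e_code = 0.0;
        for (int i = 0; i < n; i++) {
            double p = gain_pit * exc[i];
            double c = gain_code * code[i];
            e_pit  += p * p;
            e_code += c * c;
        }
        /* -1 = unvoiced, +1 = voiced */
        return (e_pit - e_code) / (e_pit + e_code + 1e-12);
    }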
diff --git a/media/libstagefright/codecs/amrwbenc/src/wb_vad.c b/media/libstagefright/codecs/amrwbenc/src/wb_vad.c
index 2beaefd..866a69c 100644
--- a/media/libstagefright/codecs/amrwbenc/src/wb_vad.c
+++ b/media/libstagefright/codecs/amrwbenc/src/wb_vad.c
@@ -44,30 +44,30 @@
*********************************************************************************/
static Word16 ilog2( /* return: output value of the log2 */
- Word16 mant /* i: value to be converted */
- )
+ Word16 mant /* i: value to be converted */
+ )
{
- Word16 ex, ex2, res;
- Word32 i, l_temp;
-
- if (mant <= 0)
- {
- mant = 1;
- }
- ex = norm_s(mant);
- mant = mant << ex;
-
- for (i = 0; i < 3; i++)
- mant = vo_mult(mant, mant);
- l_temp = vo_L_mult(mant, mant);
-
- ex2 = norm_l(l_temp);
- mant = extract_h(l_temp << ex2);
-
- res = (ex + 16) << 10;
- res = add1(res, (ex2 << 6));
- res = vo_sub(add1(res, 127), (mant >> 8));
- return (res);
+ Word16 ex, ex2, res;
+ Word32 i, l_temp;
+
+ if (mant <= 0)
+ {
+ mant = 1;
+ }
+ ex = norm_s(mant);
+ mant = mant << ex;
+
+ for (i = 0; i < 3; i++)
+ mant = vo_mult(mant, mant);
+ l_temp = vo_L_mult(mant, mant);
+
+ ex2 = norm_l(l_temp);
+ mant = extract_h(l_temp << ex2);
+
+ res = (ex + 16) << 10;
+ res = add1(res, (ex2 << 6));
+ res = vo_sub(add1(res, 127), (mant >> 8));
+ return (res);
}
/******************************************************************************
@@ -79,23 +79,23 @@ static Word16 ilog2( /* return: output value of the log2 *
*******************************************************************************/
static void filter5(
- Word16 * in0, /* i/o : input values; output low-pass part */
- Word16 * in1, /* i/o : input values; output high-pass part */
- Word16 data[] /* i/o : filter memory */
- )
+ Word16 * in0, /* i/o : input values; output low-pass part */
+ Word16 * in1, /* i/o : input values; output high-pass part */
+ Word16 data[] /* i/o : filter memory */
+ )
{
- Word16 temp0, temp1, temp2;
+ Word16 temp0, temp1, temp2;
- temp0 = vo_sub(*in0, vo_mult(COEFF5_1, data[0]));
- temp1 = add1(data[0], vo_mult(COEFF5_1, temp0));
- data[0] = temp0;
+ temp0 = vo_sub(*in0, vo_mult(COEFF5_1, data[0]));
+ temp1 = add1(data[0], vo_mult(COEFF5_1, temp0));
+ data[0] = temp0;
- temp0 = vo_sub(*in1, vo_mult(COEFF5_2, data[1]));
- temp2 = add1(data[1], vo_mult(COEFF5_2, temp0));
- data[1] = temp0;
+ temp0 = vo_sub(*in1, vo_mult(COEFF5_2, data[1]));
+ temp2 = add1(data[1], vo_mult(COEFF5_2, temp0));
+ data[1] = temp0;
- *in0 = extract_h((vo_L_add(temp1, temp2) << 15));
- *in1 = extract_h((vo_L_sub(temp1, temp2) << 15));
+ *in0 = extract_h((vo_L_add(temp1, temp2) << 15));
+ *in1 = extract_h((vo_L_sub(temp1, temp2) << 15));
}
/******************************************************************************
@@ -107,19 +107,19 @@ static void filter5(
*******************************************************************************/
static void filter3(
- Word16 * in0, /* i/o : input values; output low-pass part */
- Word16 * in1, /* i/o : input values; output high-pass part */
- Word16 * data /* i/o : filter memory */
- )
+ Word16 * in0, /* i/o : input values; output low-pass part */
+ Word16 * in1, /* i/o : input values; output high-pass part */
+ Word16 * data /* i/o : filter memory */
+ )
{
- Word16 temp1, temp2;
+ Word16 temp1, temp2;
- temp1 = vo_sub(*in1, vo_mult(COEFF3, *data));
- temp2 = add1(*data, vo_mult(COEFF3, temp1));
- *data = temp1;
+ temp1 = vo_sub(*in1, vo_mult(COEFF3, *data));
+ temp2 = add1(*data, vo_mult(COEFF3, temp1));
+ *data = temp1;
- *in1 = extract_h((vo_L_sub(*in0, temp2) << 15));
- *in0 = extract_h((vo_L_add(*in0, temp2) << 15));
+ *in1 = extract_h((vo_L_sub(*in0, temp2) << 15));
+ *in0 = extract_h((vo_L_add(*in0, temp2) << 15));
}
/******************************************************************************
@@ -135,36 +135,36 @@ static void filter3(
******************************************************************************/
static Word16 level_calculation( /* return: signal level */
- Word16 data[], /* i : signal buffer */
- Word16 * sub_level, /* i : level calculated at the end of the previous frame*/
- /* o : level of signal calculated from the last */
- /* (count2 - count1) samples */
- Word16 count1, /* i : number of samples to be counted */
- Word16 count2, /* i : number of samples to be counted */
- Word16 ind_m, /* i : step size for the index of the data buffer */
- Word16 ind_a, /* i : starting index of the data buffer */
- Word16 scale /* i : scaling for the level calculation */
- )
+ Word16 data[], /* i : signal buffer */
+ Word16 * sub_level, /* i : level calculated at the end of the previous frame*/
+ /* o : level of signal calculated from the last */
+ /* (count2 - count1) samples */
+ Word16 count1, /* i : number of samples to be counted */
+ Word16 count2, /* i : number of samples to be counted */
+ Word16 ind_m, /* i : step size for the index of the data buffer */
+ Word16 ind_a, /* i : starting index of the data buffer */
+ Word16 scale /* i : scaling for the level calculation */
+ )
{
- Word32 i, l_temp1, l_temp2;
- Word16 level;
+ Word32 i, l_temp1, l_temp2;
+ Word16 level;
- l_temp1 = 0L;
- for (i = count1; i < count2; i++)
- {
- l_temp1 += (abs_s(data[ind_m * i + ind_a])<<1);
- }
+ l_temp1 = 0L;
+ for (i = count1; i < count2; i++)
+ {
+ l_temp1 += (abs_s(data[ind_m * i + ind_a])<<1);
+ }
- l_temp2 = vo_L_add(l_temp1, L_shl(*sub_level, 16 - scale));
- *sub_level = extract_h(L_shl(l_temp1, scale));
+ l_temp2 = vo_L_add(l_temp1, L_shl(*sub_level, 16 - scale));
+ *sub_level = extract_h(L_shl(l_temp1, scale));
- for (i = 0; i < count1; i++)
- {
- l_temp2 += (abs_s(data[ind_m * i + ind_a])<<1);
- }
- level = extract_h(L_shl2(l_temp2, scale));
+ for (i = 0; i < count1; i++)
+ {
+ l_temp2 += (abs_s(data[ind_m * i + ind_a])<<1);
+ }
+ level = extract_h(L_shl2(l_temp2, scale));
- return level;
+ return level;
}
/******************************************************************************
@@ -176,75 +176,75 @@ static Word16 level_calculation( /* return: signal level */
*******************************************************************************/
static void filter_bank(
- VadVars * st, /* i/o : State struct */
- Word16 in[], /* i : input frame */
- Word16 level[] /* o : signal levels at each band */
- )
+ VadVars * st, /* i/o : State struct */
+ Word16 in[], /* i : input frame */
+ Word16 level[] /* o : signal levels at each band */
+ )
{
- Word32 i;
- Word16 tmp_buf[FRAME_LEN];
-
- /* shift input 1 bit down for safe scaling */
- for (i = 0; i < FRAME_LEN; i++)
- {
- tmp_buf[i] = in[i] >> 1;
- }
-
- /* run the filter bank */
- for (i = 0; i < 128; i++)
- {
- filter5(&tmp_buf[2 * i], &tmp_buf[2 * i + 1], st->a_data5[0]);
- }
- for (i = 0; i < 64; i++)
- {
- filter5(&tmp_buf[4 * i], &tmp_buf[4 * i + 2], st->a_data5[1]);
- filter5(&tmp_buf[4 * i + 1], &tmp_buf[4 * i + 3], st->a_data5[2]);
- }
- for (i = 0; i < 32; i++)
- {
- filter5(&tmp_buf[8 * i], &tmp_buf[8 * i + 4], st->a_data5[3]);
- filter5(&tmp_buf[8 * i + 2], &tmp_buf[8 * i + 6], st->a_data5[4]);
- filter3(&tmp_buf[8 * i + 3], &tmp_buf[8 * i + 7], &st->a_data3[0]);
- }
- for (i = 0; i < 16; i++)
- {
- filter3(&tmp_buf[16 * i + 0], &tmp_buf[16 * i + 8], &st->a_data3[1]);
- filter3(&tmp_buf[16 * i + 4], &tmp_buf[16 * i + 12], &st->a_data3[2]);
- filter3(&tmp_buf[16 * i + 6], &tmp_buf[16 * i + 14], &st->a_data3[3]);
- }
-
- for (i = 0; i < 8; i++)
- {
- filter3(&tmp_buf[32 * i + 0], &tmp_buf[32 * i + 16], &st->a_data3[4]);
- filter3(&tmp_buf[32 * i + 8], &tmp_buf[32 * i + 24], &st->a_data3[5]);
- }
-
- /* calculate levels in each frequency band */
-
- /* 4800 - 6400 Hz */
- level[11] = level_calculation(tmp_buf, &st->sub_level[11], 16, 64, 4, 1, 14);
- /* 4000 - 4800 Hz */
- level[10] = level_calculation(tmp_buf, &st->sub_level[10], 8, 32, 8, 7, 15);
- /* 3200 - 4000 Hz */
- level[9] = level_calculation(tmp_buf, &st->sub_level[9],8, 32, 8, 3, 15);
- /* 2400 - 3200 Hz */
- level[8] = level_calculation(tmp_buf, &st->sub_level[8],8, 32, 8, 2, 15);
- /* 2000 - 2400 Hz */
- level[7] = level_calculation(tmp_buf, &st->sub_level[7],4, 16, 16, 14, 16);
- /* 1600 - 2000 Hz */
- level[6] = level_calculation(tmp_buf, &st->sub_level[6],4, 16, 16, 6, 16);
- /* 1200 - 1600 Hz */
- level[5] = level_calculation(tmp_buf, &st->sub_level[5],4, 16, 16, 4, 16);
- /* 800 - 1200 Hz */
- level[4] = level_calculation(tmp_buf, &st->sub_level[4],4, 16, 16, 12, 16);
- /* 600 - 800 Hz */
- level[3] = level_calculation(tmp_buf, &st->sub_level[3],2, 8, 32, 8, 17);
- /* 400 - 600 Hz */
- level[2] = level_calculation(tmp_buf, &st->sub_level[2],2, 8, 32, 24, 17);
- /* 200 - 400 Hz */
- level[1] = level_calculation(tmp_buf, &st->sub_level[1],2, 8, 32, 16, 17);
- /* 0 - 200 Hz */
- level[0] = level_calculation(tmp_buf, &st->sub_level[0],2, 8, 32, 0, 17);
+ Word32 i;
+ Word16 tmp_buf[FRAME_LEN];
+
+ /* shift input 1 bit down for safe scaling */
+ for (i = 0; i < FRAME_LEN; i++)
+ {
+ tmp_buf[i] = in[i] >> 1;
+ }
+
+ /* run the filter bank */
+ for (i = 0; i < 128; i++)
+ {
+ filter5(&tmp_buf[2 * i], &tmp_buf[2 * i + 1], st->a_data5[0]);
+ }
+ for (i = 0; i < 64; i++)
+ {
+ filter5(&tmp_buf[4 * i], &tmp_buf[4 * i + 2], st->a_data5[1]);
+ filter5(&tmp_buf[4 * i + 1], &tmp_buf[4 * i + 3], st->a_data5[2]);
+ }
+ for (i = 0; i < 32; i++)
+ {
+ filter5(&tmp_buf[8 * i], &tmp_buf[8 * i + 4], st->a_data5[3]);
+ filter5(&tmp_buf[8 * i + 2], &tmp_buf[8 * i + 6], st->a_data5[4]);
+ filter3(&tmp_buf[8 * i + 3], &tmp_buf[8 * i + 7], &st->a_data3[0]);
+ }
+ for (i = 0; i < 16; i++)
+ {
+ filter3(&tmp_buf[16 * i + 0], &tmp_buf[16 * i + 8], &st->a_data3[1]);
+ filter3(&tmp_buf[16 * i + 4], &tmp_buf[16 * i + 12], &st->a_data3[2]);
+ filter3(&tmp_buf[16 * i + 6], &tmp_buf[16 * i + 14], &st->a_data3[3]);
+ }
+
+ for (i = 0; i < 8; i++)
+ {
+ filter3(&tmp_buf[32 * i + 0], &tmp_buf[32 * i + 16], &st->a_data3[4]);
+ filter3(&tmp_buf[32 * i + 8], &tmp_buf[32 * i + 24], &st->a_data3[5]);
+ }
+
+ /* calculate levels in each frequency band */
+
+ /* 4800 - 6400 Hz */
+ level[11] = level_calculation(tmp_buf, &st->sub_level[11], 16, 64, 4, 1, 14);
+ /* 4000 - 4800 Hz */
+ level[10] = level_calculation(tmp_buf, &st->sub_level[10], 8, 32, 8, 7, 15);
+ /* 3200 - 4000 Hz */
+ level[9] = level_calculation(tmp_buf, &st->sub_level[9],8, 32, 8, 3, 15);
+ /* 2400 - 3200 Hz */
+ level[8] = level_calculation(tmp_buf, &st->sub_level[8],8, 32, 8, 2, 15);
+ /* 2000 - 2400 Hz */
+ level[7] = level_calculation(tmp_buf, &st->sub_level[7],4, 16, 16, 14, 16);
+ /* 1600 - 2000 Hz */
+ level[6] = level_calculation(tmp_buf, &st->sub_level[6],4, 16, 16, 6, 16);
+ /* 1200 - 1600 Hz */
+ level[5] = level_calculation(tmp_buf, &st->sub_level[5],4, 16, 16, 4, 16);
+ /* 800 - 1200 Hz */
+ level[4] = level_calculation(tmp_buf, &st->sub_level[4],4, 16, 16, 12, 16);
+ /* 600 - 800 Hz */
+ level[3] = level_calculation(tmp_buf, &st->sub_level[3],2, 8, 32, 8, 17);
+ /* 400 - 600 Hz */
+ level[2] = level_calculation(tmp_buf, &st->sub_level[2],2, 8, 32, 24, 17);
+ /* 200 - 400 Hz */
+ level[1] = level_calculation(tmp_buf, &st->sub_level[1],2, 8, 32, 16, 17);
+ /* 0 - 200 Hz */
+ level[0] = level_calculation(tmp_buf, &st->sub_level[0],2, 8, 32, 0, 17);
}
/******************************************************************************
@@ -255,86 +255,86 @@ static void filter_bank(
*******************************************************************************/
static void update_cntrl(
- VadVars * st, /* i/o : State structure */
- Word16 level[] /* i : sub-band levels of the input frame */
- )
+ VadVars * st, /* i/o : State structure */
+ Word16 level[] /* i : sub-band levels of the input frame */
+ )
{
- Word32 i;
- Word16 num, temp, stat_rat, exp, denom;
- Word16 alpha;
-
- /* if a tone has been detected for a while, initialize stat_count */
- if (sub((Word16) (st->tone_flag & 0x7c00), 0x7c00) == 0)
- {
- st->stat_count = STAT_COUNT;
- } else
- {
- /* if 8 last vad-decisions have been "0", reinitialize stat_count */
- if ((st->vadreg & 0x7f80) == 0)
- {
- st->stat_count = STAT_COUNT;
- } else
- {
- stat_rat = 0;
- for (i = 0; i < COMPLEN; i++)
- {
- if(level[i] > st->ave_level[i])
- {
- num = level[i];
- denom = st->ave_level[i];
- } else
- {
- num = st->ave_level[i];
- denom = level[i];
- }
- /* Limit nimimum value of num and denom to STAT_THR_LEVEL */
- if(num < STAT_THR_LEVEL)
- {
- num = STAT_THR_LEVEL;
- }
- if(denom < STAT_THR_LEVEL)
- {
- denom = STAT_THR_LEVEL;
- }
- exp = norm_s(denom);
- denom = denom << exp;
-
- /* stat_rat = num/denom * 64 */
- temp = div_s(num >> 1, denom);
- stat_rat = add1(stat_rat, shr(temp, (8 - exp)));
- }
-
- /* compare stat_rat with a threshold and update stat_count */
- if(stat_rat > STAT_THR)
- {
- st->stat_count = STAT_COUNT;
- } else
- {
- if ((st->vadreg & 0x4000) != 0)
- {
-
- if (st->stat_count != 0)
- {
- st->stat_count = st->stat_count - 1;
- }
- }
- }
- }
- }
-
- /* Update average amplitude estimate for stationarity estimation */
- alpha = ALPHA4;
- if(st->stat_count == STAT_COUNT)
- {
- alpha = 32767;
- } else if ((st->vadreg & 0x4000) == 0)
- {
- alpha = ALPHA5;
- }
- for (i = 0; i < COMPLEN; i++)
- {
- st->ave_level[i] = add1(st->ave_level[i], vo_mult_r(alpha, vo_sub(level[i], st->ave_level[i])));
- }
+ Word32 i;
+ Word16 num, temp, stat_rat, exp, denom;
+ Word16 alpha;
+
+ /* if a tone has been detected for a while, initialize stat_count */
+ if (sub((Word16) (st->tone_flag & 0x7c00), 0x7c00) == 0)
+ {
+ st->stat_count = STAT_COUNT;
+ } else
+ {
+ /* if 8 last vad-decisions have been "0", reinitialize stat_count */
+ if ((st->vadreg & 0x7f80) == 0)
+ {
+ st->stat_count = STAT_COUNT;
+ } else
+ {
+ stat_rat = 0;
+ for (i = 0; i < COMPLEN; i++)
+ {
+ if(level[i] > st->ave_level[i])
+ {
+ num = level[i];
+ denom = st->ave_level[i];
+ } else
+ {
+ num = st->ave_level[i];
+ denom = level[i];
+ }
+                    /* Limit minimum value of num and denom to STAT_THR_LEVEL */
+ if(num < STAT_THR_LEVEL)
+ {
+ num = STAT_THR_LEVEL;
+ }
+ if(denom < STAT_THR_LEVEL)
+ {
+ denom = STAT_THR_LEVEL;
+ }
+ exp = norm_s(denom);
+ denom = denom << exp;
+
+ /* stat_rat = num/denom * 64 */
+ temp = div_s(num >> 1, denom);
+ stat_rat = add1(stat_rat, shr(temp, (8 - exp)));
+ }
+
+ /* compare stat_rat with a threshold and update stat_count */
+ if(stat_rat > STAT_THR)
+ {
+ st->stat_count = STAT_COUNT;
+ } else
+ {
+ if ((st->vadreg & 0x4000) != 0)
+ {
+
+ if (st->stat_count != 0)
+ {
+ st->stat_count = st->stat_count - 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* Update average amplitude estimate for stationarity estimation */
+ alpha = ALPHA4;
+ if(st->stat_count == STAT_COUNT)
+ {
+ alpha = 32767;
+ } else if ((st->vadreg & 0x4000) == 0)
+ {
+ alpha = ALPHA5;
+ }
+ for (i = 0; i < COMPLEN; i++)
+ {
+ st->ave_level[i] = add1(st->ave_level[i], vo_mult_r(alpha, vo_sub(level[i], st->ave_level[i])));
+ }
}
/******************************************************************************
@@ -345,38 +345,38 @@ static void update_cntrl(
*******************************************************************************/
static Word16 hangover_addition( /* return: VAD_flag indicating final VAD decision */
- VadVars * st, /* i/o : State structure */
- Word16 low_power, /* i : flag power of the input frame */
- Word16 hang_len, /* i : hangover length */
- Word16 burst_len /* i : minimum burst length for hangover addition */
- )
+ VadVars * st, /* i/o : State structure */
+ Word16 low_power, /* i : flag power of the input frame */
+ Word16 hang_len, /* i : hangover length */
+ Word16 burst_len /* i : minimum burst length for hangover addition */
+ )
{
- /* if the input power (pow_sum) is lower than a threshold, clear counters and set VAD_flag to "0" */
- if (low_power != 0)
- {
- st->burst_count = 0;
- st->hang_count = 0;
- return 0;
- }
- /* update the counters (hang_count, burst_count) */
- if ((st->vadreg & 0x4000) != 0)
- {
- st->burst_count = st->burst_count + 1;
- if(st->burst_count >= burst_len)
- {
- st->hang_count = hang_len;
- }
- return 1;
- } else
- {
- st->burst_count = 0;
- if (st->hang_count > 0)
- {
- st->hang_count = st->hang_count - 1;
- return 1;
- }
- }
- return 0;
+ /* if the input power (pow_sum) is lower than a threshold, clear counters and set VAD_flag to "0" */
+ if (low_power != 0)
+ {
+ st->burst_count = 0;
+ st->hang_count = 0;
+ return 0;
+ }
+ /* update the counters (hang_count, burst_count) */
+ if ((st->vadreg & 0x4000) != 0)
+ {
+ st->burst_count = st->burst_count + 1;
+ if(st->burst_count >= burst_len)
+ {
+ st->hang_count = hang_len;
+ }
+ return 1;
+ } else
+ {
+ st->burst_count = 0;
+ if (st->hang_count > 0)
+ {
+ st->hang_count = st->hang_count - 1;
+ return 1;
+ }
+ }
+ return 0;
}
/******************************************************************************
@@ -387,66 +387,66 @@ static Word16 hangover_addition( /* return: VAD_flag indica
*******************************************************************************/
static void noise_estimate_update(
- VadVars * st, /* i/o : State structure */
- Word16 level[] /* i : sub-band levels of the input frame */
- )
+ VadVars * st, /* i/o : State structure */
+ Word16 level[] /* i : sub-band levels of the input frame */
+ )
{
- Word32 i;
- Word16 alpha_up, alpha_down, bckr_add = 2;
-
- /* Control update of bckr_est[] */
- update_cntrl(st, level);
-
- /* Choose update speed */
- if ((0x7800 & st->vadreg) == 0)
- {
- alpha_up = ALPHA_UP1;
- alpha_down = ALPHA_DOWN1;
- } else
- {
- if (st->stat_count == 0)
- {
- alpha_up = ALPHA_UP2;
- alpha_down = ALPHA_DOWN2;
- } else
- {
- alpha_up = 0;
- alpha_down = ALPHA3;
- bckr_add = 0;
- }
- }
-
- /* Update noise estimate (bckr_est) */
- for (i = 0; i < COMPLEN; i++)
- {
- Word16 temp;
- temp = (st->old_level[i] - st->bckr_est[i]);
-
- if (temp < 0)
- { /* update downwards */
- st->bckr_est[i] = add1(-2, add(st->bckr_est[i],vo_mult_r(alpha_down, temp)));
- /* limit minimum value of the noise estimate to NOISE_MIN */
- if(st->bckr_est[i] < NOISE_MIN)
- {
- st->bckr_est[i] = NOISE_MIN;
- }
- } else
- { /* update upwards */
- st->bckr_est[i] = add1(bckr_add, add1(st->bckr_est[i],vo_mult_r(alpha_up, temp)));
-
- /* limit maximum value of the noise estimate to NOISE_MAX */
- if(st->bckr_est[i] > NOISE_MAX)
- {
- st->bckr_est[i] = NOISE_MAX;
- }
- }
- }
-
- /* Update signal levels of the previous frame (old_level) */
- for (i = 0; i < COMPLEN; i++)
- {
- st->old_level[i] = level[i];
- }
+ Word32 i;
+ Word16 alpha_up, alpha_down, bckr_add = 2;
+
+ /* Control update of bckr_est[] */
+ update_cntrl(st, level);
+
+ /* Choose update speed */
+ if ((0x7800 & st->vadreg) == 0)
+ {
+ alpha_up = ALPHA_UP1;
+ alpha_down = ALPHA_DOWN1;
+ } else
+ {
+ if (st->stat_count == 0)
+ {
+ alpha_up = ALPHA_UP2;
+ alpha_down = ALPHA_DOWN2;
+ } else
+ {
+ alpha_up = 0;
+ alpha_down = ALPHA3;
+ bckr_add = 0;
+ }
+ }
+
+ /* Update noise estimate (bckr_est) */
+ for (i = 0; i < COMPLEN; i++)
+ {
+ Word16 temp;
+ temp = (st->old_level[i] - st->bckr_est[i]);
+
+ if (temp < 0)
+ { /* update downwards */
+ st->bckr_est[i] = add1(-2, add(st->bckr_est[i],vo_mult_r(alpha_down, temp)));
+ /* limit minimum value of the noise estimate to NOISE_MIN */
+ if(st->bckr_est[i] < NOISE_MIN)
+ {
+ st->bckr_est[i] = NOISE_MIN;
+ }
+ } else
+ { /* update upwards */
+ st->bckr_est[i] = add1(bckr_add, add1(st->bckr_est[i],vo_mult_r(alpha_up, temp)));
+
+ /* limit maximum value of the noise estimate to NOISE_MAX */
+ if(st->bckr_est[i] > NOISE_MAX)
+ {
+ st->bckr_est[i] = NOISE_MAX;
+ }
+ }
+ }
+
+ /* Update signal levels of the previous frame (old_level) */
+ for (i = 0; i < COMPLEN; i++)
+ {
+ st->old_level[i] = level[i];
+ }
}
/******************************************************************************
@@ -457,100 +457,100 @@ static void noise_estimate_update(
*******************************************************************************/
static Word16 vad_decision( /* return value : VAD_flag */
- VadVars * st, /* i/o : State structure */
- Word16 level[COMPLEN], /* i : sub-band levels of the input frame */
- Word32 pow_sum /* i : power of the input frame */
- )
+ VadVars * st, /* i/o : State structure */
+ Word16 level[COMPLEN], /* i : sub-band levels of the input frame */
+ Word32 pow_sum /* i : power of the input frame */
+ )
{
- Word32 i;
- Word32 L_snr_sum;
- Word32 L_temp;
- Word16 vad_thr, temp, noise_level;
- Word16 low_power_flag;
- Word16 hang_len, burst_len;
- Word16 ilog2_speech_level, ilog2_noise_level;
- Word16 temp2;
-
- /* Calculate squared sum of the input levels (level) divided by the background noise components
- * (bckr_est). */
- L_snr_sum = 0;
- for (i = 0; i < COMPLEN; i++)
- {
- Word16 exp;
-
- exp = norm_s(st->bckr_est[i]);
- temp = (st->bckr_est[i] << exp);
- temp = div_s((level[i] >> 1), temp);
- temp = shl(temp, (exp - (UNIRSHFT - 1)));
- L_snr_sum = L_mac(L_snr_sum, temp, temp);
- }
-
- /* Calculate average level of estimated background noise */
- L_temp = 0;
- for (i = 1; i < COMPLEN; i++) /* ignore lowest band */
- {
- L_temp = vo_L_add(L_temp, st->bckr_est[i]);
- }
-
- noise_level = extract_h((L_temp << 12));
- /* if SNR is lower than a threshold (MIN_SPEECH_SNR), and increase speech_level */
- temp = vo_mult(noise_level, MIN_SPEECH_SNR) << 3;
-
- if(st->speech_level < temp)
- {
- st->speech_level = temp;
- }
- ilog2_noise_level = ilog2(noise_level);
-
- /* If SNR is very poor, speech_level is probably corrupted by noise level. This is correctred by
- * subtracting MIN_SPEECH_SNR*noise_level from speech level */
- ilog2_speech_level = ilog2(st->speech_level - temp);
-
- temp = add1(vo_mult(NO_SLOPE, (ilog2_noise_level - NO_P1)), THR_HIGH);
-
- temp2 = add1(SP_CH_MIN, vo_mult(SP_SLOPE, (ilog2_speech_level - SP_P1)));
- if (temp2 < SP_CH_MIN)
- {
- temp2 = SP_CH_MIN;
- }
- if (temp2 > SP_CH_MAX)
- {
- temp2 = SP_CH_MAX;
- }
- vad_thr = temp + temp2;
-
- if(vad_thr < THR_MIN)
- {
- vad_thr = THR_MIN;
- }
- /* Shift VAD decision register */
- st->vadreg = (st->vadreg >> 1);
-
- /* Make intermediate VAD decision */
- if(L_snr_sum > vo_L_mult(vad_thr, (512 * COMPLEN)))
- {
- st->vadreg = (Word16) (st->vadreg | 0x4000);
- }
- /* check if the input power (pow_sum) is lower than a threshold" */
- if(pow_sum < VAD_POW_LOW)
- {
- low_power_flag = 1;
- } else
- {
- low_power_flag = 0;
- }
- /* Update background noise estimates */
- noise_estimate_update(st, level);
-
- /* Calculate values for hang_len and burst_len based on vad_thr */
- hang_len = add1(vo_mult(HANG_SLOPE, (vad_thr - HANG_P1)), HANG_HIGH);
- if(hang_len < HANG_LOW)
- {
- hang_len = HANG_LOW;
- }
- burst_len = add1(vo_mult(BURST_SLOPE, (vad_thr - BURST_P1)), BURST_HIGH);
-
- return (hangover_addition(st, low_power_flag, hang_len, burst_len));
+ Word32 i;
+ Word32 L_snr_sum;
+ Word32 L_temp;
+ Word16 vad_thr, temp, noise_level;
+ Word16 low_power_flag;
+ Word16 hang_len, burst_len;
+ Word16 ilog2_speech_level, ilog2_noise_level;
+ Word16 temp2;
+
+ /* Calculate squared sum of the input levels (level) divided by the background noise components
+ * (bckr_est). */
+ L_snr_sum = 0;
+ for (i = 0; i < COMPLEN; i++)
+ {
+ Word16 exp;
+
+ exp = norm_s(st->bckr_est[i]);
+ temp = (st->bckr_est[i] << exp);
+ temp = div_s((level[i] >> 1), temp);
+ temp = shl(temp, (exp - (UNIRSHFT - 1)));
+ L_snr_sum = L_mac(L_snr_sum, temp, temp);
+ }
+
+ /* Calculate average level of estimated background noise */
+ L_temp = 0;
+ for (i = 1; i < COMPLEN; i++) /* ignore lowest band */
+ {
+ L_temp = vo_L_add(L_temp, st->bckr_est[i]);
+ }
+
+ noise_level = extract_h((L_temp << 12));
+    /* if SNR is lower than a threshold (MIN_SPEECH_SNR), increase speech_level */
+ temp = vo_mult(noise_level, MIN_SPEECH_SNR) << 3;
+
+ if(st->speech_level < temp)
+ {
+ st->speech_level = temp;
+ }
+ ilog2_noise_level = ilog2(noise_level);
+
+    /* If SNR is very poor, speech_level is probably corrupted by noise level. This is corrected by
+ * subtracting MIN_SPEECH_SNR*noise_level from speech level */
+ ilog2_speech_level = ilog2(st->speech_level - temp);
+
+ temp = add1(vo_mult(NO_SLOPE, (ilog2_noise_level - NO_P1)), THR_HIGH);
+
+ temp2 = add1(SP_CH_MIN, vo_mult(SP_SLOPE, (ilog2_speech_level - SP_P1)));
+ if (temp2 < SP_CH_MIN)
+ {
+ temp2 = SP_CH_MIN;
+ }
+ if (temp2 > SP_CH_MAX)
+ {
+ temp2 = SP_CH_MAX;
+ }
+ vad_thr = temp + temp2;
+
+ if(vad_thr < THR_MIN)
+ {
+ vad_thr = THR_MIN;
+ }
+ /* Shift VAD decision register */
+ st->vadreg = (st->vadreg >> 1);
+
+ /* Make intermediate VAD decision */
+ if(L_snr_sum > vo_L_mult(vad_thr, (512 * COMPLEN)))
+ {
+ st->vadreg = (Word16) (st->vadreg | 0x4000);
+ }
+    /* check if the input power (pow_sum) is lower than a threshold */
+ if(pow_sum < VAD_POW_LOW)
+ {
+ low_power_flag = 1;
+ } else
+ {
+ low_power_flag = 0;
+ }
+ /* Update background noise estimates */
+ noise_estimate_update(st, level);
+
+ /* Calculate values for hang_len and burst_len based on vad_thr */
+ hang_len = add1(vo_mult(HANG_SLOPE, (vad_thr - HANG_P1)), HANG_HIGH);
+ if(hang_len < HANG_LOW)
+ {
+ hang_len = HANG_LOW;
+ }
+ burst_len = add1(vo_mult(BURST_SLOPE, (vad_thr - BURST_P1)), BURST_HIGH);
+
+ return (hangover_addition(st, low_power_flag, hang_len, burst_len));
}
/******************************************************************************
@@ -566,54 +566,54 @@ static Word16 vad_decision( /* return value : VAD_flag
*******************************************************************************/
static void Estimate_Speech(
- VadVars * st, /* i/o : State structure */
- Word16 in_level /* level of the input frame */
- )
+ VadVars * st, /* i/o : State structure */
+ Word16 in_level /* level of the input frame */
+ )
{
- Word16 alpha;
-
- /* if the required activity count cannot be achieved, reset counters */
- if((st->sp_est_cnt - st->sp_max_cnt) > (SP_EST_COUNT - SP_ACTIVITY_COUNT))
- {
- st->sp_est_cnt = 0;
- st->sp_max = 0;
- st->sp_max_cnt = 0;
- }
- st->sp_est_cnt += 1;
-
- if (((st->vadreg & 0x4000)||(in_level > st->speech_level)) && (in_level > MIN_SPEECH_LEVEL1))
- {
- /* update sp_max */
- if(in_level > st->sp_max)
- {
- st->sp_max = in_level;
- }
- st->sp_max_cnt += 1;
-
- if(st->sp_max_cnt >= SP_ACTIVITY_COUNT)
- {
- Word16 tmp;
- /* update speech estimate */
- tmp = (st->sp_max >> 1); /* scale to get "average" speech level */
-
- /* select update speed */
- if(tmp > st->speech_level)
- {
- alpha = ALPHA_SP_UP;
- } else
- {
- alpha = ALPHA_SP_DOWN;
- }
- if(tmp > MIN_SPEECH_LEVEL2)
- {
- st->speech_level = add1(st->speech_level, vo_mult_r(alpha, vo_sub(tmp, st->speech_level)));
- }
- /* clear all counters used for speech estimation */
- st->sp_max = 0;
- st->sp_max_cnt = 0;
- st->sp_est_cnt = 0;
- }
- }
+ Word16 alpha;
+
+ /* if the required activity count cannot be achieved, reset counters */
+ if((st->sp_est_cnt - st->sp_max_cnt) > (SP_EST_COUNT - SP_ACTIVITY_COUNT))
+ {
+ st->sp_est_cnt = 0;
+ st->sp_max = 0;
+ st->sp_max_cnt = 0;
+ }
+ st->sp_est_cnt += 1;
+
+ if (((st->vadreg & 0x4000)||(in_level > st->speech_level)) && (in_level > MIN_SPEECH_LEVEL1))
+ {
+ /* update sp_max */
+ if(in_level > st->sp_max)
+ {
+ st->sp_max = in_level;
+ }
+ st->sp_max_cnt += 1;
+
+ if(st->sp_max_cnt >= SP_ACTIVITY_COUNT)
+ {
+ Word16 tmp;
+ /* update speech estimate */
+ tmp = (st->sp_max >> 1); /* scale to get "average" speech level */
+
+ /* select update speed */
+ if(tmp > st->speech_level)
+ {
+ alpha = ALPHA_SP_UP;
+ } else
+ {
+ alpha = ALPHA_SP_DOWN;
+ }
+ if(tmp > MIN_SPEECH_LEVEL2)
+ {
+ st->speech_level = add1(st->speech_level, vo_mult_r(alpha, vo_sub(tmp, st->speech_level)));
+ }
+ /* clear all counters used for speech estimation */
+ st->sp_max = 0;
+ st->sp_max_cnt = 0;
+ st->sp_est_cnt = 0;
+ }
+ }
}
/******************************************************************************
@@ -624,30 +624,30 @@ static void Estimate_Speech(
*******************************************************************************/
Word16 wb_vad_init( /* return: non-zero with error, zero for ok. */
- VadVars ** state, /* i/o : State structure */
- VO_MEM_OPERATOR *pMemOP
- )
+ VadVars ** state, /* i/o : State structure */
+ VO_MEM_OPERATOR *pMemOP
+ )
{
- VadVars *s;
-
- if (state == (VadVars **) NULL)
- {
- fprintf(stderr, "vad_init: invalid parameter\n");
- return -1;
- }
- *state = NULL;
-
- /* allocate memory */
- if ((s = (VadVars *) mem_malloc(pMemOP, sizeof(VadVars), 32, VO_INDEX_ENC_AMRWB)) == NULL)
- {
- fprintf(stderr, "vad_init: can not malloc state structure\n");
- return -1;
- }
- wb_vad_reset(s);
-
- *state = s;
-
- return 0;
+ VadVars *s;
+
+ if (state == (VadVars **) NULL)
+ {
+ fprintf(stderr, "vad_init: invalid parameter\n");
+ return -1;
+ }
+ *state = NULL;
+
+ /* allocate memory */
+ if ((s = (VadVars *) mem_malloc(pMemOP, sizeof(VadVars), 32, VO_INDEX_ENC_AMRWB)) == NULL)
+ {
+ fprintf(stderr, "vad_init: can not malloc state structure\n");
+ return -1;
+ }
+ wb_vad_reset(s);
+
+ *state = s;
+
+ return 0;
}
/******************************************************************************
@@ -658,51 +658,51 @@ Word16 wb_vad_init( /* return: non-zero with error, zero
*******************************************************************************/
Word16 wb_vad_reset( /* return: non-zero with error, zero for ok. */
- VadVars * state /* i/o : State structure */
- )
+ VadVars * state /* i/o : State structure */
+ )
{
- Word32 i, j;
-
- if (state == (VadVars *) NULL)
- {
- fprintf(stderr, "vad_reset: invalid parameter\n");
- return -1;
- }
- state->tone_flag = 0;
- state->vadreg = 0;
- state->hang_count = 0;
- state->burst_count = 0;
- state->hang_count = 0;
-
- /* initialize memory used by the filter bank */
- for (i = 0; i < F_5TH_CNT; i++)
- {
- for (j = 0; j < 2; j++)
- {
- state->a_data5[i][j] = 0;
- }
- }
-
- for (i = 0; i < F_3TH_CNT; i++)
- {
- state->a_data3[i] = 0;
- }
-
- /* initialize the rest of the memory */
- for (i = 0; i < COMPLEN; i++)
- {
- state->bckr_est[i] = NOISE_INIT;
- state->old_level[i] = NOISE_INIT;
- state->ave_level[i] = NOISE_INIT;
- state->sub_level[i] = 0;
- }
-
- state->sp_est_cnt = 0;
- state->sp_max = 0;
- state->sp_max_cnt = 0;
- state->speech_level = SPEECH_LEVEL_INIT;
- state->prev_pow_sum = 0;
- return 0;
+ Word32 i, j;
+
+ if (state == (VadVars *) NULL)
+ {
+ fprintf(stderr, "vad_reset: invalid parameter\n");
+ return -1;
+ }
+ state->tone_flag = 0;
+ state->vadreg = 0;
+ state->hang_count = 0;
+ state->burst_count = 0;
+ state->hang_count = 0;
+
+ /* initialize memory used by the filter bank */
+ for (i = 0; i < F_5TH_CNT; i++)
+ {
+ for (j = 0; j < 2; j++)
+ {
+ state->a_data5[i][j] = 0;
+ }
+ }
+
+ for (i = 0; i < F_3TH_CNT; i++)
+ {
+ state->a_data3[i] = 0;
+ }
+
+ /* initialize the rest of the memory */
+ for (i = 0; i < COMPLEN; i++)
+ {
+ state->bckr_est[i] = NOISE_INIT;
+ state->old_level[i] = NOISE_INIT;
+ state->ave_level[i] = NOISE_INIT;
+ state->sub_level[i] = 0;
+ }
+
+ state->sp_est_cnt = 0;
+ state->sp_max = 0;
+ state->sp_max_cnt = 0;
+ state->speech_level = SPEECH_LEVEL_INIT;
+ state->prev_pow_sum = 0;
+ return 0;
}
/******************************************************************************
@@ -713,16 +713,16 @@ Word16 wb_vad_reset( /* return: non-zero with error, zero
*******************************************************************************/
void wb_vad_exit(
- VadVars ** state, /* i/o : State structure */
- VO_MEM_OPERATOR *pMemOP
- )
+ VadVars ** state, /* i/o : State structure */
+ VO_MEM_OPERATOR *pMemOP
+ )
{
- if (state == NULL || *state == NULL)
- return;
- /* deallocate memory */
- mem_free(pMemOP, *state, VO_INDEX_ENC_AMRWB);
- *state = NULL;
- return;
+ if (state == NULL || *state == NULL)
+ return;
+ /* deallocate memory */
+ mem_free(pMemOP, *state, VO_INDEX_ENC_AMRWB);
+ *state = NULL;
+ return;
}
/******************************************************************************
@@ -735,18 +735,18 @@ void wb_vad_exit(
*******************************************************************************/
void wb_vad_tone_detection(
- VadVars * st, /* i/o : State struct */
- Word16 p_gain /* pitch gain */
- )
+ VadVars * st, /* i/o : State struct */
+ Word16 p_gain /* pitch gain */
+ )
{
- /* update tone flag */
- st->tone_flag = (st->tone_flag >> 1);
-
- /* if (pitch_gain > TONE_THR) set tone flag */
- if (p_gain > TONE_THR)
- {
- st->tone_flag = (Word16) (st->tone_flag | 0x4000);
- }
+ /* update tone flag */
+ st->tone_flag = (st->tone_flag >> 1);
+
+ /* if (pitch_gain > TONE_THR) set tone flag */
+ if (p_gain > TONE_THR)
+ {
+ st->tone_flag = (Word16) (st->tone_flag | 0x4000);
+ }
}
/******************************************************************************
@@ -757,50 +757,50 @@ void wb_vad_tone_detection(
*******************************************************************************/
Word16 wb_vad( /* Return value : VAD Decision, 1 = speech, 0 = noise */
- VadVars * st, /* i/o : State structure */
- Word16 in_buf[] /* i : samples of the input frame */
- )
+ VadVars * st, /* i/o : State structure */
+ Word16 in_buf[] /* i : samples of the input frame */
+ )
{
- Word16 level[COMPLEN];
- Word32 i;
- Word16 VAD_flag, temp;
- Word32 L_temp, pow_sum;
-
- /* Calculate power of the input frame. */
- L_temp = 0L;
- for (i = 0; i < FRAME_LEN; i++)
- {
- L_temp = L_mac(L_temp, in_buf[i], in_buf[i]);
- }
-
- /* pow_sum = power of current frame and previous frame */
- pow_sum = L_add(L_temp, st->prev_pow_sum);
-
- /* save power of current frame for next call */
- st->prev_pow_sum = L_temp;
-
- /* If input power is very low, clear tone flag */
- if (pow_sum < POW_TONE_THR)
- {
- st->tone_flag = (Word16) (st->tone_flag & 0x1fff);
- }
- /* Run the filter bank and calculate signal levels at each band */
- filter_bank(st, in_buf, level);
-
- /* compute VAD decision */
- VAD_flag = vad_decision(st, level, pow_sum);
-
- /* Calculate input level */
- L_temp = 0;
- for (i = 1; i < COMPLEN; i++) /* ignore lowest band */
- {
- L_temp = vo_L_add(L_temp, level[i]);
- }
-
- temp = extract_h(L_temp << 12);
-
- Estimate_Speech(st, temp); /* Estimate speech level */
- return (VAD_flag);
+ Word16 level[COMPLEN];
+ Word32 i;
+ Word16 VAD_flag, temp;
+ Word32 L_temp, pow_sum;
+
+ /* Calculate power of the input frame. */
+ L_temp = 0L;
+ for (i = 0; i < FRAME_LEN; i++)
+ {
+ L_temp = L_mac(L_temp, in_buf[i], in_buf[i]);
+ }
+
+ /* pow_sum = power of current frame and previous frame */
+ pow_sum = L_add(L_temp, st->prev_pow_sum);
+
+ /* save power of current frame for next call */
+ st->prev_pow_sum = L_temp;
+
+ /* If input power is very low, clear tone flag */
+ if (pow_sum < POW_TONE_THR)
+ {
+ st->tone_flag = (Word16) (st->tone_flag & 0x1fff);
+ }
+ /* Run the filter bank and calculate signal levels at each band */
+ filter_bank(st, in_buf, level);
+
+ /* compute VAD decision */
+ VAD_flag = vad_decision(st, level, pow_sum);
+
+ /* Calculate input level */
+ L_temp = 0;
+ for (i = 1; i < COMPLEN; i++) /* ignore lowest band */
+ {
+ L_temp = vo_L_add(L_temp, level[i]);
+ }
+
+ temp = extract_h(L_temp << 12);
+
+ Estimate_Speech(st, temp); /* Estimate speech level */
+ return (VAD_flag);
}
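Taken together, the routines above form the standard AMR-WB VAD: filter_bank() splits each FRAME_LEN-sample frame into 12 sub-band levels, vad_decision() compares the per-band SNR against an adaptive threshold, noise_estimate_update() tracks the background levels, and hangover_addition() smooths the final flag. A hypothetical driver is sketched below; the header name, read_frame() and pitch_gain are placeholders for what the encoder (voAMRWBEnc.c) normally supplies:

    #include "wb_vad.h"   /* assumed header for the prototypes above */

    static int run_vad(VO_MEM_OPERATOR *pMemOP,
                       Word16 (*read_frame)(Word16 buf[FRAME_LEN]),
                       Word16 pitch_gain)
    {
        VadVars *vad = NULL;
        Word16 frame[FRAME_LEN];

        if (wb_vad_init(&vad, pMemOP) != 0)
            return -1;
        while (read_frame(frame)) {
            wb_vad_tone_detection(vad, pitch_gain);   /* normally fed per pitch subframe */
            Word16 flag = wb_vad(vad, frame);         /* 1 = speech, 0 = noise */
            (void)flag;                               /* consume the decision here */
        }
        wb_vad_exit(&vad, pMemOP);
        return 0;
    }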
diff --git a/media/libstagefright/codecs/amrwbenc/src/weight_a.c b/media/libstagefright/codecs/amrwbenc/src/weight_a.c
index a02b48d..23b774e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/weight_a.c
+++ b/media/libstagefright/codecs/amrwbenc/src/weight_a.c
@@ -18,7 +18,7 @@
* File: weight_a.c *
* *
* Description:Weighting of LPC coefficients *
-* ap[i] = a[i] * (gamma ** i) *
+* ap[i] = a[i] * (gamma ** i) *
* *
************************************************************************/
@@ -26,22 +26,22 @@
#include "basic_op.h"
void Weight_a(
- Word16 a[], /* (i) Q12 : a[m+1] LPC coefficients */
- Word16 ap[], /* (o) Q12 : Spectral expanded LPC coefficients */
- Word16 gamma, /* (i) Q15 : Spectral expansion factor. */
- Word16 m /* (i) : LPC order. */
- )
+ Word16 a[], /* (i) Q12 : a[m+1] LPC coefficients */
+ Word16 ap[], /* (o) Q12 : Spectral expanded LPC coefficients */
+ Word16 gamma, /* (i) Q15 : Spectral expansion factor. */
+ Word16 m /* (i) : LPC order. */
+ )
{
- Word32 num = m - 1, fac;
- *ap++ = *a++;
- fac = gamma;
- do{
- *ap++ =(Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
- fac = (vo_L_mult(fac, gamma) + 0x8000) >> 16;
- }while(--num != 0);
+ Word32 num = m - 1, fac;
+ *ap++ = *a++;
+ fac = gamma;
+ do{
+ *ap++ =(Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
+ fac = (vo_L_mult(fac, gamma) + 0x8000) >> 16;
+ }while(--num != 0);
- *ap++ = (Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
- return;
+ *ap++ = (Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
+ return;
}
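As the file header states, the loop above computes ap[i] = a[i] * (gamma ** i) with rounding at each Q15 multiply; a floating-point sketch of the same recurrence:

    /* Floating-point view of Weight_a() above (sketch only). */
    static void weight_a_ref(const double a[], double ap[], double gamma, int m)
    {
        double fac = 1.0;
        for (int i = 0; i <= m; i++) {
            ap[i] = a[i] * fac;   /* ap[i] = a[i] * gamma^i */
            fac *= gamma;
        }
    }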
diff --git a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
index 0b8d9e2..d0bbee2 100644
--- a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
+++ b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
@@ -23,19 +23,6 @@
#define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/
-const static int distance_tab[9][9] = /* [hp_guess][k] */
-{
- {0, 1, 1, 1, 1, 1, 1, 1, 1},
- {1, 0, 1, 2, 3, 4, 3, 2, 1},
- {1, 0, 0, 0, 1, 2, 3, 2, 1},
- {1, 2, 1, 0, 1, 2, 3, 4, 3},
- {1, 2, 1, 0, 0, 0, 1, 2, 3},
- {1, 4, 3, 2, 1, 0, 1, 2, 3},
- {1, 2, 3, 2, 1, 0, 0, 0, 1},
- {1, 2, 3, 4, 3, 2, 1, 0, 1},
- {1, 0, 1, 2, 3, 2, 1, 0, 0}
-};
-
#define CLIP_RESULT(x) if((uint)x > 0xFF){ \
x = 0xFF & (~(x>>31));}
diff --git a/media/libstagefright/codecs/common/Config.mk b/media/libstagefright/codecs/common/Config.mk
deleted file mode 100644
index a843cef..0000000
--- a/media/libstagefright/codecs/common/Config.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# This configure file is just for Linux projects against Android
-#
-
-VOPRJ :=
-VONJ :=
-
-# WARNING:
-# Using v7 breaks generic build
-ifeq ($(TARGET_ARCH),arm)
-VOTT := v5
-else
-VOTT := pc
-endif
-
-# Do we also need to check on ARCH_ARM_HAVE_ARMV7A? - probably not
-ifeq ($(TARGET_ARCH),arm)
- ifeq ($(ARCH_ARM_HAVE_NEON),true)
- VOTT := v7
- endif
-endif
-
-VOTEST := 0
-
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 948ae29..b3880ee 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -28,19 +28,22 @@ LOCAL_SRC_FILES := \
src/pvmp3_stereo_proc.cpp \
src/pvmp3_reorder.cpp \
-ifeq ($(TARGET_ARCH),arm)
-LOCAL_SRC_FILES += \
+LOCAL_SRC_FILES_arm += \
src/asm/pvmp3_polyphase_filter_window_gcc.s \
src/asm/pvmp3_mdct_18_gcc.s \
src/asm/pvmp3_dct_9_gcc.s \
src/asm/pvmp3_dct_16_gcc.s
-else
-LOCAL_SRC_FILES += \
+LOCAL_SRC_FILES_other_archs := \
src/pvmp3_polyphase_filter_window.cpp \
src/pvmp3_mdct_18.cpp \
src/pvmp3_dct_9.cpp \
src/pvmp3_dct_16.cpp
-endif
+
+LOCAL_SRC_FILES_arm64 := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_mips := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_mips64 := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_x86 := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_x86_64 := $(LOCAL_SRC_FILES_other_archs)
LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright/include \
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 9f7dd59..005956d 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -51,7 +51,9 @@ SoftMP3::SoftMP3(
mSignalledError(false),
mSawInputEos(false),
mSignalledOutputEos(false),
- mOutputPortSettingsChange(NONE) {
+ mOutputPortSettingsChange(NONE),
+ mLastAnchorTimeUs(-1),
+ mNextOutBufferTimeUs(0) {
initPorts();
initDecoder();
}
@@ -112,7 +114,7 @@ void SoftMP3::initPorts() {
void SoftMP3::initDecoder() {
mConfig->equalizerType = flat;
mConfig->crcEnabled = false;
-
+ mConfig->samplingRate = mSamplingRate;
uint32_t memRequirements = pvmp3_decoderMemRequirements();
mDecoderBuf = malloc(memRequirements);
@@ -239,7 +241,7 @@ void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
-
+ int64_t tmpTime = 0;
while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
BufferInfo *inInfo = NULL;
OMX_BUFFERHEADERTYPE *inHeader = NULL;
@@ -254,7 +256,20 @@ void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
if (inHeader) {
if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
- mAnchorTimeUs = inHeader->nTimeStamp;
+            // Use the new input buffer timestamp as Anchor Time if it is
+            // a) the first buffer, or
+            // b) the first buffer after a seek, or
+            // c) different from the last input buffer timestamp.
+            // If the input buffer timestamp equals the last input buffer timestamp,
+            // treat it as an erroneous timestamp: ignore the new input buffer
+            // timestamp and use the last output buffer timestamp as Anchor Time.
+ if ((mLastAnchorTimeUs != inHeader->nTimeStamp)) {
+ mAnchorTimeUs = inHeader->nTimeStamp;
+ mLastAnchorTimeUs = inHeader->nTimeStamp;
+ } else {
+ mAnchorTimeUs = mNextOutBufferTimeUs;
+ }
+
mNumFramesOutput = 0;
}
@@ -275,7 +290,7 @@ void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
if ((int32)outHeader->nAllocLen < mConfig->outputFrameSize) {
- ALOGE("input buffer too small: got %lu, expected %u",
+ ALOGE("input buffer too small: got %u, expected %u",
outHeader->nAllocLen, mConfig->outputFrameSize);
android_errorWriteLog(0x534e4554, "27793371");
notify(OMX_EventError, OMX_ErrorUndefined, OUTPUT_BUFFER_TOO_SMALL, NULL);
@@ -294,10 +309,13 @@ void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
if (decoderErr != NO_ENOUGH_MAIN_DATA_ERROR
&& decoderErr != SIDE_INFO_ERROR) {
ALOGE("mp3 decoder returned error %d", decoderErr);
-
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
- mSignalledError = true;
- return;
+ if(decoderErr == SYNCH_LOST_ERROR) {
+ mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
+ } else {
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ mSignalledError = true;
+ return;
+ }
}
if (mConfig->outputFrameSize == 0) {
@@ -361,7 +379,7 @@ void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
outHeader->nTimeStamp =
mAnchorTimeUs + (mNumFramesOutput * 1000000ll) / mSamplingRate;
-
+ tmpTime = outHeader->nTimeStamp;
if (inHeader) {
CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
@@ -386,6 +404,10 @@ void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
+
+ if (tmpTime > 0) {
+ mNextOutBufferTimeUs = tmpTime;
+ }
}
void SoftMP3::onPortFlushCompleted(OMX_U32 portIndex) {
@@ -397,6 +419,8 @@ void SoftMP3::onPortFlushCompleted(OMX_U32 portIndex) {
mSignalledError = false;
mSawInputEos = false;
mSignalledOutputEos = false;
+ mLastAnchorTimeUs = -1;
+ mNextOutBufferTimeUs = 0;
}
}
@@ -433,6 +457,8 @@ void SoftMP3::onReset() {
mSawInputEos = false;
mSignalledOutputEos = false;
mOutputPortSettingsChange = NONE;
+ mLastAnchorTimeUs = -1;
+ mNextOutBufferTimeUs = 0;
}
} // namespace android
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h
index 3bfa6c7..5b6af88 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.h
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h
@@ -70,6 +70,9 @@ private:
AWAITING_ENABLED
} mOutputPortSettingsChange;
+ int64_t mLastAnchorTimeUs;
+ int64_t mNextOutBufferTimeUs;
+
void initPorts();
void initDecoder();
void *memsetSafe(OMX_BUFFERHEADERTYPE *outHeader, int c, size_t len);
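The new mLastAnchorTimeUs/mNextOutBufferTimeUs members back the anchor-time rule added to SoftMP3.cpp above: output timestamps are interpolated from the anchor at the nominal sample rate, and the anchor is re-taken from the input only when the input timestamp actually changes. The interpolation itself is the line already shown in the diff; as an illustrative helper:

    #include <stdint.h>

    /* outTimeStamp = anchor + frames_already_output / sample_rate (sketch). */
    static int64_t out_timestamp_us(int64_t anchor_us, int64_t frames_out, int32_t sample_rate)
    {
        return anchor_us + (frames_out * 1000000LL) / sample_rate;
    }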
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 912fac2..444a7fc 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -102,7 +102,6 @@ status_t SoftVPX::destroyDecoder() {
}
bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
- List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
BufferInfo *outInfo = NULL;
OMX_BUFFERHEADERTYPE *outHeader = NULL;
@@ -215,7 +214,6 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
bool EOSseen = false;
- vpx_codec_err_t err;
bool portWillReset = false;
while ((mEOSStatus == INPUT_EOS_SEEN || !inQueue.empty())
@@ -239,8 +237,6 @@ void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
- BufferInfo *outInfo = *outQueue.begin();
- OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
mEOSStatus = INPUT_EOS_SEEN;
EOSseen = true;
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index e63b6b1..a16cbdf 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -84,19 +84,15 @@ MY_OMXDL_ASM_SRC := \
./omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S \
./omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S \
-ifeq ($(TARGET_ARCH),arm)
- ifeq ($(ARCH_ARM_HAVE_NEON),true)
+
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
LOCAL_ARM_NEON := true
-# LOCAL_CFLAGS := -std=c99 -D._NEON -D._OMXDL
- LOCAL_CFLAGS := -DH264DEC_NEON -DH264DEC_OMXDL
- LOCAL_SRC_FILES += $(MY_ASM) $(MY_OMXDL_C_SRC) $(MY_OMXDL_ASM_SRC)
- LOCAL_C_INCLUDES += $(LOCAL_PATH)/./source/arm_neon_asm_gcc
- LOCAL_C_INCLUDES += $(LOCAL_PATH)/./omxdl/arm_neon/api \
+ LOCAL_CFLAGS_arm := -DH264DEC_NEON -DH264DEC_OMXDL
+ LOCAL_SRC_FILES_arm := $(MY_ASM) $(MY_OMXDL_C_SRC) $(MY_OMXDL_ASM_SRC)
+ LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/./source/arm_neon_asm_gcc
+ LOCAL_C_INCLUDES_arm += $(LOCAL_PATH)/./omxdl/arm_neon/api \
$(LOCAL_PATH)/./omxdl/arm_neon/vc/api \
$(LOCAL_PATH)/./omxdl/arm_neon/vc/m4p10/api
- # h264bsdWriteMacroblock.S does not compile with Clang.
- LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
- endif
endif
LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h
index 91e38b8..1992885 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h
@@ -86,7 +86,7 @@ typedef OMX_S16 ARM_BLOCK8x8[64];
/* Alignment operation */
-#define armAlignToBytes(Ptr,N) (Ptr + ( ((N-(int)Ptr)&(N-1)) / sizeof(*Ptr) ))
+#define armAlignToBytes(Ptr,N) (Ptr + ( ((N-(intptr_t)Ptr)&(N-1)) / sizeof(*Ptr) ))
#define armAlignTo2Bytes(Ptr) armAlignToBytes(Ptr,2)
#define armAlignTo4Bytes(Ptr) armAlignToBytes(Ptr,4)
#define armAlignTo8Bytes(Ptr) armAlignToBytes(Ptr,8)
@@ -98,8 +98,8 @@ typedef OMX_S16 ARM_BLOCK8x8[64];
#define armRetDataErrIf(condition, code) if(condition) { return (code); }
#ifndef ALIGNMENT_DOESNT_MATTER
-#define armIsByteAligned(Ptr,N) ((((int)(Ptr)) % N)==0)
-#define armNotByteAligned(Ptr,N) ((((int)(Ptr)) % N)!=0)
+#define armIsByteAligned(Ptr,N) ((((intptr_t)(Ptr)) % N)==0)
+#define armNotByteAligned(Ptr,N) ((((intptr_t)(Ptr)) % N)!=0)
#else
#define armIsByteAligned(Ptr,N) (1)
#define armNotByteAligned(Ptr,N) (0)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S
index 46e0018..e1ffb09 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_Average_4x4_Align0_unsafe
- .func armVCM4P10_Average_4x4_Align0_unsafe
armVCM4P10_Average_4x4_Align0_unsafe:
PUSH {r4-r6,lr}
LDR r7, =0x80808080
@@ -55,10 +54,8 @@ armVCM4P10_Average_4x4_Align0_unsafe:
EOR r4,r4,r7
STR r4,[r2],r3
POP {r4-r6,pc}
- .endfunc
.global armVCM4P10_Average_4x4_Align2_unsafe
- .func armVCM4P10_Average_4x4_Align2_unsafe
armVCM4P10_Average_4x4_Align2_unsafe:
PUSH {r4-r6,lr}
LDR r7, =0x80808080
@@ -99,10 +96,8 @@ armVCM4P10_Average_4x4_Align2_unsafe:
EOR r4,r4,r7
STR r4,[r2],r3
POP {r4-r6,pc}
- .endfunc
.global armVCM4P10_Average_4x4_Align3_unsafe
- .func armVCM4P10_Average_4x4_Align3_unsafe
armVCM4P10_Average_4x4_Align3_unsafe:
PUSH {r4-r6,lr}
LDR r7, =0x80808080
@@ -143,7 +138,6 @@ armVCM4P10_Average_4x4_Align3_unsafe:
EOR r4,r4,r7
STR r4,[r2],r3
POP {r4-r6,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S
index ca64a02..40ea4a9 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_DeblockingChromabSLT4_unsafe
- .func armVCM4P10_DeblockingChromabSLT4_unsafe
armVCM4P10_DeblockingChromabSLT4_unsafe:
VLD1.32 {d18[0]},[r5]!
VSUBL.U8 q11,d5,d9
@@ -50,10 +49,8 @@ armVCM4P10_DeblockingChromabSLT4_unsafe:
VQMOVUN.S16 d29,q14
VQMOVUN.S16 d24,q12
BX lr
- .endfunc
.global armVCM4P10_DeblockingChromabSGE4_unsafe
- .func armVCM4P10_DeblockingChromabSGE4_unsafe
armVCM4P10_DeblockingChromabSGE4_unsafe:
VHADD.U8 d13,d4,d9
VHADD.U8 d31,d8,d5
@@ -63,7 +60,6 @@ armVCM4P10_DeblockingChromabSGE4_unsafe:
VRHADD.U8 d13,d13,d5
VRHADD.U8 d31,d31,d9
BX lr
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S
index 193bc5e..05fb2c5 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_DeblockingLumabSLT4_unsafe
- .func armVCM4P10_DeblockingLumabSLT4_unsafe
armVCM4P10_DeblockingLumabSLT4_unsafe:
VSUBL.U8 q11,d5,d9
VLD1.8 {d18[]},[r5]!
@@ -66,10 +65,8 @@ armVCM4P10_DeblockingLumabSLT4_unsafe:
VBIF d24,d8,d16
VBIF d25,d9,d12
BX lr
- .endfunc
.global armVCM4P10_DeblockingLumabSGE4_unsafe
- .func armVCM4P10_DeblockingLumabSGE4_unsafe
armVCM4P10_DeblockingLumabSGE4_unsafe:
VSHR.U8 d19,d0,#2
VADD.I8 d19,d19,d15
@@ -111,7 +108,6 @@ armVCM4P10_DeblockingLumabSGE4_unsafe:
VBIF d24,d8,d16
VBIF d28,d10,d12
BX lr
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
index 8e0db37..27c0452 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
@@ -42,7 +42,6 @@
.hidden armVCM4P10_ZigZag_4x4
.global armVCM4P10_DecodeCoeffsToPair
- .func armVCM4P10_DecodeCoeffsToPair
armVCM4P10_DecodeCoeffsToPair:
PUSH {r4-r12,lr}
SUB sp,sp,#0x40
@@ -302,7 +301,6 @@ L0x344:
L0x35c:
ADD sp,sp,#0x40
POP {r4-r12,pc}
- .endfunc
.LarmVCM4P10_CAVLCCoeffTokenTables:
.word armVCM4P10_CAVLCCoeffTokenTables-(P0+8)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S
index 7206d76..1de9004 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_HorAlign9x_unsafe
- .func armVCM4P10_InterpolateLuma_HorAlign9x_unsafe
armVCM4P10_InterpolateLuma_HorAlign9x_unsafe:
MOV r12,r8
AND r7,r0,#3
@@ -83,10 +82,8 @@ CopyEnd:
MOV r0,r12
MOV r1,#0xc
BX lr
- .endfunc
.global armVCM4P10_InterpolateLuma_VerAlign4x_unsafe
- .func armVCM4P10_InterpolateLuma_VerAlign4x_unsafe
armVCM4P10_InterpolateLuma_VerAlign4x_unsafe:
AND r7,r0,#3
BIC r0,r0,#3
@@ -132,7 +129,6 @@ CopyVEnd:
SUB r0,r8,#0x1c
MOV r1,#4
BX lr
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
index e41d662..7ba2890 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_Copy4x4_unsafe
- .func armVCM4P10_InterpolateLuma_Copy4x4_unsafe
armVCM4P10_InterpolateLuma_Copy4x4_unsafe:
PUSH {r4-r6,lr}
AND r12,r0,#3
@@ -114,7 +113,6 @@ Copy4x4Align3:
STR r8,[r2],r3
Copy4x4End:
POP {r4-r6,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
index c8f5cda..8b2c678 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_HorDiagCopy_unsafe
- .func armVCM4P10_InterpolateLuma_HorDiagCopy_unsafe
armVCM4P10_InterpolateLuma_HorDiagCopy_unsafe:
PUSH {r4-r6,lr}
MOV lr,#4
@@ -57,10 +56,8 @@ LoopStart1:
SUB r0,r7,#0x20
MOV r1,#8
POP {r4-r6,pc}
- .endfunc
.global armVCM4P10_InterpolateLuma_VerDiagCopy_unsafe
- .func armVCM4P10_InterpolateLuma_VerDiagCopy_unsafe
armVCM4P10_InterpolateLuma_VerDiagCopy_unsafe:
PUSH {r4-r6,lr}
LDR r6, =0xfe00fe0
@@ -116,7 +113,6 @@ LoopStart:
SUB r0,r7,#0x18
MOV r1,#4
POP {r4-r6,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S
index f5868c0..77aa927 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe
- .func armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe:
PUSH {r4-r12,lr}
VLD1.8 {d0,d1},[r0],r1
@@ -173,7 +172,6 @@ armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe:
VQMOVN.U16 d4,q2
VQMOVN.U16 d6,q3
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
index 065995d..e5f7f1c 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe
- .func armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe
armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe:
PUSH {r4-r12,lr}
VLD1.8 {d0,d1},[r0],r1
@@ -128,7 +127,6 @@ armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe:
VQMOVN.U16 d4,q2
VQMOVN.U16 d6,q3
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S
index 1e2d16b..393d385 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe
- .func armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe
armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe:
PUSH {r4-r12,lr}
VLD1.8 {d22,d23},[r0],r1
@@ -81,7 +80,6 @@ armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe:
VQRSHRUN.S16 d26,q13,#5
VQRSHRUN.S16 d28,q14,#5
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S
index c7def2a..698e7b5 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe
- .func armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe
armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe:
PUSH {r4-r12,lr}
VLD1.8 {d7},[r0],r1
@@ -67,7 +66,6 @@ armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe:
VQRSHRUN.S16 d4,q2,#5
VQRSHRUN.S16 d6,q3,#5
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S
index 2f4293f..e469516 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S
@@ -38,7 +38,6 @@ armVCM4P10_WidthBranchTableMVIsZero:
.word WidthIs8MVIsZero-(P0+8)
.global armVCM4P10_Interpolate_Chroma
- .func armVCM4P10_Interpolate_Chroma
armVCM4P10_Interpolate_Chroma:
PUSH {r4-r12,lr}
VPUSH {d8-d15}
@@ -183,7 +182,6 @@ WidthIs2MVIsZero:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S
index d4cedb5..e18bec7 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_TransformResidual4x4
- .func armVCM4P10_TransformResidual4x4
armVCM4P10_TransformResidual4x4:
VPUSH {d8}
VLD4.16 {d0,d1,d2,d3},[r1]
@@ -61,7 +60,6 @@ armVCM4P10_TransformResidual4x4:
VST1.16 {d0,d1,d2,d3},[r0]
VPOP {d8}
BX lr
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S
index 1652dc6..b97efcb 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S
@@ -24,9 +24,9 @@
.arm
.fpu neon
.text
+ .syntax unified
.global armVCM4P10_UnpackBlock4x4
- .func armVCM4P10_UnpackBlock4x4
armVCM4P10_UnpackBlock4x4:
PUSH {r4-r8,lr}
LDR r2,[r0,#0]
@@ -40,16 +40,15 @@ armVCM4P10_UnpackBlock4x4:
STRD r4,r5,[r1,#0x18]
unpackLoop:
TST r3,#0x10
- LDRNESB r5,[r2,#1]
- LDRNEB r4,[r2],#2
+ LDRSBNE r5,[r2,#1]
+ LDRBNE r4,[r2],#2
AND r6,r7,r3,LSL #1
- LDREQSB r4,[r2],#1
+ LDRSBEQ r4,[r2],#1
ORRNE r4,r4,r5,LSL #8
TST r3,#0x20
- LDREQB r3,[r2],#1
+ LDRBEQ r3,[r2],#1
STRH r4,[r1,r6]
BEQ unpackLoop
STR r2,[r0,#0]
POP {r4-r8,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S
index 90b0947..6a99bde 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_DeblockLuma_I
- .func omxVCM4P10_DeblockLuma_I
omxVCM4P10_DeblockLuma_I:
PUSH {r4-r9,lr}
MOVS r6,r0
@@ -76,7 +75,6 @@ L0x64:
BL omxVCM4P10_FilterDeblockingLuma_HorEdge_I
ADD sp,sp,#0xc
POP {r4-r9,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
index 4a74594..17c5d8b 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_DequantTransformResidualFromPairAndAdd
- .func omxVCM4P10_DequantTransformResidualFromPairAndAdd
omxVCM4P10_DequantTransformResidualFromPairAndAdd:
PUSH {r4-r12,lr}
VPUSH {d8-d9}
@@ -131,7 +130,6 @@ L0x130:
ADD sp,sp,#0x20
VPOP {d8-d9}
POP {r4-r12,pc}
- .endfunc
.LarmVCM4P10_QPModuloTable:
.word armVCM4P10_QPModuloTable-(P0+8)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S
index f20fb78..4a83516 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_FilterDeblockingChroma_HorEdge_I
- .func omxVCM4P10_FilterDeblockingChroma_HorEdge_I
omxVCM4P10_FilterDeblockingChroma_HorEdge_I:
PUSH {r4-r10,lr}
VPUSH {d8-d15}
@@ -96,7 +95,6 @@ L0xe4:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r10,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S
index 003526e..fe10931 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_FilterDeblockingChroma_VerEdge_I
- .func omxVCM4P10_FilterDeblockingChroma_VerEdge_I
omxVCM4P10_FilterDeblockingChroma_VerEdge_I:
PUSH {r4-r12,lr}
VPUSH {d8-d15}
@@ -132,7 +131,6 @@ L0x170:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S
index 7ddc42e..84ffad2 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_FilterDeblockingLuma_HorEdge_I
- .func omxVCM4P10_FilterDeblockingLuma_HorEdge_I
omxVCM4P10_FilterDeblockingLuma_HorEdge_I:
PUSH {r4-r12,lr}
VPUSH {d8-d15}
@@ -116,7 +115,6 @@ L0x130:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
index f71aceb..f2a3682 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_FilterDeblockingLuma_VerEdge_I
- .func omxVCM4P10_FilterDeblockingLuma_VerEdge_I
omxVCM4P10_FilterDeblockingLuma_VerEdge_I:
PUSH {r4-r12,lr}
VPUSH {d8-d15}
@@ -166,7 +165,6 @@ L0x1f0:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S
index 000fbeb..314eabd 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S
@@ -26,7 +26,6 @@
.text
.global omxVCM4P10_InterpolateLuma
- .func omxVCM4P10_InterpolateLuma
omxVCM4P10_InterpolateLuma:
PUSH {r4-r12,lr}
VPUSH {d8-d15}
@@ -332,7 +331,6 @@ L0x434:
ADD sp,sp,#0x10
VPOP {d8-d15}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S
index 4e2cff6..50d1350 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S
@@ -36,7 +36,6 @@ armVCM4P10_MultiplierTableChroma8x8:
.hword 1, 2, 3,4
.global omxVCM4P10_PredictIntraChroma_8x8
- .func omxVCM4P10_PredictIntraChroma_8x8
omxVCM4P10_PredictIntraChroma_8x8:
PUSH {r4-r10,lr}
VPUSH {d8-d15}
@@ -226,7 +225,6 @@ L0x28c:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r10,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S
index c71c93b..0044636 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S
@@ -42,7 +42,6 @@ armVCM4P10_MultiplierTable16x16:
.global omxVCM4P10_PredictIntra_16x16
- .func omxVCM4P10_PredictIntra_16x16
omxVCM4P10_PredictIntra_16x16:
PUSH {r4-r12,lr}
VPUSH {d8-d15}
@@ -246,7 +245,6 @@ L0x2d4:
MOV r0,#0
VPOP {d8-d15}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S
index cd5d356..d4c8485 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S
@@ -35,7 +35,6 @@ armVCM4P10_pSwitchTable4x4:
.word OMX_VC_4x4_HU-(P0+8)
.global omxVCM4P10_PredictIntra_4x4
- .func omxVCM4P10_PredictIntra_4x4
omxVCM4P10_PredictIntra_4x4:
PUSH {r4-r12,lr}
VPUSH {d8-d12}
@@ -270,6 +269,5 @@ L0x348:
MOV r0,#0
VPOP {d8-d12}
POP {r4-r12,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S
index 5570892..74f5103 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S
@@ -24,9 +24,9 @@
.arm
.fpu neon
.text
+ .syntax unified
.global omxVCM4P10_TransformDequantChromaDCFromPair
- .func omxVCM4P10_TransformDequantChromaDCFromPair
omxVCM4P10_TransformDequantChromaDCFromPair:
push {r4-r10, lr}
ldr r9, [r0,#0]
@@ -36,13 +36,13 @@ omxVCM4P10_TransformDequantChromaDCFromPair:
ldrb r6, [r9], #1
unpackLoop:
tst r6, #0x10
- ldrnesb r5, [r9, #1]
- ldrneb r4, [r9], #2
+ ldrsbne r5, [r9, #1]
+ ldrbne r4, [r9], #2
and r7, r8, r6, lsl #1
- ldreqsb r4, [r9], #1
+ ldrsbeq r4, [r9], #1
orrne r4, r4, r5, lsl #8
tst r6, #0x20
- ldreqb r6, [r9], #1
+ ldrbeq r6, [r9], #1
strh r4, [r1, r7]
beq unpackLoop
ldmia r1, {r3, r4}
@@ -66,7 +66,6 @@ P1: add r6, pc
vst1.16 {d2}, [r1]
mov r0, #0
pop {r4-r10, pc}
- .endfunc
.LarmVCM4P10_QPDivTable:
.word armVCM4P10_QPDivTable-(P0+8)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S
index 5b6eee0..a01030a 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S
@@ -26,7 +26,6 @@
.text
.global armVCM4P10_InvTransformDequantLumaDC4x4
- .func armVCM4P10_InvTransformDequantLumaDC4x4
armVCM4P10_InvTransformDequantLumaDC4x4:
PUSH {r4-r6,lr}
VPUSH {d8-d13}
@@ -73,7 +72,6 @@ P1: ADD r3, pc
VST1.16 {d0,d1,d2,d3},[r0]
VPOP {d8-d13}
POP {r4-r6,pc}
- .endfunc
.LarmVCM4P10_QPDivTable:
.word armVCM4P10_QPDivTable-(P0+8)
@@ -81,7 +79,6 @@ P1: ADD r3, pc
.word armVCM4P10_VMatrixQPModTable-(P1+8)
.global omxVCM4P10_TransformDequantLumaDCFromPair
-.func omxVCM4P10_TransformDequantLumaDCFromPair
omxVCM4P10_TransformDequantLumaDCFromPair:
PUSH {r4-r6,lr}
MOV r4,r1
@@ -92,7 +89,6 @@ omxVCM4P10_TransformDequantLumaDCFromPair:
BL armVCM4P10_InvTransformDequantLumaDC4x4
MOV r0,#0
POP {r4-r6,pc}
- .endfunc
.end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h b/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h
index fbb97e2..7304863 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h
@@ -86,7 +86,7 @@ typedef OMX_S16 ARM_BLOCK8x8[64];
/* Alignment operation */
-#define armAlignToBytes(Ptr,N) (Ptr + ( ((N-(int)Ptr)&(N-1)) / sizeof(*Ptr) ))
+#define armAlignToBytes(Ptr,N) (Ptr + ( ((N-(intptr_t)Ptr)&(N-1)) / sizeof(*Ptr) ))
#define armAlignTo2Bytes(Ptr) armAlignToBytes(Ptr,2)
#define armAlignTo4Bytes(Ptr) armAlignToBytes(Ptr,4)
#define armAlignTo8Bytes(Ptr) armAlignToBytes(Ptr,8)
@@ -98,8 +98,8 @@ typedef OMX_S16 ARM_BLOCK8x8[64];
#define armRetDataErrIf(condition, code) if(condition) { return (code); }
#ifndef ALIGNMENT_DOESNT_MATTER
-#define armIsByteAligned(Ptr,N) ((((int)(Ptr)) % N)==0)
-#define armNotByteAligned(Ptr,N) ((((int)(Ptr)) % N)!=0)
+#define armIsByteAligned(Ptr,N) ((((intptr_t)(Ptr)) % N)==0)
+#define armNotByteAligned(Ptr,N) ((((intptr_t)(Ptr)) % N)!=0)
#else
#define armIsByteAligned(Ptr,N) (1)
#define armNotByteAligned(Ptr,N) (0)
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S
index f39f5c4..969a75c 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S
@@ -31,11 +31,9 @@
.global \name
.endif
.type \name, %function
- .func \name
\name:
.endm
.macro endfunction
- .endfunc
.endm
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S
index c8a940e..3c2752f 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S
@@ -16,7 +16,7 @@
#include "asm_common.S"
- preserve8
+ PRESERVE8
.fpu neon
.text
@@ -29,7 +29,7 @@
/* -- NEON registers -- */
-#define qZero Q0.U8
+#define qZero Q0
/*------------------------------------------------------------------------------
@@ -47,17 +47,17 @@
function h264bsdClearMbLayer, export=1
- VMOV qZero, #0
+ VMOV.I8 qZero, #0
ADD pTmp, pMbLayer, #16
MOV step, #32
SUBS size, size, #64
loop:
- VST1 {qZero}, [pMbLayer], step
+ VST1.8 {qZero}, [pMbLayer], step
SUBS size, size, #64
- VST1 {qZero}, [pTmp], step
- VST1 {qZero}, [pMbLayer], step
- VST1 {qZero}, [pTmp], step
+ VST1.8 {qZero}, [pTmp], step
+ VST1.8 {qZero}, [pMbLayer], step
+ VST1.8 {qZero}, [pTmp], step
BCS loop
BX lr
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S
index 05253d0..b1c9f60 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S
@@ -15,7 +15,7 @@
@
#include "asm_common.S"
- preserve8
+ PRESERVE8
.arm
.text
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S
index 6955b9a..6ed6227 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S
@@ -16,7 +16,7 @@
#include "asm_common.S"
- preserve8
+ PRESERVE8
.fpu neon
.text
@@ -33,12 +33,12 @@
/* -- NEON registers -- */
-#define qTmp0 Q0.U8
-#define qTmp1 Q1.U8
-#define dTmp0 D0.U8
-#define dTmp1 D1.U8
-#define dTmp2 D2.U8
-#define dTmp3 D3.U8
+#define qTmp0 Q0
+#define qTmp1 Q1
+#define dTmp0 D0
+#define dTmp1 D1
+#define dTmp2 D2
+#define dTmp3 D3
/*
void h264bsdFillRow7(const u8 * ref, u8 * fill, i32 left, i32 center,
@@ -74,40 +74,40 @@ switch_center:
B case_8
case_8:
- VLD1 {qTmp0, qTmp1}, [ref]!
+ VLD1.8 {qTmp0, qTmp1}, [ref]!
SUB center, center, #32
- VST1 {qTmp0}, [fill]!
- VST1 {qTmp1}, [fill]!
+ VST1.8 {qTmp0}, [fill]!
+ VST1.8 {qTmp1}, [fill]!
B loop_center
case_7:
- VLD1 {dTmp0,dTmp1,dTmp2}, [ref]!
+ VLD1.8 {dTmp0,dTmp1,dTmp2}, [ref]!
SUB center, center, #28
LDR tmp2, [ref], #4
- VST1 {dTmp0,dTmp1,dTmp2}, [fill]!
+ VST1.8 {dTmp0,dTmp1,dTmp2}, [fill]!
STR tmp2, [fill],#4
B loop_center
case_6:
- VLD1 {dTmp0,dTmp1,dTmp2}, [ref]!
+ VLD1.8 {dTmp0,dTmp1,dTmp2}, [ref]!
SUB center, center, #24
- VST1 {dTmp0,dTmp1,dTmp2}, [fill]!
+ VST1.8 {dTmp0,dTmp1,dTmp2}, [fill]!
B loop_center
case_5:
- VLD1 {qTmp0}, [ref]!
+ VLD1.8 {qTmp0}, [ref]!
SUB center, center, #20
LDR tmp2, [ref], #4
- VST1 {qTmp0}, [fill]!
+ VST1.8 {qTmp0}, [fill]!
STR tmp2, [fill],#4
B loop_center
case_4:
- VLD1 {qTmp0}, [ref]!
+ VLD1.8 {qTmp0}, [ref]!
SUB center, center, #16
- VST1 {qTmp0}, [fill]!
+ VST1.8 {qTmp0}, [fill]!
B loop_center
case_3:
- VLD1 {dTmp0}, [ref]!
+ VLD1.8 {dTmp0}, [ref]!
SUB center, center, #12
LDR tmp2, [ref], #4
- VST1 dTmp0, [fill]!
+ VST1.8 dTmp0, [fill]!
STR tmp2, [fill],#4
B loop_center
case_2:
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S
index b3f3191..aa88471 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S
@@ -16,7 +16,7 @@
#include "asm_common.S"
- preserve8
+ PRESERVE8
.arm
.text
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S
index 495d560..4093b92 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S
@@ -16,8 +16,8 @@
#include "asm_common.S"
- require8
- preserve8
+ REQUIRE8
+ PRESERVE8
.arm
.fpu neon
@@ -34,39 +34,39 @@
/* -- NEON registers -- */
-#define qRow0 Q0.U8
-#define qRow1 Q1.U8
-#define qRow2 Q2.U8
-#define qRow3 Q3.U8
-#define qRow4 Q4.U8
-#define qRow5 Q5.U8
-#define qRow6 Q6.U8
-#define qRow7 Q7.U8
-#define qRow8 Q8.U8
-#define qRow9 Q9.U8
-#define qRow10 Q10.U8
-#define qRow11 Q11.U8
-#define qRow12 Q12.U8
-#define qRow13 Q13.U8
-#define qRow14 Q14.U8
-#define qRow15 Q15.U8
-
-#define dRow0 D0.U8
-#define dRow1 D1.U8
-#define dRow2 D2.U8
-#define dRow3 D3.U8
-#define dRow4 D4.U8
-#define dRow5 D5.U8
-#define dRow6 D6.U8
-#define dRow7 D7.U8
-#define dRow8 D8.U8
-#define dRow9 D9.U8
-#define dRow10 D10.U8
-#define dRow11 D11.U8
-#define dRow12 D12.U8
-#define dRow13 D13.U8
-#define dRow14 D14.U8
-#define dRow15 D15.U8
+#define qRow0 Q0
+#define qRow1 Q1
+#define qRow2 Q2
+#define qRow3 Q3
+#define qRow4 Q4
+#define qRow5 Q5
+#define qRow6 Q6
+#define qRow7 Q7
+#define qRow8 Q8
+#define qRow9 Q9
+#define qRow10 Q10
+#define qRow11 Q11
+#define qRow12 Q12
+#define qRow13 Q13
+#define qRow14 Q14
+#define qRow15 Q15
+
+#define dRow0 D0
+#define dRow1 D1
+#define dRow2 D2
+#define dRow3 D3
+#define dRow4 D4
+#define dRow5 D5
+#define dRow6 D6
+#define dRow7 D7
+#define dRow8 D8
+#define dRow9 D9
+#define dRow10 D10
+#define dRow11 D11
+#define dRow12 D12
+#define dRow13 D13
+#define dRow14 D14
+#define dRow15 D15
/*------------------------------------------------------------------------------
@@ -99,59 +99,58 @@ function h264bsdWriteMacroblock, export=1
@ Write luma
- VLD1 {qRow0, qRow1}, [data]!
+ VLD1.8 {qRow0, qRow1}, [data]!
LSL width, width, #4
- VLD1 {qRow2, qRow3}, [data]!
+ VLD1.8 {qRow2, qRow3}, [data]!
LSR cwidth, width, #1
- VST1 {qRow0}, [luma,:128], width
- VLD1 {qRow4, qRow5}, [data]!
- VST1 {qRow1}, [luma,:128], width
- VLD1 {qRow6, qRow7}, [data]!
- VST1 {qRow2}, [luma,:128], width
- VLD1 {qRow8, qRow9}, [data]!
- VST1 {qRow3}, [luma,:128], width
- VLD1 {qRow10, qRow11}, [data]!
- VST1 {qRow4}, [luma,:128], width
- VLD1 {qRow12, qRow13}, [data]!
- VST1 {qRow5}, [luma,:128], width
- VLD1 {qRow14, qRow15}, [data]!
- VST1 {qRow6}, [luma,:128], width
-
- VLD1 {qRow0, qRow1}, [data]! ;//cb rows 0,1,2,3
- VST1 {qRow7}, [luma,:128], width
- VLD1 {qRow2, qRow3}, [data]! ;//cb rows 4,5,6,7
- VST1 {qRow8}, [luma,:128], width
- VLD1 {qRow4, qRow5}, [data]! ;//cr rows 0,1,2,3
- VST1 {qRow9}, [luma,:128], width
- VLD1 {qRow6, qRow7}, [data]! ;//cr rows 4,5,6,7
- VST1 {qRow10}, [luma,:128], width
- VST1 {dRow0}, [cb,:64], cwidth
- VST1 {dRow8}, [cr,:64], cwidth
- VST1 {qRow11}, [luma,:128], width
- VST1 {dRow1}, [cb,:64], cwidth
- VST1 {dRow9}, [cr,:64], cwidth
- VST1 {qRow12}, [luma,:128], width
- VST1 {dRow2}, [cb,:64], cwidth
- VST1 {dRow10}, [cr,:64], cwidth
- VST1 {qRow13}, [luma,:128], width
- VST1 {dRow3}, [cb,:64], cwidth
- VST1 {dRow11}, [cr,:64], cwidth
- VST1 {qRow14}, [luma,:128], width
- VST1 {dRow4}, [cb,:64], cwidth
- VST1 {dRow12}, [cr,:64], cwidth
- VST1 {qRow15}, [luma]
- VST1 {dRow5}, [cb,:64], cwidth
- VST1 {dRow13}, [cr,:64], cwidth
- VST1 {dRow6}, [cb,:64], cwidth
- VST1 {dRow14}, [cr,:64], cwidth
- VST1 {dRow7}, [cb,:64]
- VST1 {dRow15}, [cr,:64]
+ VST1.8 {qRow0}, [luma,:128], width
+ VLD1.8 {qRow4, qRow5}, [data]!
+ VST1.8 {qRow1}, [luma,:128], width
+ VLD1.8 {qRow6, qRow7}, [data]!
+ VST1.8 {qRow2}, [luma,:128], width
+ VLD1.8 {qRow8, qRow9}, [data]!
+ VST1.8 {qRow3}, [luma,:128], width
+ VLD1.8 {qRow10, qRow11}, [data]!
+ VST1.8 {qRow4}, [luma,:128], width
+ VLD1.8 {qRow12, qRow13}, [data]!
+ VST1.8 {qRow5}, [luma,:128], width
+ VLD1.8 {qRow14, qRow15}, [data]!
+ VST1.8 {qRow6}, [luma,:128], width
+
+ VLD1.8 {qRow0, qRow1}, [data]! ;//cb rows 0,1,2,3
+ VST1.8 {qRow7}, [luma,:128], width
+ VLD1.8 {qRow2, qRow3}, [data]! ;//cb rows 4,5,6,7
+ VST1.8 {qRow8}, [luma,:128], width
+ VLD1.8 {qRow4, qRow5}, [data]! ;//cr rows 0,1,2,3
+ VST1.8 {qRow9}, [luma,:128], width
+ VLD1.8 {qRow6, qRow7}, [data]! ;//cr rows 4,5,6,7
+ VST1.8 {qRow10}, [luma,:128], width
+ VST1.8 {dRow0}, [cb,:64], cwidth
+ VST1.8 {dRow8}, [cr,:64], cwidth
+ VST1.8 {qRow11}, [luma,:128], width
+ VST1.8 {dRow1}, [cb,:64], cwidth
+ VST1.8 {dRow9}, [cr,:64], cwidth
+ VST1.8 {qRow12}, [luma,:128], width
+ VST1.8 {dRow2}, [cb,:64], cwidth
+ VST1.8 {dRow10}, [cr,:64], cwidth
+ VST1.8 {qRow13}, [luma,:128], width
+ VST1.8 {dRow3}, [cb,:64], cwidth
+ VST1.8 {dRow11}, [cr,:64], cwidth
+ VST1.8 {qRow14}, [luma,:128], width
+ VST1.8 {dRow4}, [cb,:64], cwidth
+ VST1.8 {dRow12}, [cr,:64], cwidth
+ VST1.8 {qRow15}, [luma]
+ VST1.8 {dRow5}, [cb,:64], cwidth
+ VST1.8 {dRow13}, [cr,:64], cwidth
+ VST1.8 {dRow6}, [cb,:64], cwidth
+ VST1.8 {dRow14}, [cr,:64], cwidth
+ VST1.8 {dRow7}, [cb,:64]
+ VST1.8 {dRow15}, [cr,:64]
VPOP {q4-q7}
POP {r4-r6,pc}
@ BX lr
- .endfunc
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index 4f7ae95..f90c1b3 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -42,7 +42,8 @@ SoftRaw::SoftRaw(
: SimpleSoftOMXComponent(name, callbacks, appData, component),
mSignalledError(false),
mChannelCount(2),
- mSampleRate(44100) {
+ mSampleRate(44100),
+ mBitsPerSample(16) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
}
@@ -58,7 +59,7 @@ void SoftRaw::initPorts() {
def.eDir = OMX_DirInput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = 32 * 1024;
+ def.nBufferSize = 192 * 1024;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainAudio;
@@ -76,7 +77,7 @@ void SoftRaw::initPorts() {
def.eDir = OMX_DirOutput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = 32 * 1024;
+ def.nBufferSize = 192 * 1024;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainAudio;
@@ -114,7 +115,7 @@ OMX_ERRORTYPE SoftRaw::internalGetParameter(
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
- pcmParams->nBitPerSample = 16;
+ pcmParams->nBitPerSample = mBitsPerSample;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
@@ -166,6 +167,7 @@ OMX_ERRORTYPE SoftRaw::internalSetParameter(
mChannelCount = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
+ mBitsPerSample = pcmParams->nBitPerSample;
return OMX_ErrorNone;
}
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
index 015c4a3..894889f 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.h
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -43,13 +43,14 @@ protected:
private:
enum {
- kNumBuffers = 4
+ kNumBuffers = 8
};
bool mSignalledError;
int32_t mChannelCount;
int32_t mSampleRate;
+ int32_t mBitsPerSample;
void initPorts();
status_t initDecoder();
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 5f4e346..7209796 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -56,7 +56,8 @@ SoftVorbis::SoftVorbis(
mNumFramesLeftOnPage(-1),
mSawInputEos(false),
mSignalledOutputEos(false),
- mOutputPortSettingsChange(NONE) {
+ mOutputPortSettingsChange(NONE),
+ mSignalledError(false) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
}
@@ -267,10 +268,21 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) {
return;
}
+ if (mSignalledError) {
+ return;
+ }
+
if (portIndex == 0 && mInputBufferCount < 2) {
BufferInfo *info = *inQueue.begin();
OMX_BUFFERHEADERTYPE *header = info->mHeader;
+ if (!header || !header->pBuffer) {
+ ALOGE("b/25727575 has happened. report error");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
const uint8_t *data = header->pBuffer + header->nOffset;
size_t size = header->nFilledLen;
if (size < 7) {
@@ -446,6 +458,8 @@ void SoftVorbis::onPortFlushCompleted(OMX_U32 portIndex) {
mNumFramesOutput = 0;
vorbis_dsp_restart(mState);
+ mSawInputEos = false;
+ mSignalledOutputEos = false;
}
}
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
index 1d00816..7decc5a 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
@@ -68,6 +68,8 @@ private:
AWAITING_ENABLED
} mOutputPortSettingsChange;
+ bool mSignalledError;
+
void initPorts();
status_t initDecoder();
bool isConfigured() const;
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index e92c192..59af12a 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -150,11 +150,20 @@ void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) {
CHECK(mCropHeight > 0);
CHECK(mConverter == NULL || mConverter->isValid());
+#ifdef EXYNOS4_ENHANCEMENTS
+ CHECK_EQ(0,
+ native_window_set_usage(
+ mNativeWindow.get(),
+ GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
+ | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP
+ | GRALLOC_USAGE_HW_FIMC1));
+#else
CHECK_EQ(0,
native_window_set_usage(
mNativeWindow.get(),
GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
| GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP));
+#endif
CHECK_EQ(0,
native_window_set_scaling_mode(
@@ -252,8 +261,14 @@ std::list<FrameRenderTracker::Info> SoftwareRenderer::render(
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
+
+#ifdef EXYNOS4_ENHANCEMENTS
+ size_t dst_c_stride = buf->stride / 2;
+ size_t dst_c_size = ALIGN(dst_c_stride, 16) * buf->height / 2;
+#else
size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
size_t dst_c_size = dst_c_stride * buf->height / 2;
+#endif
uint8_t *dst_v = dst_y + dst_y_size;
uint8_t *dst_u = dst_v + dst_c_size;
@@ -282,13 +297,25 @@ std::list<FrameRenderTracker::Info> SoftwareRenderer::render(
const uint8_t *src_uv = (const uint8_t *)data
+ mWidth * (mHeight - mCropTop / 2);
- uint8_t *dst_y = (uint8_t *)dst;
+#ifdef EXYNOS4_ENHANCEMENTS
+ void *pYUVBuf[3];
+ CHECK_EQ(0, mapper.unlock(buf->handle));
+ CHECK_EQ(0, mapper.lock(
+ buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_YUV_ADDR, bounds, pYUVBuf));
+
+ size_t dst_c_stride = buf->stride / 2;
+ uint8_t *dst_y = (uint8_t *)pYUVBuf[0];
+ uint8_t *dst_v = (uint8_t *)pYUVBuf[1];
+ uint8_t *dst_u = (uint8_t *)pYUVBuf[2];
+#else
+ uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
size_t dst_c_size = dst_c_stride * buf->height / 2;
uint8_t *dst_v = dst_y + dst_y_size;
uint8_t *dst_u = dst_v + dst_c_size;
+#endif
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 81a6d00..07e26a2 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,7 +16,14 @@
<Included>
<Decoders>
- <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+ <MediaCodec name="OMX.google.mpeg4.decoder">
+ <Type name="video/mp4v-es" />
+ <!--
+ Use the Google mpeg4 decoder for mpeg4 DP content, which is not
+ supported by the hardware codec. A component can be used to support
+ several MIME types, so non-DP mpeg4 use cases are not affected by this.
+ -->
+ <Type name="video/mp4v-esdp" />
<!-- profiles and levels: ProfileSimple : Level3 -->
<Limit name="size" min="2x2" max="352x288" />
<Limit name="alignment" value="2x2" />
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index a5b81a8..3ebbbd9 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -29,13 +29,9 @@ ABuffer::ABuffer(size_t capacity)
mInt32Data(0),
mOwnsData(true) {
mData = malloc(capacity);
- if (mData == NULL) {
- mCapacity = 0;
- mRangeLength = 0;
- } else {
- mCapacity = capacity;
- mRangeLength = capacity;
- }
+ CHECK(mData != NULL);
+ mCapacity = capacity;
+ mRangeLength = capacity;
}
ABuffer::ABuffer(void *data, size_t capacity)
diff --git a/media/libstagefright/foundation/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
index b230400..4bcb1f6 100644
--- a/media/libstagefright/foundation/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -1318,7 +1318,8 @@ void ANetworkSession::threadLoop() {
List<sp<Session> > sessionsToAdd;
- for (size_t i = mSessions.size(); res > 0 && i-- > 0;) {
+ for (size_t i = mSessions.size(); res > 0 && i > 0;) {
+ i--;
const sp<Session> &session = mSessions.valueAt(i);
int s = session->socket();
@@ -1409,4 +1410,3 @@ void ANetworkSession::threadLoop() {
}
} // namespace android
-
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 1557401..1b0b1c5 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -54,7 +54,7 @@ const int64_t LiveSession::kReadyMarkUs = 5000000ll;
const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
-struct LiveSession::BandwidthEstimator : public RefBase {
+struct LiveSession::BandwidthEstimator : public LiveSession::BandwidthBaseEstimator {
BandwidthEstimator();
void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
@@ -1064,6 +1064,7 @@ void LiveSession::onMasterPlaylistFetched(const sp<AMessage> &msg) {
itemsWithVideo.push(item);
}
}
+#if 0
// remove the audio-only variants if we have at least one with video
if (!itemsWithVideo.empty()
&& itemsWithVideo.size() < mBandwidthItems.size()) {
@@ -1072,7 +1073,7 @@ void LiveSession::onMasterPlaylistFetched(const sp<AMessage> &msg) {
mBandwidthItems.push(itemsWithVideo[i]);
}
}
-
+#endif
CHECK_GT(mBandwidthItems.size(), 0u);
initialBandwidth = mBandwidthItems[0].mBandwidth;
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 90d56d0..0d504e4 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -73,7 +73,7 @@ struct LiveSession : public AHandler {
const sp<IMediaHTTPService> &httpService);
int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
- status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
+ virtual status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
@@ -117,7 +117,6 @@ protected:
virtual void onMessageReceived(const sp<AMessage> &msg);
-private:
friend struct PlaylistFetcher;
enum {
@@ -142,6 +141,14 @@ private:
static const int64_t kPrepareMarkUs;
static const int64_t kUnderflowMarkUs;
+ struct BandwidthBaseEstimator : public RefBase {
+ virtual void addBandwidthMeasurement(size_t numBytes, int64_t delayUs) = 0;
+ virtual bool estimateBandwidth(
+ int32_t *bandwidth,
+ bool *isStable = NULL,
+ int32_t *shortTermBps = NULL) = 0;
+ };
+
struct BandwidthEstimator;
struct BandwidthItem {
size_t mPlaylistIndex;
@@ -201,7 +208,7 @@ private:
ssize_t mOrigBandwidthIndex;
int32_t mLastBandwidthBps;
bool mLastBandwidthStable;
- sp<BandwidthEstimator> mBandwidthEstimator;
+ sp<BandwidthBaseEstimator> mBandwidthEstimator;
sp<M3UParser> mPlaylist;
int32_t mMaxWidth;
@@ -251,10 +258,10 @@ private:
KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
- sp<PlaylistFetcher> addFetcher(const char *uri);
+ virtual sp<PlaylistFetcher> addFetcher(const char *uri);
void onConnect(const sp<AMessage> &msg);
- void onMasterPlaylistFetched(const sp<AMessage> &msg);
+ virtual void onMasterPlaylistFetched(const sp<AMessage> &msg);
void onSeek(const sp<AMessage> &msg);
bool UriIsSameAsIndex( const AString &uri, int32_t index, bool newUri);
@@ -269,7 +276,7 @@ private:
float getAbortThreshold(
ssize_t currentBWIndex, ssize_t targetBWIndex) const;
void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
- size_t getBandwidthIndex(int32_t bandwidthBps);
+ virtual size_t getBandwidthIndex(int32_t bandwidthBps);
ssize_t getLowestValidBandwidthIndex() const;
HLSTime latestMediaSegmentStartTime() const;
@@ -284,7 +291,7 @@ private:
void onChangeConfiguration2(const sp<AMessage> &msg);
void onChangeConfiguration3(const sp<AMessage> &msg);
- void swapPacketSource(StreamType stream);
+ virtual void swapPacketSource(StreamType stream);
void tryToFinishBandwidthSwitch(const AString &oldUri);
void cancelBandwidthSwitch(bool resume = false);
bool checkSwitchProgress(
@@ -296,7 +303,7 @@ private:
void schedulePollBuffering();
void cancelPollBuffering();
void restartPollBuffering();
- void onPollBuffering();
+ virtual void onPollBuffering();
bool checkBuffering(bool &underflow, bool &ready, bool &down, bool &up);
void startBufferingIfNecessary();
void stopBufferingIfNecessary();
@@ -304,7 +311,7 @@ private:
void finishDisconnect();
- void postPrepared(status_t err);
+ virtual void postPrepared(status_t err);
void postError(status_t err);
DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 72d832e..b030e90 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -975,7 +975,9 @@ bool PlaylistFetcher::initDownloadState(
if (mSegmentStartTimeUs < 0) {
if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
// If this is a live session, start 3 segments from the end on connect
- mSeqNumber = lastSeqNumberInPlaylist - 3;
+ if (!getSeqNumberInLiveStreaming()) {
+ mSeqNumber = lastSeqNumberInPlaylist - 3;
+ }
if (mSeqNumber < firstSeqNumberInPlaylist) {
mSeqNumber = firstSeqNumberInPlaylist;
}
@@ -1418,6 +1420,10 @@ void PlaylistFetcher::onDownloadNext() {
}
}
+ if (checkSwitchBandwidth()) {
+ return;
+ }
+
++mSeqNumber;
// if adapting, pause after found the next starting point
@@ -1628,7 +1634,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
if (mSegmentFirstPTS < 0ll) {
// get the smallest first PTS from all streams present in this parser
- for (size_t i = mPacketSources.size(); i-- > 0;) {
+ for (size_t i = mPacketSources.size(); i > 0;) {
+ i--;
const LiveSession::StreamType stream = mPacketSources.keyAt(i);
if (stream == LiveSession::STREAMTYPE_SUBTITLES) {
ALOGE("MPEG2 Transport streams do not contain subtitles.");
@@ -1683,7 +1690,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
}
status_t err = OK;
- for (size_t i = mPacketSources.size(); i-- > 0;) {
+ for (size_t i = mPacketSources.size(); i > 0;) {
+ i--;
sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
const LiveSession::StreamType stream = mPacketSources.keyAt(i);
@@ -1807,7 +1815,8 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
}
if (err != OK) {
- for (size_t i = mPacketSources.size(); i-- > 0;) {
+ for (size_t i = mPacketSources.size(); i > 0;) {
+ i--;
sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
packetSource->clear();
}
@@ -1902,6 +1911,9 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits(
while (!it.done()) {
size_t length;
const uint8_t *data = it.getData(&length);
+ if (!data) {
+ return ERROR_MALFORMED;
+ }
static const char *kMatchName =
"com.apple.streaming.transportStreamTimestamp";
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index c8ca457..6b60b65 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -89,7 +89,6 @@ protected:
virtual ~PlaylistFetcher();
virtual void onMessageReceived(const sp<AMessage> &msg);
-private:
enum {
kMaxNumRetries = 5,
};
@@ -249,6 +248,8 @@ private:
void updateDuration();
void updateTargetDuration();
+ virtual bool checkSwitchBandwidth() { return false; }
+ virtual bool getSeqNumberInLiveStreaming() { return false; }
DISALLOW_EVIL_CONSTRUCTORS(PlaylistFetcher);
};
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 6e14197..d1fd0d9 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -506,8 +506,9 @@ void ID3::Iterator::getString(String8 *id, String8 *comment) const {
void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
id->setTo("");
- const uint8_t *frameData = mFrameData;
- if (frameData == NULL) {
+ size_t size;
+ const uint8_t *frameData = getData(&size);
+ if ((size == 0) || (frameData == NULL)) {
return;
}
@@ -645,6 +646,11 @@ const uint8_t *ID3::Iterator::getData(size_t *length) const {
return NULL;
}
+ // Prevent integer underflow
+ if (mFrameSize < getHeaderLength()) {
+ return NULL;
+ }
+
*length = mFrameSize - getHeaderLength();
return mFrameData;
@@ -859,6 +865,9 @@ ID3::getAlbumArt(size_t *length, String8 *mime) const {
while (!it.done()) {
size_t size;
const uint8_t *data = it.getData(&size);
+ if (!data) {
+ return NULL;
+ }
if (mVersion == ID3_V2_3 || mVersion == ID3_V2_4) {
uint8_t encoding = data[0];
diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h
index e98ca82..9a0ba2f 100644
--- a/media/libstagefright/include/AACExtractor.h
+++ b/media/libstagefright/include/AACExtractor.h
@@ -21,6 +21,7 @@
#include <media/stagefright/MediaExtractor.h>
#include <utils/Vector.h>
+#include "include/APE.h"
namespace android {
@@ -48,6 +49,9 @@ private:
Vector<uint64_t> mOffsetVector;
int64_t mFrameDurationUs;
+ APE ape;
+ sp<MetaData> mApeMeta;
+
AACExtractor(const AACExtractor &);
AACExtractor &operator=(const AACExtractor &);
};
diff --git a/media/libstagefright/include/APE.h b/media/libstagefright/include/APE.h
new file mode 100644
index 0000000..db49bb0
--- /dev/null
+++ b/media/libstagefright/include/APE.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) Texas Instruments - http://www.ti.com/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef APE_TAG_H_
+
+#define APE_TAG_H_
+
+#include <utils/RefBase.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+class APE{
+public:
+ APE();
+ ~APE();
+ bool isAPE(uint8_t *apeTag) const;
+ bool parseAPE(const sp<DataSource> &source, off64_t offset,
+ sp<MetaData> &meta);
+
+private:
+ uint32_t itemNumber;
+ uint32_t itemFlags;
+ size_t lenValue;
+};
+
+} //namespace android
+
+#endif //APE_TAG_H_
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 758b2c9..1a8e6c8 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -109,6 +109,9 @@ struct AwesomePlayer {
void postAudioTearDown();
status_t dump(int fd, const Vector<String16> &args) const;
+ status_t suspend();
+ status_t resume();
+
private:
friend struct AwesomeEvent;
friend struct PreviewPlayer;
@@ -193,6 +196,7 @@ private:
uint32_t mFlags;
uint32_t mExtractorFlags;
uint32_t mSinceLastDropped;
+ bool mDropFramesDisable; // hevc test
int64_t mTimeSourceDeltaUs;
int64_t mVideoTimeUs;
@@ -355,6 +359,8 @@ private:
bool mAudioTearDownWasPlaying;
int64_t mAudioTearDownPosition;
+ bool mIsFirstFrameAfterResume;
+
status_t setVideoScalingMode(int32_t mode);
status_t setVideoScalingMode_l(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index a29bdf9..1f282ca 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -66,10 +66,13 @@ struct NuCachedSource2 : public DataSource {
String8 *cacheConfig,
bool *disconnectAtHighwatermark);
+ virtual status_t disconnectWhileSuspend();
+ virtual status_t connectWhileResume();
+
protected:
virtual ~NuCachedSource2();
-private:
+protected:
friend struct AHandlerReflector<NuCachedSource2>;
NuCachedSource2(
@@ -123,6 +126,8 @@ private:
bool mDisconnectAtHighwatermark;
+ bool mSuspended;
+
void onMessageReceived(const sp<AMessage> &msg);
void onFetch();
void onRead(const sp<AMessage> &msg);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index bd33ab7..5ba0e8f 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -146,11 +146,11 @@ private:
OMX::node_id mNodeID;
OMX_HANDLETYPE mHandle;
sp<IOMXObserver> mObserver;
- bool mDying;
bool mSailed; // configuration is set (no more meta-mode changes)
bool mQueriedProhibitedExtensions;
SortedVector<OMX_INDEXTYPE> mProhibitedExtensions;
bool mIsSecure;
+ atomic_bool mDying;
// Lock only covers mGraphicBufferSource. We can't always use mLock
// because of rare instances where we'd end up locking it recursively.
diff --git a/media/libstagefright/include/SampleIterator.h b/media/libstagefright/include/SampleIterator.h
index 7053247..4ad7f2e 100644
--- a/media/libstagefright/include/SampleIterator.h
+++ b/media/libstagefright/include/SampleIterator.h
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#ifndef SAMPLE_ITERATOR_H_
+
+#define SAMPLE_ITERATOR_H_
+
#include <utils/Vector.h>
namespace android {
@@ -22,6 +26,7 @@ class SampleTable;
struct SampleIterator {
SampleIterator(SampleTable *table);
+ ~SampleIterator();
status_t seekTo(uint32_t sampleIndex);
@@ -64,6 +69,10 @@ private:
uint32_t mCurrentSampleTime;
uint32_t mCurrentSampleDuration;
+ uint8_t *mSampleCache;
+ uint32_t mSampleCacheSize;
+ uint32_t mCurrentSampleCacheStartIndex;
+
void reset();
status_t findChunkRange(uint32_t sampleIndex);
status_t getChunkOffset(uint32_t chunk, off64_t *offset);
@@ -75,3 +84,4 @@ private:
} // namespace android
+#endif // SAMPLE_ITERATOR_H_
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index 591b38e..6a0a958 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -140,6 +140,7 @@ private:
void onPortFlush(OMX_U32 portIndex, bool sendFlushComplete);
void checkTransitions();
+ void onTransitionError();
DISALLOW_EVIL_CONSTRUCTORS(SimpleSoftOMXComponent);
};
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index ecc2573..b2463e7 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -31,6 +31,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <utils/String8.h>
+#include <media/stagefright/foundation/ABitReader.h>
#include <inttypes.h>
@@ -136,6 +137,7 @@ private:
enum Type {
AVC,
AAC,
+ HEVC,
OTHER
};
@@ -222,17 +224,29 @@ MatroskaSource::MatroskaSource(
mIsAudio = !strncasecmp("audio/", mime, 6);
if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
- mType = AVC;
-
uint32_t dummy;
const uint8_t *avcc;
size_t avccSize;
CHECK(meta->findData(
kKeyAVCC, &dummy, (const void **)&avcc, &avccSize));
- CHECK_GE(avccSize, 5u);
+ if (avccSize < 7) {
+ ALOGW("Invalid AVCC atom in track, size %zu", avccSize);
+ } else {
+ mNALSizeLen = 1 + (avcc[4] & 3);
+ ALOGV("mNALSizeLen = %zu", mNALSizeLen);
+ mType = AVC;
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ mType = HEVC;
+
+ uint32_t type;
+ const uint8_t *data;
+ size_t size;
+ CHECK(meta->findData(kKeyHVCC, &type, (const void **)&data, &size));
- mNALSizeLen = 1 + (avcc[4] & 3);
+ CHECK(size >= 7);
+ mNALSizeLen = 1 + (data[14 + 7] & 3);
ALOGV("mNALSizeLen = %zu", mNALSizeLen);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
mType = AAC;
@@ -521,8 +535,9 @@ status_t MatroskaSource::readBlock() {
const mkvparser::Block *block = mBlockIter.block();
int64_t timeUs = mBlockIter.blockTimeUs();
+ int frameCount = block->GetFrameCount();
- for (int i = 0; i < block->GetFrameCount(); ++i) {
+ for (int i = 0; i < frameCount; ++i) {
const mkvparser::Block::Frame &frame = block->GetFrame(i);
MediaBuffer *mbuf = new MediaBuffer(frame.len);
@@ -534,6 +549,7 @@ status_t MatroskaSource::readBlock() {
mPendingFrames.clear();
mBlockIter.advance();
+ mbuf->release();
return ERROR_IO;
}
@@ -542,6 +558,27 @@ status_t MatroskaSource::readBlock() {
mBlockIter.advance();
+ if (!mBlockIter.eos() && frameCount > 1) {
+ // For files with lacing enabled, we need to amend the kKeyTime of
+ // each frame so that their kKeyTime values advance accordingly (instead
+ // of being set to the same value). To do this, we need to find out
+ // the duration of the block using the start time of the next block.
+ int64_t duration = mBlockIter.blockTimeUs() - timeUs;
+ int64_t durationPerFrame = duration / frameCount;
+ int64_t durationRemainder = duration % frameCount;
+
+ // We split the duration across the frames, distributing the remainder (if any)
+ // to the later frames. The later frames are processed first because we walk
+ // the doubly linked list backwards with its iterator.
+ List<MediaBuffer *>::iterator it = mPendingFrames.end();
+ for (int i = frameCount - 1; i >= 0; --i) {
+ --it;
+ int64_t frameRemainder = durationRemainder >= frameCount - i ? 1 : 0;
+ int64_t frameTimeUs = timeUs + durationPerFrame * i + frameRemainder;
+ (*it)->meta_data()->setInt64(kKeyTime, frameTimeUs);
+ }
+ }
+
return OK;
}
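The duration-splitting logic above can be checked with a small standalone sketch (hypothetical block start time and duration, plain printf instead of MediaBuffer metadata): the last few frames each absorb one extra microsecond when the division leaves a remainder, so no time is lost.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t blockStartUs = 1000000;    // hypothetical block start
        const int64_t blockDurationUs = 100003;  // hypothetical block duration
        const int frameCount = 4;

        int64_t durationPerFrame = blockDurationUs / frameCount;   // 25000
        int64_t durationRemainder = blockDurationUs % frameCount;  // 3

        for (int i = 0; i < frameCount; ++i) {
            // the last 'durationRemainder' frames each get one extra microsecond
            int64_t frameRemainder = (durationRemainder >= frameCount - i) ? 1 : 0;
            int64_t frameTimeUs = blockStartUs + durationPerFrame * i + frameRemainder;
            printf("frame %d starts at %lld us\n", i, (long long)frameTimeUs);
        }
        return 0;
    }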
@@ -581,7 +618,7 @@ status_t MatroskaSource::read(
MediaBuffer *frame = *mPendingFrames.begin();
mPendingFrames.erase(mPendingFrames.begin());
- if (mType != AVC) {
+ if (mType != AVC && mType != HEVC) {
if (targetSampleTimeUs >= 0ll) {
frame->meta_data()->setInt64(
kKeyTargetTime, targetSampleTimeUs);
@@ -633,9 +670,11 @@ status_t MatroskaSource::read(
if (pass == 1) {
memcpy(&dstPtr[dstOffset], "\x00\x00\x00\x01", 4);
- memcpy(&dstPtr[dstOffset + 4],
- &srcPtr[srcOffset + mNALSizeLen],
- NALsize);
+ if (frame != buffer) {
+ memcpy(&dstPtr[dstOffset + 4],
+ &srcPtr[srcOffset + mNALSizeLen],
+ NALsize);
+ }
}
dstOffset += 4; // 0x00 00 00 01
@@ -657,7 +696,13 @@ status_t MatroskaSource::read(
if (pass == 0) {
dstSize = dstOffset;
- buffer = new MediaBuffer(dstSize);
+ if (dstSize == srcSize && mNALSizeLen == 4) {
+ // In this special case we can re-use the input buffer by substituting
+ // each 4-byte nal size with a 4-byte start code
+ buffer = frame;
+ } else {
+ buffer = new MediaBuffer(dstSize);
+ }
int64_t timeUs;
CHECK(frame->meta_data()->findInt64(kKeyTime, &timeUs));
@@ -671,8 +716,10 @@ status_t MatroskaSource::read(
}
}
- frame->release();
- frame = NULL;
+ if (frame != buffer) {
+ frame->release();
+ frame = NULL;
+ }
if (targetSampleTimeUs >= 0ll) {
buffer->meta_data()->setInt64(
@@ -746,7 +793,12 @@ MatroskaExtractor::MatroskaExtractor(const sp<DataSource> &source)
info->GetWritingAppAsUTF8());
#endif
- addTracks();
+ ret = addTracks();
+ if (ret < 0) {
+ delete mSegment;
+ mSegment = NULL;
+ return;
+ }
}
MatroskaExtractor::~MatroskaExtractor() {
@@ -819,6 +871,17 @@ static void addESDSFromCodecPrivate(
const sp<MetaData> &meta,
bool isAudio, const void *priv, size_t privSize) {
+ if(isAudio) {
+ ABitReader br((const uint8_t *)priv, privSize);
+ uint32_t objectType = br.getBits(5);
+
+ if (objectType == 31) { // escape value: read 6 more bits (e.g. AAC-ELD = 39)
+ objectType = 32 + br.getBits(6);
+ }
+
+ meta->setInt32(kKeyAACAOT, objectType);
+ }
+
int privSizeBytesRequired = bytesForSize(privSize);
int esdsSize2 = 14 + privSizeBytesRequired + privSize;
int esdsSize2BytesRequired = bytesForSize(esdsSize2);
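For reference, the field read above is the first field of the MPEG-4 AudioSpecificConfig: a 5-bit audioObjectType, with the value 31 acting as an escape that adds 6 more bits. A standalone sketch with a hand-rolled bit reader (not the framework ABitReader) and a hypothetical two-byte config:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Reads n bits MSB-first from a byte array, advancing bitPos.
    static uint32_t getBits(const uint8_t *data, size_t &bitPos, int n) {
        uint32_t v = 0;
        for (int i = 0; i < n; ++i, ++bitPos) {
            v = (v << 1) | ((data[bitPos / 8] >> (7 - bitPos % 8)) & 1);
        }
        return v;
    }

    int main() {
        // hypothetical AudioSpecificConfig: AAC-LC (2), 44.1 kHz, stereo
        const uint8_t priv[] = { 0x12, 0x10 };
        size_t bitPos = 0;
        uint32_t objectType = getBits(priv, bitPos, 5);
        if (objectType == 31) {                        // escape value
            objectType = 32 + getBits(priv, bitPos, 6);
        }
        printf("audioObjectType = %u\n", objectType);  // prints 2
        return 0;
    }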
@@ -846,6 +909,8 @@ static void addESDSFromCodecPrivate(
meta->setData(kKeyESDS, 0, esds, esdsSize);
+ updateVideoTrackInfoFromESDS_MPEG4Video(meta);
+
delete[] esds;
esds = NULL;
}
@@ -928,7 +993,7 @@ status_t addVorbisCodecInfo(
return OK;
}
-void MatroskaExtractor::addTracks() {
+int MatroskaExtractor::addTracks() {
const mkvparser::Tracks *tracks = mSegment->GetTracks();
for (size_t index = 0; index < tracks->GetTracksCount(); ++index) {
@@ -979,6 +1044,10 @@ void MatroskaExtractor::addTracks() {
codecID);
continue;
}
+ } else if (!strcmp("V_MPEGH/ISO/HEVC", codecID)) {
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
+ meta->setData(kKeyHVCC, kTypeHVCC, codecPrivate, codecPrivateSize);
+
} else if (!strcmp("V_VP8", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
} else if (!strcmp("V_VP9", codecID)) {
@@ -1000,7 +1069,9 @@ void MatroskaExtractor::addTracks() {
if (!strcmp("A_AAC", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
- CHECK(codecPrivateSize >= 2);
+ if (codecPrivateSize < 2) {
+ return -1;
+ }
addESDSFromCodecPrivate(
meta, true, codecPrivate, codecPrivateSize);
@@ -1045,6 +1116,7 @@ void MatroskaExtractor::addTracks() {
trackInfo->mMeta = meta;
trackInfo->mExtractor = this;
}
+ return 0;
}
void MatroskaExtractor::findThumbnails() {
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index db36bf8..e3a07ec 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -74,7 +74,7 @@ private:
bool mIsWebm;
int64_t mSeekPreRollNs;
- void addTracks();
+ int addTracks();
void findThumbnails();
bool isLiveStreaming() const;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 2f2b115..3ad3118 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -509,7 +509,7 @@ int64_t ATSParser::Program::recoverPTS(uint64_t PTS_33bit) {
mLastRecoveredPTS = static_cast<int64_t>(PTS_33bit);
} else {
mLastRecoveredPTS = static_cast<int64_t>(
- ((mLastRecoveredPTS - PTS_33bit + 0x100000000ll)
+ ((mLastRecoveredPTS - static_cast<int64_t>(PTS_33bit) + 0x100000000ll)
& 0xfffffffe00000000ull) | PTS_33bit);
// We start from 0, but recovered PTS could be slightly below 0.
// Clamp it to 0 as rest of the pipeline doesn't take negative pts.
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index 5f0f567..d16d5df 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -30,6 +30,12 @@ LOCAL_SHARED_LIBRARIES := \
libstagefright_foundation \
libdl
+ifeq ($(call is-vendor-board-platform,QCOM),true)
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_EXTN_FLAC_DECODER)),true)
+ LOCAL_CFLAGS += -DQTI_FLAC_DECODER
+endif
+endif
+
LOCAL_MODULE:= libstagefright_omx
LOCAL_CFLAGS += -Werror -Wall
LOCAL_CLANG := true
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 5898b4e..153a979 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -473,6 +473,10 @@ OMX_ERRORTYPE OMX::OnEvent(
findInstance(node)->onEvent(eEvent, nData1, nData2);
sp<OMX::CallbackDispatcher> dispatcher = findDispatcher(node);
+ if (dispatcher == NULL) {
+ ALOGW("OnEvent Callback dispatcher NULL, skip post");
+ return OMX_ErrorNone;
+ }
// output rendered events are not processed as regular events until they hit the observer
if (eEvent == OMX_EventOutputRendered) {
@@ -518,7 +522,12 @@ OMX_ERRORTYPE OMX::OnEmptyBufferDone(
msg.fenceFd = fenceFd;
msg.u.buffer_data.buffer = buffer;
- findDispatcher(node)->post(msg);
+ sp<OMX::CallbackDispatcher> callbackDispatcher = findDispatcher(node);
+ if (callbackDispatcher != NULL) {
+ callbackDispatcher->post(msg);
+ } else {
+ ALOGW("OnEmptyBufferDone Callback dispatcher NULL, skip post");
+ }
return OMX_ErrorNone;
}
@@ -537,7 +546,12 @@ OMX_ERRORTYPE OMX::OnFillBufferDone(
msg.u.extended_buffer_data.flags = pBuffer->nFlags;
msg.u.extended_buffer_data.timestamp = pBuffer->nTimeStamp;
- findDispatcher(node)->post(msg);
+ sp<OMX::CallbackDispatcher> callbackDispatcher = findDispatcher(node);
+ if (callbackDispatcher != NULL) {
+ callbackDispatcher->post(msg);
+ } else {
+ ALOGW("OnFillBufferDone Callback dispatcher NULL, skip post");
+ }
return OMX_ErrorNone;
}
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index ae3cb33..f7bb733 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -25,52 +25,58 @@
#include <dlfcn.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <cutils/properties.h>
namespace android {
-OMXMaster::OMXMaster()
- : mVendorLibHandle(NULL) {
+OMXMaster::OMXMaster() {
addVendorPlugin();
addPlugin(new SoftOMXPlugin);
+ addUserPlugin();
}
OMXMaster::~OMXMaster() {
clearPlugins();
-
- if (mVendorLibHandle != NULL) {
- dlclose(mVendorLibHandle);
- mVendorLibHandle = NULL;
- }
}
void OMXMaster::addVendorPlugin() {
addPlugin("libstagefrighthw.so");
}
+void OMXMaster::addUserPlugin() {
+ char plugin[PROPERTY_VALUE_MAX];
+ if (property_get("media.sf.omx-plugin", plugin, NULL)) {
+ addPlugin(plugin);
+ }
+}
+
void OMXMaster::addPlugin(const char *libname) {
- mVendorLibHandle = dlopen(libname, RTLD_NOW);
+ void* handle = dlopen(libname, RTLD_NOW);
- if (mVendorLibHandle == NULL) {
+ if (handle == NULL) {
return;
}
typedef OMXPluginBase *(*CreateOMXPluginFunc)();
CreateOMXPluginFunc createOMXPlugin =
(CreateOMXPluginFunc)dlsym(
- mVendorLibHandle, "createOMXPlugin");
+ handle, "createOMXPlugin");
if (!createOMXPlugin)
createOMXPlugin = (CreateOMXPluginFunc)dlsym(
- mVendorLibHandle, "_ZN7android15createOMXPluginEv");
+ handle, "_ZN7android15createOMXPluginEv");
if (createOMXPlugin) {
- addPlugin((*createOMXPlugin)());
+ addPlugin((*createOMXPlugin)(), handle);
}
}
-void OMXMaster::addPlugin(OMXPluginBase *plugin) {
+void OMXMaster::addPlugin(OMXPluginBase *plugin, void *handle) {
+ if (plugin == 0) {
+ return;
+ }
Mutex::Autolock autoLock(mLock);
- mPlugins.push_back(plugin);
+ mPlugins.add(plugin, handle);
OMX_U32 index = 0;
@@ -100,21 +106,32 @@ void OMXMaster::clearPlugins() {
Mutex::Autolock autoLock(mLock);
typedef void (*DestroyOMXPluginFunc)(OMXPluginBase*);
- DestroyOMXPluginFunc destroyOMXPlugin =
- (DestroyOMXPluginFunc)dlsym(
- mVendorLibHandle, "destroyOMXPlugin");
- mPluginByComponentName.clear();
+ for (unsigned int i = 0; i < mPlugins.size(); i++) {
+ OMXPluginBase *plugin = mPlugins.keyAt(i);
+ if (plugin != NULL) {
+ void *handle = mPlugins.valueAt(i);
+
+ if (handle != NULL) {
+ DestroyOMXPluginFunc destroyOMXPlugin =
+ (DestroyOMXPluginFunc)dlsym(
+ handle, "destroyOMXPlugin");
+
+ if (destroyOMXPlugin)
+ destroyOMXPlugin(plugin);
+ else
+ delete plugin;
- for (List<OMXPluginBase *>::iterator it = mPlugins.begin();
- it != mPlugins.end(); ++it) {
- if (destroyOMXPlugin)
- destroyOMXPlugin(*it);
- else
- delete *it;
- *it = NULL;
+ dlclose(handle);
+ } else {
+ delete plugin;
+ }
+
+ plugin = NULL;
+ }
}
+ mPluginByComponentName.clear();
mPlugins.clear();
}
diff --git a/media/libstagefright/omx/OMXMaster.h b/media/libstagefright/omx/OMXMaster.h
index 6069741..c07fed3 100644
--- a/media/libstagefright/omx/OMXMaster.h
+++ b/media/libstagefright/omx/OMXMaster.h
@@ -51,15 +51,14 @@ struct OMXMaster : public OMXPluginBase {
private:
Mutex mLock;
- List<OMXPluginBase *> mPlugins;
+ KeyedVector<OMXPluginBase *, void *> mPlugins;
KeyedVector<String8, OMXPluginBase *> mPluginByComponentName;
KeyedVector<OMX_COMPONENTTYPE *, OMXPluginBase *> mPluginByInstance;
- void *mVendorLibHandle;
-
void addVendorPlugin();
+ void addUserPlugin();
void addPlugin(const char *libname);
- void addPlugin(OMXPluginBase *plugin);
+ void addPlugin(OMXPluginBase *plugin, void *handle = NULL);
void clearPlugins();
OMXMaster(const OMXMaster &);
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 1e76e01..c09064f 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -133,7 +133,8 @@ struct BufferMeta {
}
// check component returns proper range
- sp<ABuffer> codec = getBuffer(header, false /* backup */, true /* limit */);
+ sp<ABuffer> codec = getBuffer(header, false /* backup */,
+ !(header->nFlags & OMX_BUFFERFLAG_EXTRADATA));
memcpy((OMX_U8 *)mMem->pointer() + header->nOffset, codec->data(), codec->size());
}
@@ -143,9 +144,11 @@ struct BufferMeta {
return;
}
+ size_t bytesToCopy = header->nFlags & OMX_BUFFERFLAG_EXTRADATA ?
+ header->nAllocLen - header->nOffset : header->nFilledLen;
memcpy(header->pBuffer + header->nOffset,
(const OMX_U8 *)mMem->pointer() + header->nOffset,
- header->nFilledLen);
+ bytesToCopy);
}
// return either the codec or the backup buffer
@@ -216,7 +219,6 @@ OMXNodeInstance::OMXNodeInstance(
mNodeID(0),
mHandle(NULL),
mObserver(observer),
- mDying(false),
mSailed(false),
mQueriedProhibitedExtensions(false),
mBufferIDCount(0)
@@ -232,6 +234,7 @@ OMXNodeInstance::OMXNodeInstance(
mMetadataType[0] = kMetadataBufferTypeInvalid;
mMetadataType[1] = kMetadataBufferTypeInvalid;
mIsSecure = AString(name).endsWith(".secure");
+ atomic_store(&mDying, false);
}
OMXNodeInstance::~OMXNodeInstance() {
@@ -302,11 +305,12 @@ status_t OMXNodeInstance::freeNode(OMXMaster *master) {
// The code below may trigger some more events to be dispatched
// by the OMX component - we want to ignore them as our client
// does not expect them.
- mDying = true;
+ atomic_store(&mDying, true);
OMX_STATETYPE state;
CHECK_EQ(OMX_GetState(mHandle, &state), OMX_ErrorNone);
switch (state) {
+ case OMX_StatePause:
case OMX_StateExecuting:
{
ALOGV("forcing Executing->Idle");
@@ -1631,6 +1635,9 @@ void OMXNodeInstance::onObserverDied(OMXMaster *master) {
ALOGE("!!! Observer died. Quickly, do something, ... anything...");
// Try to force shutdown of the node and hope for the best.
+ // But allow the component to observe mDying = true first
+ atomic_store(&mDying, true);
+ sleep(2);
freeNode(master);
}
@@ -1705,7 +1712,7 @@ OMX_ERRORTYPE OMXNodeInstance::OnEvent(
OMX_IN OMX_U32 nData2,
OMX_IN OMX_PTR pEventData) {
OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
- if (instance->mDying) {
+ if (atomic_load(&instance->mDying)) {
return OMX_ErrorNone;
}
return instance->owner()->OnEvent(
@@ -1718,7 +1725,7 @@ OMX_ERRORTYPE OMXNodeInstance::OnEmptyBufferDone(
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
- if (instance->mDying) {
+ if (atomic_load(&instance->mDying)) {
return OMX_ErrorNone;
}
int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
@@ -1732,7 +1739,7 @@ OMX_ERRORTYPE OMXNodeInstance::OnFillBufferDone(
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
- if (instance->mDying) {
+ if (atomic_load(&instance->mDying)) {
return OMX_ErrorNone;
}
int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
@@ -1771,7 +1778,8 @@ void OMXNodeInstance::removeActiveBuffer(
void OMXNodeInstance::freeActiveBuffers() {
// Make sure to count down here, as freeBuffer will in turn remove
// the active buffer from the vector...
- for (size_t i = mActiveBuffers.size(); i--;) {
+ for (size_t i = mActiveBuffers.size(); i;) {
+ i--;
freeBuffer(mActiveBuffers[i].mPortIndex, mActiveBuffers[i].mID);
}
}
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 13afd45..2ae807e 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -425,16 +425,43 @@ void SimpleSoftOMXComponent::onSendCommand(
}
}
+void SimpleSoftOMXComponent::onTransitionError() {
+ mState = OMX_StateInvalid;
+ mTargetState = OMX_StateInvalid;
+ notify(OMX_EventError, OMX_CommandStateSet, OMX_StateInvalid, 0);
+}
+
void SimpleSoftOMXComponent::onChangeState(OMX_STATETYPE state) {
+ bool skipTransitions = false;
+
// We shouldn't be in a state transition already.
- CHECK_EQ((int)mState, (int)mTargetState);
+ if (mState != mTargetState) {
+ // Workaround to prevent assertion
+ // XXX CHECK_EQ((int)mState, (int)mTargetState);
+ ALOGW("mState %d != mTargetState %d", mState, mTargetState);
+ skipTransitions = true;
+ onTransitionError();
+ }
switch (mState) {
case OMX_StateLoaded:
- CHECK_EQ((int)state, (int)OMX_StateIdle);
+ if (state != OMX_StateIdle) {
+ // Workaround to prevent assertion
+ // XXX CHECK_EQ((int)state, (int)OMX_StateIdle);
+ ALOGW("In OMX_StateLoaded, state %d != OMX_StateIdle", state);
+ skipTransitions = true;
+ onTransitionError();
+ }
break;
case OMX_StateIdle:
- CHECK(state == OMX_StateLoaded || state == OMX_StateExecuting);
+ if (!(state == OMX_StateLoaded || state == OMX_StateExecuting)) {
+ // Workaround to prevent assertion
+ // XXX CHECK(state == OMX_StateLoaded || state == OMX_StateExecuting);
+ ALOGW("In OMX_StateIdle, state %d != OMX_StateLoaded||OMX_StateExecuting",
+ state);
+ skipTransitions = true;
+ onTransitionError();
+ }
break;
case OMX_StateExecuting:
{
@@ -448,11 +475,20 @@ void SimpleSoftOMXComponent::onChangeState(OMX_STATETYPE state) {
notify(OMX_EventCmdComplete, OMX_CommandStateSet, state, NULL);
break;
}
-
+ case OMX_StateInvalid: {
+ ALOGW("In OMX_StateInvalid, ignore state transition to %d", state);
+ skipTransitions = true;
+ onTransitionError();
+ break;
+ }
default:
TRESPASS();
}
+ if (skipTransitions) {
+ return;
+ }
+
mTargetState = state;
checkTransitions();
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 0f9c00c..4afd5d5 100755
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -59,6 +59,9 @@ static const struct {
{ "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
{ "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
{ "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
+#ifdef QTI_FLAC_DECODER
+ { "OMX.qti.audio.decoder.flac", "flacdec", "audio_decoder.flac" },
+#endif
};
static const size_t kNumComponents =
@@ -74,6 +77,7 @@ OMX_ERRORTYPE SoftOMXPlugin::makeComponentInstance(
OMX_COMPONENTTYPE **component) {
ALOGV("makeComponentInstance '%s'", name);
+ dlerror(); // clear any existing error
for (size_t i = 0; i < kNumComponents; ++i) {
if (strcmp(name, kComponents[i].mName)) {
continue;
@@ -91,6 +95,8 @@ OMX_ERRORTYPE SoftOMXPlugin::makeComponentInstance(
return OMX_ErrorComponentNotFound;
}
+ ALOGV("load component %s for %s", libName.c_str(), name);
+
typedef SoftOMXComponent *(*CreateSoftOMXComponentFunc)(
const char *, const OMX_CALLBACKTYPE *,
OMX_PTR, OMX_COMPONENTTYPE **);
@@ -101,7 +107,8 @@ OMX_ERRORTYPE SoftOMXPlugin::makeComponentInstance(
"_Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPE"
"PvPP17OMX_COMPONENTTYPE");
- if (createSoftOMXComponent == NULL) {
+ if (const char *error = dlerror()) {
+ ALOGE("unable to dlsym %s: %s", libName.c_str(), error);
dlclose(libHandle);
libHandle = NULL;
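The dlerror() handling added here follows the documented dlsym(3) discipline: clear any stale error first, then treat a non-NULL dlerror() after the lookup as the failure signal, since a NULL return value alone is ambiguous. A minimal sketch with hypothetical library and symbol names:

    #include <cstdio>
    #include <dlfcn.h>

    int main() {
        void *handle = dlopen("libexample_plugin.so", RTLD_NOW);  // hypothetical
        if (handle == NULL) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }

        dlerror();  // clear any existing error
        void *sym = dlsym(handle, "createExamplePlugin");         // hypothetical
        if (const char *error = dlerror()) {
            fprintf(stderr, "dlsym failed: %s\n", error);
            dlclose(handle);
            return 1;
        }

        printf("resolved symbol at %p\n", sym);
        dlclose(handle);
        return 0;
    }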
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 8ea7a6e..dc3ed39 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -606,6 +606,9 @@ const uint8_t *SoftVideoEncoderOMXComponent::extractGraphicBuffer(
break;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
+#ifdef MTK_HARDWARE
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+#endif
ConvertRGB32ToPlanar(
dst, dstStride, dstVStride,
(const uint8_t *)bits, width, height, srcStride,
diff --git a/media/libstagefright/rtsp/AH263Assembler.cpp b/media/libstagefright/rtsp/AH263Assembler.cpp
index 75cd911..28594e2 100644
--- a/media/libstagefright/rtsp/AH263Assembler.cpp
+++ b/media/libstagefright/rtsp/AH263Assembler.cpp
@@ -27,6 +27,8 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/Utils.h>
+#include <mediaplayerservice/AVMediaServiceExtensions.h>
+
namespace android {
AH263Assembler::AH263Assembler(const sp<AMessage> &notify)
@@ -63,7 +65,8 @@ ARTPAssembler::AssemblyStatus AH263Assembler::addPacket(
if ((uint32_t)(*it)->int32Data() >= mNextExpectedSeqNo) {
break;
}
-
+ AVMediaServiceUtils::get()->addH263AdvancedPacket(
+ *it, &mPackets, mAccessUnitRTPTime);
it = queue->erase(it);
}
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index a1a6576..4302aee 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -379,7 +379,10 @@ sp<ABuffer> AMPEG4AudioAssembler::removeLATMFraming(const sp<ABuffer> &buffer) {
unsigned muxSlotLengthBytes = 0;
unsigned tmp;
do {
- CHECK_LT(offset, buffer->size());
+ if (offset >= buffer->size()) {
+ ALOGW("Malformed buffer received");
+ return out;
+ }
tmp = ptr[offset++];
muxSlotLengthBytes += tmp;
} while (tmp == 0xff);
@@ -420,6 +423,11 @@ sp<ABuffer> AMPEG4AudioAssembler::removeLATMFraming(const sp<ABuffer> &buffer) {
CHECK_LE(offset + (mOtherDataLenBits / 8), buffer->size());
offset += mOtherDataLenBits / 8;
}
+
+ if (i < mNumSubFrames && offset >= buffer->size()) {
+ ALOGW("Skip subframes after %d, total %d", (int)i, (int)mNumSubFrames);
+ break;
+ }
}
if (offset < buffer->size()) {
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index cfafaa7..e81325d 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -37,6 +37,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
#include <utils/Vector.h>
namespace android {
@@ -535,6 +536,8 @@ APacketSource::APacketSource(
return;
}
+ updateVideoTrackInfoFromESDS_MPEG4Video(mFormat);
+
mFormat->setInt32(kKeyWidth, width);
mFormat->setInt32(kKeyHeight, height);
} else if (!strncasecmp(desc.c_str(), "mpeg4-generic/", 14)) {
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index a86ab74..e021b29 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -81,7 +81,8 @@ void ARTPConnection::addStream(
const sp<ASessionDescription> &sessionDesc,
size_t index,
const sp<AMessage> &notify,
- bool injected) {
+ bool injected,
+ bool isIPV6) {
sp<AMessage> msg = new AMessage(kWhatAddStream, this);
msg->setInt32("rtp-socket", rtpSocket);
msg->setInt32("rtcp-socket", rtcpSocket);
@@ -89,6 +90,7 @@ void ARTPConnection::addStream(
msg->setSize("index", index);
msg->setMessage("notify", notify);
msg->setInt32("injected", injected);
+ msg->setInt32("isIPV6", isIPV6);
msg->post();
}
@@ -145,6 +147,10 @@ void ARTPConnection::MakePortPair(
TRESPASS();
}
+size_t ARTPConnection::sockAddrSize() {
+ return sizeof(struct sockaddr_in);
+}
+
void ARTPConnection::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatAddStream:
@@ -345,7 +351,7 @@ void ARTPConnection::onPollStreams() {
n = sendto(
s->mRTCPSocket, buffer->data(), buffer->size(), 0,
(const struct sockaddr *)&s->mRemoteRTCPAddr,
- sizeof(s->mRemoteRTCPAddr));
+ sockAddrSize());
} while (n < 0 && errno == EINTR);
if (n <= 0) {
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/ARTPConnection.h
index edbcc35..057007b 100644
--- a/media/libstagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/ARTPConnection.h
@@ -38,7 +38,8 @@ struct ARTPConnection : public AHandler {
int rtpSocket, int rtcpSocket,
const sp<ASessionDescription> &sessionDesc, size_t index,
const sp<AMessage> &notify,
- bool injected);
+ bool injected,
+ bool isIPV6 = false);
void removeStream(int rtpSocket, int rtcpSocket);
@@ -53,8 +54,8 @@ struct ARTPConnection : public AHandler {
protected:
virtual ~ARTPConnection();
virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual size_t sockAddrSize();
-private:
enum {
kWhatAddStream,
kWhatRemoveStream,
@@ -72,7 +73,7 @@ private:
bool mPollEventPending;
int64_t mLastReceiverReportTimeUs;
- void onAddStream(const sp<AMessage> &msg);
+ virtual void onAddStream(const sp<AMessage> &msg);
void onRemoveStream(const sp<AMessage> &msg);
void onPollStreams();
void onInjectPacket(const sp<AMessage> &msg);
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 855ffdc..b2c1fd1 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -26,6 +26,7 @@
#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <mediaplayerservice/AVMediaServiceExtensions.h>
#include <arpa/inet.h>
#include <fcntl.h>
@@ -170,7 +171,7 @@ bool ARTSPConnection::ParseURL(
}
}
- const char *colonPos = strchr(host->c_str(), ':');
+ const char *colonPos = AVMediaServiceUtils::get()->parseURL(host);
if (colonPos != NULL) {
unsigned long x;
@@ -252,6 +253,11 @@ void ARTSPConnection::onConnect(const sp<AMessage> &msg) {
ALOGV("user = '%s', pass = '%s'", mUser.c_str(), mPass.c_str());
}
+ performConnect(reply, host, port);
+}
+
+void ARTSPConnection::performConnect(const sp<AMessage> &reply,
+ AString host, unsigned port) {
struct hostent *ent = gethostbyname(host.c_str());
if (ent == NULL) {
ALOGE("Unknown host %s", host.c_str());
@@ -381,7 +387,12 @@ void ARTSPConnection::onCompleteConnection(const sp<AMessage> &msg) {
socklen_t optionLen = sizeof(err);
CHECK_EQ(getsockopt(mSocket, SOL_SOCKET, SO_ERROR, &err, &optionLen), 0);
CHECK_EQ(optionLen, (socklen_t)sizeof(err));
+ performCompleteConnection(msg, err);
+}
+void ARTSPConnection::performCompleteConnection(const sp<AMessage> &msg, int err) {
+ sp<AMessage> reply;
+ CHECK(msg->findMessage("reply", &reply));
if (err != 0) {
ALOGE("err = %d (%s)", err, strerror(err));
diff --git a/media/libstagefright/rtsp/ARTSPConnection.h b/media/libstagefright/rtsp/ARTSPConnection.h
index 1fe9c99..c4ebe1d 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.h
+++ b/media/libstagefright/rtsp/ARTSPConnection.h
@@ -42,6 +42,8 @@ struct ARTSPConnection : public AHandler {
void observeBinaryData(const sp<AMessage> &reply);
+ virtual bool isIPV6() { return false; }
+
static bool ParseURL(
const char *url, AString *host, unsigned *port, AString *path,
AString *user, AString *pass);
@@ -49,8 +51,11 @@ struct ARTSPConnection : public AHandler {
protected:
virtual ~ARTSPConnection();
virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual void performConnect(const sp<AMessage> &reply,
+ AString host, unsigned port);
+ virtual void performCompleteConnection(const sp<AMessage> &msg,
+ int err);
-private:
enum State {
DISCONNECTED,
CONNECTING,
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 47573c3..497fae6 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -23,7 +23,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
-
+#include <mediaplayerservice/AVMediaServiceExtensions.h>
#include <stdlib.h>
namespace android {
@@ -268,7 +268,8 @@ bool ASessionDescription::getDurationUs(int64_t *durationUs) const {
}
float from, to;
- if (!parseNTPRange(value.c_str() + 4, &from, &to)) {
+ if (!AVMediaServiceUtils::get()->parseNTPRange(
+ value.c_str() + 4, &from, &to)) {
return false;
}
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index c5e8c35..8bc4295 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -22,8 +22,10 @@ LOCAL_SRC_FILES:= \
LOCAL_SHARED_LIBRARIES += libcrypto
LOCAL_C_INCLUDES:= \
- $(TOP)/frameworks/av/media/libstagefright \
- $(TOP)/frameworks/native/include/media/openmax
+ $(TOP)/frameworks/av/media/libstagefright \
+ $(TOP)/frameworks/av/media/libavextensions \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/av/media/libmediaplayerservice \
LOCAL_MODULE:= libstagefright_rtsp
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 0d0baf3..70063b1 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -18,7 +18,9 @@
#define MY_HANDLER_H_
+#ifndef LOG_NDEBUG
//#define LOG_NDEBUG 0
+#endif
#ifndef LOG_TAG
#define LOG_TAG "MyHandler"
@@ -32,6 +34,7 @@
#include "ASessionDescription.h"
#include <ctype.h>
+#include <cutils/properties.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -39,6 +42,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <mediaplayerservice/AVMediaServiceExtensions.h>
#include <arpa/inet.h>
#include <sys/socket.h>
@@ -64,6 +68,8 @@ static int64_t kDefaultKeepAliveTimeoutUs = 60000000ll;
static int64_t kPauseDelayUs = 3000000ll;
+static int64_t kTearDownTimeoutUs = 3000000ll;
+
namespace android {
static bool GetAttribute(const char *s, const char *key, AString *value) {
@@ -105,6 +111,8 @@ struct MyHandler : public AHandler {
kWhatEOS = 'eos!',
kWhatSeekDiscontinuity = 'seeD',
kWhatNormalPlayTimeMapping = 'nptM',
+ kWhatCancelCheck = 'canC',
+ kWhatByeReceived = 'byeR',
};
MyHandler(
@@ -115,8 +123,6 @@ struct MyHandler : public AHandler {
mUIDValid(uidValid),
mUID(uid),
mNetLooper(new ALooper),
- mConn(new ARTSPConnection(mUIDValid, mUID)),
- mRTPConn(new ARTPConnection),
mOriginalSessionURL(url),
mSessionURL(url),
mSetupTracksSuccessful(false),
@@ -140,11 +146,22 @@ struct MyHandler : public AHandler {
mPausing(false),
mPauseGeneration(0),
mPlayResponseParsed(false) {
+ mConn = AVMediaServiceFactory::get()->createARTSPConnection(
+ mUIDValid, uid);
+ mRTPConn = AVMediaServiceFactory::get()->createARTPConnection();
mNetLooper->setName("rtsp net");
mNetLooper->start(false /* runOnCallingThread */,
false /* canCallJava */,
PRIORITY_HIGHEST);
+ char value[PROPERTY_VALUE_MAX] = {0};
+ property_get("rtsp.transport.TCP", value, "false");
+ if (!strcasecmp(value, "true")) {
+ mTryTCPInterleaving = true;
+ } else {
+ mTryTCPInterleaving = false;
+ }
+
// Strip any authentication info from the session url, we don't
// want to transmit user/pass in cleartext.
AString host, path, user, pass;
@@ -244,6 +261,11 @@ struct MyHandler : public AHandler {
msg->post();
}
+ void cancelTimeoutCheck() {
+ sp<AMessage> msg = new AMessage('canC', this);
+ msg->post();
+ }
+
static void addRR(const sp<ABuffer> &buf) {
uint8_t *ptr = buf->data() + buf->size();
ptr[0] = 0x80 | 0;
@@ -703,6 +725,7 @@ struct MyHandler : public AHandler {
timeoutSecs);
}
}
+ AVMediaServiceUtils::get()->setServerTimeoutUs(mKeepAliveTimeoutUs);
i = mSessionID.find(";");
if (i >= 0) {
@@ -722,16 +745,19 @@ struct MyHandler : public AHandler {
// We are going to continue even if we were
// unable to poke a hole into the firewall...
- pokeAHole(
+ AVMediaServiceUtils::get()->pokeAHole(
+ this,
track->mRTPSocket,
track->mRTCPSocket,
- transport);
+ transport,
+ mSessionHost);
}
mRTPConn->addStream(
track->mRTPSocket, track->mRTCPSocket,
mSessionDesc, index,
- notify, track->mUsingInterleavedTCP);
+ notify, track->mUsingInterleavedTCP,
+ mConn->isIPV6());
mSetupTracksSuccessful = true;
} else {
@@ -774,6 +800,7 @@ struct MyHandler : public AHandler {
request.append(mSessionID);
request.append("\r\n");
+ AVMediaServiceUtils::get()->appendRange(&request);
request.append("\r\n");
sp<AMessage> reply = new AMessage('play', this);
@@ -922,6 +949,15 @@ struct MyHandler : public AHandler {
request.append("\r\n");
mConn->sendRequest(request.c_str(), reply);
+
+ // If the response of teardown hasn't been received in 3 seconds,
+ // post 'tear' message to avoid ANR.
+ if (!msg->findInt32("reconnect", &reconnect) || !reconnect) {
+ sp<AMessage> teardown = new AMessage('tear', this);
+ teardown->setInt32("result", -ECONNABORTED);
+ teardown->post(kTearDownTimeoutUs);
+ }
+
break;
}
@@ -1020,6 +1056,13 @@ struct MyHandler : public AHandler {
int32_t eos;
if (msg->findInt32("eos", &eos)) {
ALOGI("received BYE on track index %zu", trackIndex);
+ char value[PROPERTY_VALUE_MAX] = {0};
+ if (property_get("rtcp.bye.notify", value, "false")
+ && !strcasecmp(value, "true")) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatByeReceived);
+ msg->post();
+ }
if (!mAllTracksHaveTime && dataReceivedOnAllChannels()) {
ALOGI("No time established => fake existing data");
@@ -1386,6 +1429,13 @@ struct MyHandler : public AHandler {
break;
}
+ case 'canC':
+ {
+ ALOGV("cancel checking timeout");
+ mCheckGeneration++;
+ break;
+ }
+
default:
TRESPASS();
break;
@@ -1471,7 +1521,9 @@ struct MyHandler : public AHandler {
size_t trackIndex = 0;
while (trackIndex < mTracks.size()
- && !(val == mTracks.editItemAt(trackIndex).mURL)) {
+ && !(AVMediaServiceUtils::get()->parseTrackURL(
+ mTracks.editItemAt(trackIndex).mURL, val)
+ || val == mTracks.editItemAt(trackIndex).mURL)) {
++trackIndex;
}
CHECK_LT(trackIndex, mTracks.size());
@@ -1648,8 +1700,9 @@ private:
request.append(interleaveIndex + 1);
} else {
unsigned rtpPort;
- ARTPConnection::MakePortPair(
- &info->mRTPSocket, &info->mRTCPSocket, &rtpPort);
+ AVMediaServiceUtils::get()->makePortPair(
+ &info->mRTPSocket, &info->mRTCPSocket, &rtpPort,
+ mConn->isIPV6());
if (mUIDValid) {
HTTPBase::RegisterSocketUserTag(info->mRTPSocket, mUID,
@@ -1860,7 +1913,7 @@ private:
mLastMediaTimeUs = mediaTimeUs;
}
- if (mediaTimeUs < 0) {
+ if (mediaTimeUs < 0 && !mSeekable) {
ALOGV("dropping early accessUnit.");
return false;
}
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index fb28624..6f17747 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -33,6 +33,10 @@ LOCAL_SHARED_LIBRARIES:= \
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
LOCAL_CLANG := true
+ifeq ($(BOARD_NO_INTRA_MACROBLOCK_MODE_SUPPORT),true)
+LOCAL_CFLAGS += -DBOARD_NO_INTRA_MACROBLOCK_MODE_SUPPORT
+endif
+
LOCAL_MODULE:= libstagefright_wfd
LOCAL_MODULE_TAGS:= optional
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 471152e..9a2d08a 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -173,8 +173,10 @@ status_t Converter::initEncoder() {
mOutputFormat->setInt32("frame-rate", 30);
mOutputFormat->setInt32("i-frame-interval", 15); // Iframes every 15 secs
+#ifndef BOARD_NO_INTRA_MACROBLOCK_MODE_SUPPORT
// Configure encoder to use intra macroblock refresh mode
mOutputFormat->setInt32("intra-refresh-mode", OMX_VIDEO_IntraRefreshCyclic);
+#endif
int width, height, mbs;
if (!mOutputFormat->findInt32("width", &width)
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 7e017b9..ac25582 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -51,6 +51,16 @@ LOCAL_C_INCLUDES := \
frameworks/av/services/radio \
external/sonic
+ifneq ($(BOARD_NUMBER_OF_CAMERAS),)
+ LOCAL_CFLAGS += -DMAX_CAMERAS=$(BOARD_NUMBER_OF_CAMERAS)
+endif
+
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_LISTEN)),true)
+ LOCAL_SHARED_LIBRARIES += liblisten
+ LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio/audio-listen
+ LOCAL_CFLAGS += -DAUDIO_LISTEN_ENABLED
+endif
+
LOCAL_MODULE:= mediaserver
LOCAL_32_BIT_ONLY := true
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 8cc9508..f785c0d 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -40,6 +40,10 @@
#include "SoundTriggerHwService.h"
#include "RadioService.h"
+#ifdef AUDIO_LISTEN_ENABLED
+#include "ListenService.h"
+#endif
+
using namespace android;
int main(int argc __unused, char** argv)
@@ -139,6 +143,10 @@ int main(int argc __unused, char** argv)
MediaPlayerService::instantiate();
ResourceManagerService::instantiate();
CameraService::instantiate();
+#ifdef AUDIO_LISTEN_ENABLED
+ ALOGI("ListenService instantiated");
+ ListenService::instantiate();
+#endif
AudioPolicyService::instantiate();
SoundTriggerHwService::instantiate();
RadioService::instantiate();
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 07199e3..d5f8ff4 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -95,6 +95,7 @@ static const MtpEventCode kSupportedEventCodes[] = {
MTP_EVENT_STORE_ADDED,
MTP_EVENT_STORE_REMOVED,
MTP_EVENT_DEVICE_PROP_CHANGED,
+ MTP_EVENT_OBJECT_PROP_CHANGED,
};
MtpServer::MtpServer(int fd, MtpDatabase* database, bool ptp,
@@ -136,20 +137,9 @@ void MtpServer::removeStorage(MtpStorage* storage) {
}
MtpStorage* MtpServer::getStorage(MtpStorageID id) {
- if (id == 0)
- return mStorages[0];
- for (size_t i = 0; i < mStorages.size(); i++) {
- MtpStorage* storage = mStorages[i];
- if (storage->getStorageID() == id)
- return storage;
- }
- return NULL;
-}
+ Mutex::Autolock autoLock(mMutex);
-bool MtpServer::hasStorage(MtpStorageID id) {
- if (id == 0 || id == 0xFFFFFFFF)
- return mStorages.size() > 0;
- return (getStorage(id) != NULL);
+ return getStorageLocked(id);
}
void MtpServer::run() {
@@ -214,10 +204,11 @@ void MtpServer::run() {
mResponse.setTransactionID(transaction);
ALOGV("sending response %04X", mResponse.getResponseCode());
ret = mResponse.write(fd);
+ const int savedErrno = errno;
mResponse.dump();
if (ret < 0) {
ALOGE("request write returned %d, errno: %d", ret, errno);
- if (errno == ECANCELED) {
+ if (savedErrno == ECANCELED) {
// return to top of loop and wait for next command
continue;
}
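Capturing errno immediately after the failing write is what makes the ECANCELED comparison trustworthy, because the dump() call in between may clobber it. A tiny standalone sketch of the pattern (the failing call here is just a write to an invalid descriptor):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <unistd.h>

    int main() {
        ssize_t ret = write(-1, "x", 1);              // deliberately fails with EBADF
        const int savedErrno = errno;                 // capture before anything else runs
        fprintf(stderr, "write returned %zd\n", ret); // may itself change errno
        if (ret < 0 && savedErrno == EBADF) {
            fprintf(stderr, "saved errno %d: %s\n", savedErrno, strerror(savedErrno));
        }
        return 0;
    }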
@@ -253,6 +244,28 @@ void MtpServer::sendObjectRemoved(MtpObjectHandle handle) {
sendEvent(MTP_EVENT_OBJECT_REMOVED, handle);
}
+void MtpServer::sendObjectUpdated(MtpObjectHandle handle) {
+ ALOGV("sendObjectUpdated %d\n", handle);
+ sendEvent(MTP_EVENT_OBJECT_PROP_CHANGED, handle);
+}
+
+MtpStorage* MtpServer::getStorageLocked(MtpStorageID id) {
+ if (id == 0)
+ return mStorages.empty() ? NULL : mStorages[0];
+ for (size_t i = 0; i < mStorages.size(); i++) {
+ MtpStorage* storage = mStorages[i];
+ if (storage->getStorageID() == id)
+ return storage;
+ }
+ return NULL;
+}
+
+bool MtpServer::hasStorage(MtpStorageID id) {
+ if (id == 0 || id == 0xFFFFFFFF)
+ return mStorages.size() > 0;
+ return (getStorageLocked(id) != NULL);
+}
+
void MtpServer::sendStoreAdded(MtpStorageID id) {
ALOGV("sendStoreAdded %08X\n", id);
sendEvent(MTP_EVENT_STORE_ADDED, id);
@@ -536,7 +549,7 @@ MtpResponseCode MtpServer::doGetStorageInfo() {
return MTP_RESPONSE_INVALID_PARAMETER;
MtpStorageID id = mRequest.getParameter(1);
- MtpStorage* storage = getStorage(id);
+ MtpStorage* storage = getStorageLocked(id);
if (!storage)
return MTP_RESPONSE_INVALID_STORAGE_ID;
@@ -787,15 +800,19 @@ MtpResponseCode MtpServer::doGetObject() {
// then transfer the file
int ret = ioctl(mFD, MTP_SEND_FILE_WITH_HEADER, (unsigned long)&mfr);
- ALOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
- close(mfr.fd);
if (ret < 0) {
- if (errno == ECANCELED)
- return MTP_RESPONSE_TRANSACTION_CANCELLED;
- else
- return MTP_RESPONSE_GENERAL_ERROR;
+ if (errno == ECANCELED) {
+ result = MTP_RESPONSE_TRANSACTION_CANCELLED;
+ } else {
+ result = MTP_RESPONSE_GENERAL_ERROR;
+ }
+ } else {
+ result = MTP_RESPONSE_OK;
}
- return MTP_RESPONSE_OK;
+
+ ALOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
+ close(mfr.fd);
+ return result;
}
MtpResponseCode MtpServer::doGetThumb() {
@@ -864,14 +881,15 @@ MtpResponseCode MtpServer::doGetPartialObject(MtpOperationCode operation) {
// transfer the file
int ret = ioctl(mFD, MTP_SEND_FILE_WITH_HEADER, (unsigned long)&mfr);
ALOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
- close(mfr.fd);
+ result = MTP_RESPONSE_OK;
if (ret < 0) {
if (errno == ECANCELED)
- return MTP_RESPONSE_TRANSACTION_CANCELLED;
+ result = MTP_RESPONSE_TRANSACTION_CANCELLED;
else
- return MTP_RESPONSE_GENERAL_ERROR;
+ result = MTP_RESPONSE_GENERAL_ERROR;
}
- return MTP_RESPONSE_OK;
+ close(mfr.fd);
+ return result;
}
MtpResponseCode MtpServer::doSendObjectInfo() {
@@ -882,7 +900,7 @@ MtpResponseCode MtpServer::doSendObjectInfo() {
if (mRequest.getParameterCount() < 2)
return MTP_RESPONSE_INVALID_PARAMETER;
MtpStorageID storageID = mRequest.getParameter(1);
- MtpStorage* storage = getStorage(storageID);
+ MtpStorage* storage = getStorageLocked(storageID);
MtpObjectHandle parent = mRequest.getParameter(2);
if (!storage)
return MTP_RESPONSE_INVALID_STORAGE_ID;
@@ -985,6 +1003,7 @@ MtpResponseCode MtpServer::doSendObject() {
MtpResponseCode result = MTP_RESPONSE_OK;
mode_t mask;
int ret, initialData;
+ bool isCanceled = false;
if (mSendObjectHandle == kInvalidObjectHandle) {
ALOGE("Expected SendObjectInfo before SendObject");
@@ -1032,6 +1051,10 @@ MtpResponseCode MtpServer::doSendObject() {
ALOGV("receiving %s\n", (const char *)mSendObjectFilePath);
// transfer the file
ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+ if ((ret < 0) && (errno == ECANCELED)) {
+ isCanceled = true;
+ }
+
ALOGV("MTP_RECEIVE_FILE returned %d\n", ret);
}
}
@@ -1039,7 +1062,7 @@ MtpResponseCode MtpServer::doSendObject() {
if (ret < 0) {
unlink(mSendObjectFilePath);
- if (errno == ECANCELED)
+ if (isCanceled)
result = MTP_RESPONSE_TRANSACTION_CANCELLED;
else
result = MTP_RESPONSE_GENERAL_ERROR;
@@ -1208,6 +1231,7 @@ MtpResponseCode MtpServer::doSendPartialObject() {
length -= initialData;
}
+ bool isCanceled = false;
if (ret < 0) {
ALOGE("failed to write initial data");
} else {
@@ -1219,12 +1243,15 @@ MtpResponseCode MtpServer::doSendPartialObject() {
// transfer the file
ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+ if ((ret < 0) && (errno == ECANCELED)) {
+ isCanceled = true;
+ }
ALOGV("MTP_RECEIVE_FILE returned %d", ret);
}
}
if (ret < 0) {
mResponse.setParameter(1, 0);
- if (errno == ECANCELED)
+ if (isCanceled)
return MTP_RESPONSE_TRANSACTION_CANCELLED;
else
return MTP_RESPONSE_GENERAL_ERROR;
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index b3a11e0..a79e199 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -95,8 +95,6 @@ public:
virtual ~MtpServer();
MtpStorage* getStorage(MtpStorageID id);
- inline bool hasStorage() { return mStorages.size() > 0; }
- bool hasStorage(MtpStorageID id);
void addStorage(MtpStorage* storage);
void removeStorage(MtpStorage* storage);
@@ -105,8 +103,13 @@ public:
void sendObjectAdded(MtpObjectHandle handle);
void sendObjectRemoved(MtpObjectHandle handle);
void sendDevicePropertyChanged(MtpDeviceProperty property);
+ void sendObjectUpdated(MtpObjectHandle handle);
private:
+ MtpStorage* getStorageLocked(MtpStorageID id);
+ inline bool hasStorage() { return mStorages.size() > 0; }
+ bool hasStorage(MtpStorageID id);
+
void sendStoreAdded(MtpStorageID id);
void sendStoreRemoved(MtpStorageID id);
void sendEvent(MtpEventCode code, uint32_t param1);
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 0667bdd..ebf3601 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -19,8 +19,6 @@
#include <stdio.h>
#include <time.h>
-#include <../private/bionic_time.h> /* TODO: switch this code to icu4c! */
-
#include "MtpUtils.h"
namespace android {
@@ -32,38 +30,40 @@ representation, YYYY shall be replaced by the year, MM replaced by the month (01
DD replaced by the day (01-31), T is a constant character 'T' delimiting time from date,
hh is replaced by the hour (00-23), mm is replaced by the minute (00-59), and ss by the
second (00-59). The ".s" is optional, and represents tenths of a second.
+This is followed by a UTC offset given as "[+-]zzzz" or the literal "Z", meaning UTC.
*/
bool parseDateTime(const char* dateTime, time_t& outSeconds) {
int year, month, day, hour, minute, second;
- struct tm tm;
-
if (sscanf(dateTime, "%04d%02d%02dT%02d%02d%02d",
- &year, &month, &day, &hour, &minute, &second) != 6)
+ &year, &month, &day, &hour, &minute, &second) != 6)
return false;
- const char* tail = dateTime + 15;
+
// skip optional tenth of second
- if (tail[0] == '.' && tail[1])
- tail += 2;
- //FIXME - support +/-hhmm
- bool useUTC = (tail[0] == 'Z');
+ const char* tail = dateTime + 15;
+ if (tail[0] == '.' && tail[1]) tail += 2;
- // hack to compute timezone
- time_t dummy;
- localtime_r(&dummy, &tm);
+ // FIXME: "Z" means UTC, but non-"Z" doesn't mean local time.
+ // It might be that you're in Asia/Seoul on vacation and your Android
+ // device has noticed this via the network, but your camera was set to
+ // America/Los_Angeles once when you bought it and doesn't know where
+ // it is right now, so the camera says "20160106T081700-0800" but we
+ // just ignore the "-0800" and assume local time which is actually "+0900".
+ // I think to support this (without switching to Java or using icu4c)
+ // you'd want to always use timegm(3) and then manually add/subtract
+ // the UTC offset parsed from the string (taking care of wrapping).
+ // mktime(3) ignores the tm_gmtoff field, so you can't let it do the work.
+ bool useUTC = (tail[0] == 'Z');
+ struct tm tm = {};
tm.tm_sec = second;
tm.tm_min = minute;
tm.tm_hour = hour;
tm.tm_mday = day;
tm.tm_mon = month - 1; // mktime uses months in 0 - 11 range
tm.tm_year = year - 1900;
- tm.tm_wday = 0;
tm.tm_isdst = -1;
- if (useUTC)
- outSeconds = mktime(&tm);
- else
- outSeconds = mktime_tz(&tm, tm.tm_zone);
+ outSeconds = useUTC ? timegm(&tm) : mktime(&tm);
return true;
}
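A standalone sketch of the rewritten parser's behaviour (the helper below mirrors the logic above rather than calling MtpUtils; note that timegm(3) is a glibc/BSD/bionic extension, not ISO C):

    #include <cstdio>
    #include <ctime>

    // "...Z" is interpreted with timegm(3) (UTC); anything else falls back to
    // mktime(3) (local time), and an explicit "+hhmm"/"-hhmm" offset is still
    // ignored, as the FIXME above notes.
    static bool parseMtpDateTime(const char *s, time_t &out) {
        int year, month, day, hour, minute, second;
        if (sscanf(s, "%04d%02d%02dT%02d%02d%02d",
                   &year, &month, &day, &hour, &minute, &second) != 6) {
            return false;
        }
        const char *tail = s + 15;
        if (tail[0] == '.' && tail[1]) tail += 2;  // skip optional tenths of a second
        bool useUTC = (tail[0] == 'Z');

        struct tm tm = {};
        tm.tm_sec = second;
        tm.tm_min = minute;
        tm.tm_hour = hour;
        tm.tm_mday = day;
        tm.tm_mon = month - 1;
        tm.tm_year = year - 1900;
        tm.tm_isdst = -1;
        out = useUTC ? timegm(&tm) : mktime(&tm);
        return true;
    }

    int main() {
        time_t t;
        if (parseMtpDateTime("20160106T081700Z", t)) {
            printf("UTC timestamp: %lld\n", (long long)t);
        }
        return 0;
    }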
@@ -73,7 +73,7 @@ void formatDateTime(time_t seconds, char* buffer, int bufferLength) {
localtime_r(&seconds, &tm);
snprintf(buffer, bufferLength, "%04d%02d%02dT%02d%02d%02d",
- tm.tm_year + 1900,
+ tm.tm_year + 1900,
tm.tm_mon + 1, // localtime_r uses months in 0 - 11 range
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
}
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
index 7f9cd7a..355c231 100644
--- a/media/utils/BatteryNotifier.cpp
+++ b/media/utils/BatteryNotifier.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#define LOG_TAG "BatteryNotifier"
+//#define LOG_NDEBUG 0
+
#include "include/mediautils/BatteryNotifier.h"
#include <binder/IServiceManager.h>
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 9b4ba79..8ea26d3 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -1,3 +1,23 @@
+#
+# This file was modified by DTS, Inc. The portions of the
+# code that are surrounded by "DTS..." are copyrighted and
+# licensed separately, as follows:
+#
+# (C) 2015 DTS, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
@@ -60,6 +80,14 @@ LOCAL_STATIC_LIBRARIES := \
libcpustats \
libmedia_helper
+#QTI Resampler
+ifeq ($(call is-vendor-board-platform,QCOM), true)
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_EXTN_RESAMPLER)), true)
+LOCAL_CFLAGS += -DQTI_RESAMPLER
+endif
+endif
+#QTI Resampler
+
LOCAL_MODULE:= libaudioflinger
LOCAL_32_BIT_ONLY := true
@@ -78,6 +106,11 @@ LOCAL_SRC_FILES += \
LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
LOCAL_CFLAGS += -fvisibility=hidden
+ifeq ($(strip $(BOARD_USES_SRS_TRUEMEDIA)),true)
+LOCAL_SHARED_LIBRARIES += libsrsprocessing
+LOCAL_CFLAGS += -DSRS_PROCESSING
+LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio/audio-effects
+endif
include $(BUILD_SHARED_LIBRARY)
@@ -123,7 +156,26 @@ LOCAL_C_INCLUDES := \
LOCAL_SHARED_LIBRARIES := \
libcutils \
libdl \
- liblog
+ liblog \
+ libaudioutils
+
+#QTI Resampler
+ifeq ($(call is-vendor-board-platform,QCOM),true)
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_EXTN_RESAMPLER)),true)
+ifdef TARGET_2ND_ARCH
+LOCAL_SRC_FILES_$(TARGET_2ND_ARCH) += AudioResamplerQTI.cpp.arm
+LOCAL_C_INCLUDES_$(TARGET_2ND_ARCH) += $(TARGET_OUT_HEADERS)/mm-audio/audio-src
+LOCAL_SHARED_LIBRARIES_$(TARGET_2ND_ARCH) += libqct_resampler
+LOCAL_CFLAGS_$(TARGET_2ND_ARCH) += -DQTI_RESAMPLER
+else
+LOCAL_SRC_FILES += AudioResamplerQTI.cpp.arm
+LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/mm-audio/audio-src
+LOCAL_SHARED_LIBRARIES += libqct_resampler
+LOCAL_CFLAGS += -DQTI_RESAMPLER
+endif
+endif
+endif
+#QTI Resampler
LOCAL_MODULE := libaudioresampler
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index fab1ef5..23215dd 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -13,6 +13,25 @@
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
+**
+** This file was modified by DTS, Inc. The portions of the
+** code that are surrounded by "DTS..." are copyrighted and
+** licensed separately, as follows:
+**
+** (C) 2015 DTS, Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+**
*/
@@ -64,6 +83,9 @@
#include <media/nbaio/PipeReader.h>
#include <media/AudioParameter.h>
#include <private/android_filesystem_config.h>
+#ifdef SRS_PROCESSING
+#include "postpro_patch.h"
+#endif
// ----------------------------------------------------------------------------
@@ -131,6 +153,14 @@ const char *formatToString(audio_format_t format) {
case AUDIO_FORMAT_OPUS: return "opus";
case AUDIO_FORMAT_AC3: return "ac-3";
case AUDIO_FORMAT_E_AC3: return "e-ac-3";
+ case AUDIO_FORMAT_PCM_OFFLOAD:
+ switch (format) {
+ case AUDIO_FORMAT_PCM_16_BIT_OFFLOAD: return "pcm-16bit-offload";
+ case AUDIO_FORMAT_PCM_24_BIT_OFFLOAD: return "pcm-24bit-offload";
+ default:
+ break;
+ }
+ break;
default:
break;
}
@@ -1043,6 +1073,13 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
Mutex::Autolock _l(mLock);
status_t final_result = NO_ERROR;
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_PARAMS_SET(keyValuePairs);
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+ thread->setPostPro();
+ }
+#endif
{
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_PARAMETER;
@@ -1053,9 +1090,24 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
}
mHardwareStatus = AUDIO_HW_IDLE;
}
- // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
+
AudioParameter param = AudioParameter(keyValuePairs);
- String8 value;
+ String8 value, key;
+ key = String8("SND_CARD_STATUS");
+ if (param.get(key, value) == NO_ERROR) {
+ ALOGV("Set keySoundCardStatus:%s", value.string());
+ if (value.find("OFFLINE", 0) != -1) {
+ ALOGV("OFFLINE detected - call invalidateTracks()");
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+ if (thread->getOutput()->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ thread->invalidateTracks(AUDIO_STREAM_MUSIC);
+ }
+ }
+ }
+ }
+
+ // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
if (param.get(String8(AUDIO_PARAMETER_KEY_BT_NREC), value) == NO_ERROR) {
bool btNrecIsOff = (value == AUDIO_PARAMETER_VALUE_OFF);
if (mBtNrecIsOff != btNrecIsOff) {
@@ -1122,6 +1174,9 @@ String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& k
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
String8 out_s8;
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_PARAMS_GET(keys, out_s8);
+#endif
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
char *s;
@@ -1347,6 +1402,12 @@ sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(int sessionId,
+void AudioFlinger::PlaybackThread::setPostPro()
+{
+ Mutex::Autolock _l(mLock);
+ if (mType == OFFLOAD)
+ broadcast_l();
+}
// ----------------------------------------------------------------------------
AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
@@ -1822,7 +1883,11 @@ sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_
|| !isValidPcmSinkFormat(config->format)
|| !isValidPcmSinkChannelMask(config->channel_mask)) {
thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
+ ALOGV("openOutput_l() created direct output: ID %d thread %p ", *output, thread);
+ // Check if this is Direct PCM; if so, mark the thread so it is handled like offload
+ if (flags & AUDIO_OUTPUT_FLAG_DIRECT_PCM) {
+ thread->mIsDirectPcm = true;
+ }
} else {
thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
@@ -2964,6 +3029,7 @@ void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_hand
bool firstRead = true;
#define TEE_SINK_READ 1024 // frames per I/O operation
void *buffer = malloc(TEE_SINK_READ * frameSize);
+ ALOG_ASSERT(buffer != NULL);
for (;;) {
size_t count = TEE_SINK_READ;
ssize_t actual = teeSource->read(buffer, count,
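For reference, a minimal standalone sketch of the nested-switch pattern the formatToString() hunk above relies on: the outer switch dispatches on the main format and the inner switch resolves the offload sub-format. The enum values and mask below are hypothetical stand-ins, not the real audio_format_t constants.

    #include <cstdio>

    enum Format : unsigned {
        FMT_MAIN_MASK      = 0xFF000000u,
        FMT_PCM_OFFLOAD    = 0x1A000000u,          // hypothetical main format
        FMT_PCM_16_OFFLOAD = FMT_PCM_OFFLOAD | 1,  // hypothetical sub-formats
        FMT_PCM_24_OFFLOAD = FMT_PCM_OFFLOAD | 2,
    };

    static const char *formatToString(unsigned format) {
        switch (format & FMT_MAIN_MASK) {
        case FMT_PCM_OFFLOAD:
            switch (format) {                      // resolve the sub-format
            case FMT_PCM_16_OFFLOAD: return "pcm-16bit-offload";
            case FMT_PCM_24_OFFLOAD: return "pcm-24bit-offload";
            default: break;
            }
            break;
        default:
            break;
        }
        return "";
    }

    int main() {
        std::printf("%s\n", formatToString(FMT_PCM_24_OFFLOAD));  // prints pcm-24bit-offload
        return 0;
    }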
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 8a9a837..bb9d4e5 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -85,6 +85,9 @@ static const bool kUseFloat = true;
// Set to default copy buffer size in frames for input processing.
static const size_t kCopyBufferFrameCount = 256;
+#ifdef QTI_RESAMPLER
+#define QTI_RESAMPLER_MAX_SAMPLERATE 192000
+#endif
namespace android {
// ----------------------------------------------------------------------------
@@ -305,6 +308,11 @@ bool AudioMixer::setChannelMasks(int name,
void AudioMixer::track_t::unprepareForDownmix() {
ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
+ if (mPostDownmixReformatBufferProvider != NULL) {
+ delete mPostDownmixReformatBufferProvider;
+ mPostDownmixReformatBufferProvider = NULL;
+ reconfigureBufferProviders();
+ }
mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
if (downmixerBufferProvider != NULL) {
// this track had previously been configured with a downmixer, delete it
@@ -360,18 +368,9 @@ status_t AudioMixer::track_t::prepareForDownmix()
void AudioMixer::track_t::unprepareForReformat() {
ALOGV("AudioMixer::unprepareForReformat(%p)", this);
- bool requiresReconfigure = false;
if (mReformatBufferProvider != NULL) {
delete mReformatBufferProvider;
mReformatBufferProvider = NULL;
- requiresReconfigure = true;
- }
- if (mPostDownmixReformatBufferProvider != NULL) {
- delete mPostDownmixReformatBufferProvider;
- mPostDownmixReformatBufferProvider = NULL;
- requiresReconfigure = true;
- }
- if (requiresReconfigure) {
reconfigureBufferProviders();
}
}
@@ -779,6 +778,14 @@ bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSam
// but if none exists, it is the channel count (1 for mono).
const int resamplerChannelCount = downmixerBufferProvider != NULL
? mMixerChannelCount : channelCount;
+#ifdef QTI_RESAMPLER
+ if ((trackSampleRate <= QTI_RESAMPLER_MAX_SAMPLERATE) &&
+ (trackSampleRate > devSampleRate * 2) &&
+ ((devSampleRate == 48000) || (devSampleRate == 44100)) &&
+ (resamplerChannelCount <= 2)) {
+ quality = AudioResampler::QTI_QUALITY;
+ }
+#endif
ALOGVV("Creating resampler:"
" format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
@@ -1644,6 +1651,9 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
// Note: In case of later int16_t sink output,
// conversion and clamping is done by memcpy_to_i16_from_float().
} while (--outFrames);
+ // Point out at fout once no more frames are available, so that the
+ // trailing zero-fill lands at the right place in the float buffer.
+ out = (int32_t *)fout;
break;
case AUDIO_FORMAT_PCM_16_BIT:
if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
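The condition added to track_t::setResampler() above only selects the QTI resampler for heavy downsampling (more than 2:1) into the standard 48 kHz / 44.1 kHz device rates, for mono or stereo content, up to the 192 kHz ceiling defined by QTI_RESAMPLER_MAX_SAMPLERATE. A plain-C++ restatement of that predicate, with no Android types:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kQtiResamplerMaxSampleRate = 192000;  // QTI_RESAMPLER_MAX_SAMPLERATE

    // Mirrors the #ifdef QTI_RESAMPLER block in AudioMixer::track_t::setResampler().
    bool useQtiResampler(uint32_t trackSampleRate, uint32_t devSampleRate,
                         int resamplerChannelCount) {
        return trackSampleRate <= kQtiResamplerMaxSampleRate
            && trackSampleRate > devSampleRate * 2                    // heavy downsampling only
            && (devSampleRate == 48000 || devSampleRate == 44100)
            && resamplerChannelCount <= 2;                            // mono or stereo
    }

    int main() {
        assert(useQtiResampler(192000, 48000, 2));    // 4:1 downsample -> QTI_QUALITY
        assert(!useQtiResampler(96000, 48000, 2));    // exactly 2:1 stays on the dyn resampler
        assert(!useQtiResampler(192000, 44100, 6));   // multichannel content is excluded
        return 0;
    }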
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 7165c6c..f0ae4ec 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -137,6 +137,7 @@ public:
case AUDIO_FORMAT_PCM_8_BIT:
case AUDIO_FORMAT_PCM_16_BIT:
case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_8_24_BIT:
case AUDIO_FORMAT_PCM_32_BIT:
case AUDIO_FORMAT_PCM_FLOAT:
return true;
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index e49b7b1..ab3294a 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -28,6 +28,10 @@
#include "AudioResamplerCubic.h"
#include "AudioResamplerDyn.h"
+#ifdef QTI_RESAMPLER
+#include "AudioResamplerQTI.h"
+#endif
+
#ifdef __arm__
#define ASM_ARM_RESAMP1 // enable asm optimisation for ResamplerOrder1
#endif
@@ -90,6 +94,9 @@ bool AudioResampler::qualityIsSupported(src_quality quality)
case DYN_LOW_QUALITY:
case DYN_MED_QUALITY:
case DYN_HIGH_QUALITY:
+#ifdef QTI_RESAMPLER
+ case QTI_QUALITY:
+#endif
return true;
default:
return false;
@@ -110,7 +117,11 @@ void AudioResampler::init_routine()
if (*endptr == '\0') {
defaultQuality = (src_quality) l;
ALOGD("forcing AudioResampler quality to %d", defaultQuality);
+#ifdef QTI_RESAMPLER
+ if (defaultQuality < DEFAULT_QUALITY || defaultQuality > QTI_QUALITY) {
+#else
if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) {
+#endif
defaultQuality = DEFAULT_QUALITY;
}
}
@@ -129,6 +140,9 @@ uint32_t AudioResampler::qualityMHz(src_quality quality)
case HIGH_QUALITY:
return 20;
case VERY_HIGH_QUALITY:
+#ifdef QTI_RESAMPLER
+ case QTI_QUALITY: // for QTI_QUALITY, currently assume the same cost as VERY_HIGH_QUALITY
+#endif
return 34;
case DYN_LOW_QUALITY:
return 4;
@@ -204,6 +218,11 @@ AudioResampler* AudioResampler::create(audio_format_t format, int inChannelCount
case DYN_HIGH_QUALITY:
quality = DYN_MED_QUALITY;
break;
+#ifdef QTI_RESAMPLER
+ case QTI_QUALITY:
+ quality = DYN_HIGH_QUALITY;
+ break;
+#endif
}
}
pthread_mutex_unlock(&mutex);
@@ -250,6 +269,12 @@ AudioResampler* AudioResampler::create(audio_format_t format, int inChannelCount
}
}
break;
+#ifdef QTI_RESAMPLER
+ case QTI_QUALITY:
+ ALOGV("Create QTI_QUALITY Resampler = %d",quality);
+ resampler = new AudioResamplerQTI(format, inChannelCount, sampleRate);
+ break;
+#endif
}
// initialize resampler
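Taken together, the AudioResampler.cpp hunks touch two small decisions: the clamp applied to the resampler-quality system property, whose upper bound grows to QTI_QUALITY when QTI_RESAMPLER is defined, and the one-step downgrade applied when resamplers are created too quickly, where QTI_QUALITY now falls back to DYN_HIGH_QUALITY. A minimal sketch of both, assuming the enum ordering shown in the AudioResampler.h hunk below (other downgrade cases elided):

    #include <cassert>

    enum SrcQuality {
        DEFAULT_QUALITY = 0, LOW_QUALITY = 1, MED_QUALITY = 2, HIGH_QUALITY = 3,
        VERY_HIGH_QUALITY = 4, DYN_LOW_QUALITY = 5, DYN_MED_QUALITY = 6,
        DYN_HIGH_QUALITY = 7, QTI_QUALITY = 8,
    };

    // Clamp the property value to the supported range.
    SrcQuality clampQuality(long value, bool qtiResamplerBuilt) {
        const SrcQuality maxQuality = qtiResamplerBuilt ? QTI_QUALITY : DYN_HIGH_QUALITY;
        if (value < DEFAULT_QUALITY || value > maxQuality) {
            return DEFAULT_QUALITY;
        }
        return static_cast<SrcQuality>(value);
    }

    // One-step downgrade used when too many resamplers are created in a short window.
    SrcQuality downgradeOnce(SrcQuality quality) {
        switch (quality) {
        case DYN_HIGH_QUALITY: return DYN_MED_QUALITY;
        case QTI_QUALITY:      return DYN_HIGH_QUALITY;  // added by this patch
        default:               return quality;           // remaining cases elided
        }
    }

    int main() {
        assert(clampQuality(8, /*qtiResamplerBuilt=*/false) == DEFAULT_QUALITY);
        assert(clampQuality(8, /*qtiResamplerBuilt=*/true)  == QTI_QUALITY);
        assert(downgradeOnce(QTI_QUALITY) == DYN_HIGH_QUALITY);
        return 0;
    }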
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index a8e3e6f..6669a85 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -47,6 +47,9 @@ public:
DYN_LOW_QUALITY=5,
DYN_MED_QUALITY=6,
DYN_HIGH_QUALITY=7,
+#ifdef QTI_RESAMPLER
+ QTI_QUALITY=8,
+#endif
};
static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
diff --git a/services/audioflinger/AudioResamplerQTI.cpp b/services/audioflinger/AudioResamplerQTI.cpp
new file mode 100644
index 0000000..0d57e09
--- /dev/null
+++ b/services/audioflinger/AudioResamplerQTI.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014, The Linux Foundation. All rights reserved.
+ * Not a Contribution.
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AudioResamplerQTI.h"
+#include "QCT_Resampler.h"
+#include <sys/time.h>
+#include <audio_utils/primitives.h>
+
+namespace android {
+AudioResamplerQTI::AudioResamplerQTI(int format,
+ int inChannelCount, int32_t sampleRate)
+ :AudioResampler(inChannelCount, sampleRate, QTI_QUALITY),
+ mOutFrameCount(0), mTmpBuf(0), mResamplerOutBuf(0), mFrameIndex(0)
+{
+ stateSize = QCT_Resampler::MemAlloc(format, inChannelCount, sampleRate, sampleRate);
+ mState = new int16_t[stateSize];
+ mVolume[0] = mVolume[1] = 0;
+ mBuffer.frameCount = 0;
+}
+
+AudioResamplerQTI::~AudioResamplerQTI()
+{
+ if (mState) {
+ delete [] mState;
+ }
+ if (mTmpBuf) {
+ delete [] mTmpBuf;
+ }
+ if (mResamplerOutBuf) {
+ delete [] mResamplerOutBuf;
+ }
+}
+
+size_t AudioResamplerQTI::resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider)
+{
+ int16_t vl = mVolume[0];
+ int16_t vr = mVolume[1];
+ int32_t *pBuf;
+
+ int64_t tempL, tempR;
+ size_t inFrameRequest;
+ size_t inFrameCount = getNumInSample(outFrameCount);
+ size_t index = 0;
+ size_t frameIndex = mFrameIndex;
+ size_t out_count = outFrameCount * 2;
+ float *fout = reinterpret_cast<float *>(out);
+
+ if (mChannelCount == 1) {
+ inFrameRequest = inFrameCount;
+ } else {
+ inFrameRequest = inFrameCount * 2;
+ }
+
+ if (mOutFrameCount < outFrameCount) {
+ mOutFrameCount = outFrameCount;
+ if (mTmpBuf) {
+ delete [] mTmpBuf;
+ }
+ if (mResamplerOutBuf) {
+ delete [] mResamplerOutBuf;
+ }
+ mTmpBuf = new int32_t[inFrameRequest + 16];
+ mResamplerOutBuf = new int32_t[out_count];
+ }
+
+ if (mChannelCount == 1) {
+ // buffer is empty, fetch a new one
+ while (index < inFrameCount) {
+ if (!mBuffer.frameCount) {
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ frameIndex = 0;
+ }
+
+ if (mBuffer.raw == NULL) {
+ while (index < inFrameCount) {
+ mTmpBuf[index++] = 0;
+ }
+ QCT_Resampler::Resample90dB(mState, mTmpBuf, mResamplerOutBuf, inFrameCount, outFrameCount);
+ goto resample_exit;
+ }
+
+ mTmpBuf[index++] = clampq4_27_from_float(*((float *)mBuffer.raw + frameIndex++));
+
+ if (frameIndex >= mBuffer.frameCount) {
+ provider->releaseBuffer(&mBuffer);
+ }
+ }
+
+ QCT_Resampler::Resample90dB(mState, mTmpBuf, mResamplerOutBuf, inFrameCount, outFrameCount);
+ } else {
+ pBuf = &mTmpBuf[inFrameCount];
+ // buffer is empty, fetch a new one
+ while (index < inFrameCount) {
+ if (!mBuffer.frameCount) {
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ frameIndex = 0;
+ }
+ if (mBuffer.raw == NULL) {
+ while (index < inFrameCount) {
+ mTmpBuf[index] = 0;
+ pBuf[index++] = 0;
+ }
+ QCT_Resampler::Resample90dB(mState, mTmpBuf, mResamplerOutBuf, inFrameCount, outFrameCount);
+ goto resample_exit;
+ }
+
+ mTmpBuf[index] = clampq4_27_from_float(*((float *)mBuffer.raw + frameIndex++));
+ pBuf[index++] = clampq4_27_from_float(*((float *)mBuffer.raw + frameIndex++));
+ if (frameIndex >= mBuffer.frameCount * 2) {
+ provider->releaseBuffer(&mBuffer);
+ }
+ }
+
+ QCT_Resampler::Resample90dB(mState, mTmpBuf, mResamplerOutBuf, inFrameCount, outFrameCount);
+ }
+
+resample_exit:
+ for (size_t i = 0; i < out_count; i += 2) {
+ // Multiplying Q4.27 data by a U4.12 gain yields up to 39 fractional bits (27 + 12).
+ // Shift right by 12 to bring the result back to the Q4.27 output format.
+ tempL = (int64_t)mResamplerOutBuf[i] * vl;
+ tempR = (int64_t)mResamplerOutBuf[i+1] * vr;
+ fout[i] += float_from_q4_27((int32_t)(tempL>>12));
+ fout[i+1] += float_from_q4_27((int32_t)(tempR>>12));
+ }
+
+ mFrameIndex = frameIndex;
+ return index;
+}
+
+void AudioResamplerQTI::setSampleRate(int32_t inSampleRate)
+{
+ if (mInSampleRate != inSampleRate) {
+ mInSampleRate = inSampleRate;
+ init();
+ }
+}
+
+void AudioResamplerQTI::init()
+{
+ QCT_Resampler::Init(mState, mChannelCount, mInSampleRate, mSampleRate, 1/*32bit in*/);
+}
+
+size_t AudioResamplerQTI::getNumInSample(size_t outFrameCount)
+{
+ size_t size = (size_t)QCT_Resampler::GetNumInSamp(mState, outFrameCount);
+ return size;
+}
+
+void AudioResamplerQTI::reset()
+{
+ AudioResampler::reset();
+}
+
+}; // namespace android
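The gain loop at the end of AudioResamplerQTI::resample() works in Q4.27 fixed point with U4.12 volumes, then converts back to float before accumulating into the output. A self-contained sketch of that arithmetic; the conversion helpers here are simplified stand-ins for clampq4_27_from_float()/float_from_q4_27() from audio_utils/primitives.h:

    #include <cassert>
    #include <cstdint>
    #include <cmath>

    // Simplified Q4.27 conversions (stand-ins for the audio_utils helpers).
    static int32_t toQ4_27(float f) {
        double scaled = static_cast<double>(f) * (1 << 27);
        if (scaled >  2147483647.0) scaled =  2147483647.0;   // clamp to int32_t range
        if (scaled < -2147483648.0) scaled = -2147483648.0;
        return static_cast<int32_t>(scaled);
    }

    static float fromQ4_27(int32_t q) {
        return static_cast<float>(q) / static_cast<float>(1 << 27);
    }

    int main() {
        const float   sample = 0.5f;       // one resampler output sample
        const int16_t vl     = 1 << 12;    // U4.12 unity gain (4096)

        const int32_t q      = toQ4_27(sample);
        // Q4.27 * U4.12 carries up to 39 fractional bits; shift right by 12 to return to Q4.27.
        const int64_t scaled = static_cast<int64_t>(q) * vl;
        const float   out    = fromQ4_27(static_cast<int32_t>(scaled >> 12));

        assert(std::fabs(out - sample) < 1e-6f);  // unity gain round-trips the sample
        return 0;
    }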
diff --git a/services/audioflinger/AudioResamplerQTI.h b/services/audioflinger/AudioResamplerQTI.h
new file mode 100644
index 0000000..1cf93fc
--- /dev/null
+++ b/services/audioflinger/AudioResamplerQTI.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014, The Linux Foundation. All rights reserved.
+ * Not a Contribution.
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <cutils/log.h>
+
+#include "AudioResampler.h"
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+class AudioResamplerQTI : public AudioResampler {
+public:
+ AudioResamplerQTI(int format, int inChannelCount, int32_t sampleRate);
+ ~AudioResamplerQTI();
+ size_t resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+ void setSampleRate(int32_t inSampleRate);
+ size_t getNumInSample(size_t outFrameCount);
+
+ int16_t *mState;
+ int32_t *mTmpBuf;
+ int32_t *mResamplerOutBuf;
+ size_t mFrameIndex;
+ size_t stateSize;
+ size_t mOutFrameCount;
+
+ static const int kNumTmpBufSize = 1024;
+
+ void init();
+ void reset();
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
index a8be206..434a514 100644
--- a/services/audioflinger/BufferProviders.cpp
+++ b/services/audioflinger/BufferProviders.cpp
@@ -24,6 +24,7 @@
#include <media/EffectsFactoryApi.h>
#include <utils/Log.h>
+#include <media/stagefright/foundation/ADebug.h>
#include "Configuration.h"
#include "BufferProviders.h"
@@ -205,6 +206,7 @@ DownmixerBufferProvider::DownmixerBufferProvider(
const int downmixParamSize =
sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+ CHECK(param != NULL);
param->psize = sizeof(downmix_params_t);
const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
memcpy(param->data, &downmixParam, param->psize);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index f87b8f5..5505d2e 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -318,6 +318,7 @@ void AudioFlinger::EffectModule::reset_l()
status_t AudioFlinger::EffectModule::configure()
{
status_t status;
+ status_t cmdStatus = 0;
sp<ThreadBase> thread;
uint32_t size;
audio_channel_mask_t channelMask;
@@ -383,7 +384,6 @@ status_t AudioFlinger::EffectModule::configure()
ALOGV("configure() %p thread %p buffer %p framecount %d",
this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
- status_t cmdStatus;
size = sizeof(int);
status = (*mEffectInterface)->command(mEffectInterface,
EFFECT_CMD_SET_CONFIG,
@@ -434,7 +434,7 @@ status_t AudioFlinger::EffectModule::init()
if (mEffectInterface == NULL) {
return NO_INIT;
}
- status_t cmdStatus;
+ status_t cmdStatus = 0;
uint32_t size = sizeof(status_t);
status_t status = (*mEffectInterface)->command(mEffectInterface,
EFFECT_CMD_INIT,
@@ -476,7 +476,7 @@ status_t AudioFlinger::EffectModule::start_l()
if (mStatus != NO_ERROR) {
return mStatus;
}
- status_t cmdStatus;
+ status_t cmdStatus = 0;
uint32_t size = sizeof(status_t);
status_t status = (*mEffectInterface)->command(mEffectInterface,
EFFECT_CMD_ENABLE,
@@ -706,7 +706,7 @@ status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right,
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
(mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
- status_t cmdStatus;
+ status_t cmdStatus = 0;
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
@@ -741,7 +741,7 @@ status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device)
}
status_t status = NO_ERROR;
if ((mDescriptor.flags & EFFECT_FLAG_DEVICE_MASK) == EFFECT_FLAG_DEVICE_IND) {
- status_t cmdStatus;
+ status_t cmdStatus = 0;
uint32_t size = sizeof(status_t);
uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
EFFECT_CMD_SET_INPUT_DEVICE;
@@ -763,7 +763,7 @@ status_t AudioFlinger::EffectModule::setMode(audio_mode_t mode)
}
status_t status = NO_ERROR;
if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) {
- status_t cmdStatus;
+ status_t cmdStatus = 0;
uint32_t size = sizeof(status_t);
status = (*mEffectInterface)->command(mEffectInterface,
EFFECT_CMD_SET_AUDIO_MODE,
@@ -1142,13 +1142,15 @@ status_t AudioFlinger::EffectHandle::enable()
mEnabled = false;
} else {
if (thread != 0) {
- if (thread->type() == ThreadBase::OFFLOAD) {
+ if ((thread->type() == ThreadBase::OFFLOAD) ||
+ (thread->type() == ThreadBase::DIRECT && thread->mIsDirectPcm)) {
PlaybackThread *t = (PlaybackThread *)thread.get();
Mutex::Autolock _l(t->mLock);
t->broadcast_l();
}
if (!mEffect->isOffloadable()) {
- if (thread->type() == ThreadBase::OFFLOAD) {
+ if (thread->type() == ThreadBase::OFFLOAD ||
+ (thread->type() == ThreadBase::DIRECT && thread->mIsDirectPcm)) {
PlaybackThread *t = (PlaybackThread *)thread.get();
t->invalidateTracks(AUDIO_STREAM_MUSIC);
}
@@ -1185,7 +1187,8 @@ status_t AudioFlinger::EffectHandle::disable()
sp<ThreadBase> thread = mEffect->thread().promote();
if (thread != 0) {
thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
- if (thread->type() == ThreadBase::OFFLOAD) {
+ if ((thread->type() == ThreadBase::OFFLOAD) ||
+ (thread->type() == ThreadBase::DIRECT && thread->mIsDirectPcm)){
PlaybackThread *t = (PlaybackThread *)thread.get();
Mutex::Autolock _l(t->mLock);
t->broadcast_l();
@@ -1468,8 +1471,10 @@ void AudioFlinger::EffectChain::process_l()
(mSessionId == AUDIO_SESSION_OUTPUT_STAGE);
// never process effects when:
// - on an OFFLOAD thread
+ // - on DIRECT thread with directPcm flag enabled
// - no more tracks are on the session and the effect tail has been rendered
- bool doProcess = (thread->type() != ThreadBase::OFFLOAD);
+ bool doProcess = ((thread->type() != ThreadBase::OFFLOAD) &&
+ (!(thread->type() == ThreadBase::DIRECT && thread->mIsDirectPcm)));
if (!isGlobalSession) {
bool tracksOnSession = (trackCnt() != 0);
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index 1bba5f6..7c8a25f 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -25,6 +25,7 @@
#include <media/AudioBufferProvider.h>
#include <utils/Log.h>
#include <utils/Trace.h>
+#include "AudioFlinger.h"
#include "FastCapture.h"
namespace android {
@@ -105,7 +106,7 @@ void FastCapture::onStateChange()
mFormat = mInputSource->format();
mSampleRate = Format_sampleRate(mFormat);
unsigned channelCount = Format_channelCount(mFormat);
- ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
+ ALOG_ASSERT(channelCount >= 1 && channelCount <= 8);
}
dumpState->mSampleRate = mSampleRate;
eitherChanged = true;
diff --git a/services/audioflinger/FastCaptureDumpState.cpp b/services/audioflinger/FastCaptureDumpState.cpp
index 53eeba5..de4a6db 100644
--- a/services/audioflinger/FastCaptureDumpState.cpp
+++ b/services/audioflinger/FastCaptureDumpState.cpp
@@ -15,7 +15,7 @@
*/
#define LOG_TAG "FastCaptureDumpState"
-//define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include "Configuration.h"
#include <utils/Log.h>
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 45c68b5..2bc8066 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -334,6 +334,11 @@ void FastMixer::onWork()
if ((command & FastMixerState::MIX) && (mMixer != NULL) && mIsWarm) {
ALOG_ASSERT(mMixerBuffer != NULL);
+
+ // AudioMixer::mState.enabledTracks is undefined if mState.hook == process__validate,
+ // so we keep a side copy of enabledTracks
+ bool anyEnabledTracks = false;
+
// for each track, update volume and check for underrun
unsigned currentTrackMask = current->mTrackMask;
while (currentTrackMask != 0) {
@@ -392,11 +397,13 @@ void FastMixer::onWork()
underruns.mBitFields.mPartial++;
underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
mMixer->enable(name);
+ anyEnabledTracks = true;
}
} else {
underruns.mBitFields.mFull++;
underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
mMixer->enable(name);
+ anyEnabledTracks = true;
}
ftDump->mUnderruns = underruns;
ftDump->mFramesReady = framesReady;
@@ -407,9 +414,14 @@ void FastMixer::onWork()
pts = AudioBufferProvider::kInvalidPTS;
}
- // process() is CPU-bound
- mMixer->process(pts);
- mMixerBufferState = MIXED;
+ if (anyEnabledTracks) {
+ // process() is CPU-bound
+ mMixer->process(pts);
+ mMixerBufferState = MIXED;
+ } else if (mMixerBufferState != ZEROED) {
+ mMixerBufferState = UNDEFINED;
+ }
+
} else if (mMixerBufferState == MIXED) {
mMixerBufferState = UNDEFINED;
}
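The FastMixer change above skips mMixer->process() entirely when the underrun checks left no track enabled, and downgrades the mixer buffer state so stale data is never written out. A small standalone model of that decision, assuming the same three buffer states:

    #include <cassert>

    enum BufferState { UNDEFINED, MIXED, ZEROED };

    // Models the post-patch branch in FastMixer::onWork(): mix only when at least one
    // track is enabled; otherwise keep a ZEROED buffer as-is and mark anything else
    // UNDEFINED so it is re-zeroed before the next write.
    BufferState afterMixStep(bool anyEnabledTracks, BufferState state) {
        if (anyEnabledTracks) {
            return MIXED;          // this is where mMixer->process() would run
        }
        if (state != ZEROED) {
            return UNDEFINED;
        }
        return state;
    }

    int main() {
        assert(afterMixStep(true,  UNDEFINED) == MIXED);
        assert(afterMixStep(false, MIXED)     == UNDEFINED);
        assert(afterMixStep(false, ZEROED)    == ZEROED);   // already silent, nothing to do
        return 0;
    }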
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index 2e68dad..031ff05 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -106,6 +106,14 @@ bool captureAudioOutputAllowed() {
return ok;
}
+bool accessFmRadioAllowed() {
+ static const String16 sAccessFmRadio("android.permission.ACCESS_FM_RADIO");
+ // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+ bool ok = PermissionCache::checkCallingPermission(sAccessFmRadio);
+ if (!ok) ALOGE("Request requires android.permission.ACCESS_FM_RADIO");
+ return ok;
+}
+
bool captureHotwordAllowed() {
static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
// IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index fba6dce..dffb114 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -21,6 +21,7 @@ namespace android {
extern pid_t getpid_cached;
bool recordingAllowed(const String16& opPackageName);
+bool accessFmRadioAllowed();
bool captureAudioOutputAllowed();
bool captureHotwordAllowed();
bool settingsAllowed();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 71fc498..e5e8bdb 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -13,6 +13,25 @@
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
+**
+** This file was modified by DTS, Inc. The portions of the
+** code that are surrounded by "DTS..." are copyrighted and
+** licensed separately, as follows:
+**
+** (C) 2015 DTS, Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+**
*/
@@ -72,6 +91,9 @@
#include <cpustats/ThreadCpuUsage.h>
#endif
+#ifdef SRS_PROCESSING
+#include "postpro_patch.h"
+#endif
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -544,6 +566,7 @@ AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio
mSystemReady(systemReady)
{
memset(&mPatch, 0, sizeof(struct audio_patch));
+ mIsDirectPcm = false;
}
AudioFlinger::ThreadBase::~ThreadBase()
@@ -1154,7 +1177,8 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
// Reject any effect on Direct output threads for now, since the format of
// mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
- if (mType == DIRECT) {
+ // Exception: allow effects for Direct PCM
+ if (mType == DIRECT && !mIsDirectPcm) {
ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s",
desc->name, mThreadName);
lStatus = BAD_VALUE;
@@ -1163,7 +1187,7 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
// Reject any effect on mixer or duplicating multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
- if ((mType == MIXER || mType == DUPLICATING) && mChannelCount != FCC_2) {
+ if ((mType == MIXER || mType == DUPLICATING) && mChannelCount > FCC_2) {
ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) %s threads",
desc->name, mChannelCount, mType == MIXER ? "MIXER" : "DUPLICATING");
lStatus = BAD_VALUE;
@@ -1171,12 +1195,17 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
}
// Allow global effects only on offloaded and mixer threads
+ // Exception: allow effects for Direct PCM
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
switch (mType) {
case MIXER:
case OFFLOAD:
break;
case DIRECT:
+ if (mIsDirectPcm) {
+ // Allow global effects when direct PCM is enabled on this DIRECT output
+ break;
+ }
case DUPLICATING:
case RECORD:
default:
@@ -1229,7 +1258,13 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
if (lStatus != NO_ERROR) {
goto Exit;
}
- effect->setOffloaded(mType == OFFLOAD, mId);
+
+ bool setVal = false;
+ if (mType == OFFLOAD || (mType == DIRECT && mIsDirectPcm)) {
+ setVal = true;
+ }
+
+ effect->setOffloaded(setVal, mId);
lStatus = chain->addEffect_l(effect);
if (lStatus != NO_ERROR) {
@@ -1313,7 +1348,13 @@ status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
return BAD_VALUE;
}
- effect->setOffloaded(mType == OFFLOAD, mId);
+ bool setVal = false;
+
+ if ((mType == OFFLOAD) || (mType == DIRECT && mIsDirectPcm)) {
+ setVal = true;
+ }
+
+ effect->setOffloaded(setVal, mId);
status_t status = chain->addEffect_l(effect);
if (status != NO_ERROR) {
@@ -1589,6 +1630,7 @@ void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>&
dprintf(fd, " Mixer buffer: %p\n", mMixerBuffer);
dprintf(fd, " Effect buffer: %p\n", mEffectBuffer);
dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
+ dprintf(fd, " Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
AudioStreamOut *output = mOutput;
audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
String8 flagsAsString = outputFlagsToString(flags);
@@ -2166,6 +2208,7 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l()
kUseFastMixer == FastMixer_Dynamic)) {
size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000;
size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000;
+
// round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
maxNormalFrameCount = maxNormalFrameCount & ~15;
@@ -2181,19 +2224,6 @@ void AudioFlinger::PlaybackThread::readOutputParameters_l()
} else {
multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
}
- } else {
- // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL
- // SRC (it would be unusual for the normal sink buffer size to not be a multiple of fast
- // track, but we sometimes have to do this to satisfy the maximum frame count
- // constraint)
- // FIXME this rounding up should not be done if no HAL SRC
- uint32_t truncMult = (uint32_t) multiplier;
- if ((truncMult & 1)) {
- if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) {
- ++truncMult;
- }
- }
- multiplier = (double) truncMult;
}
}
mNormalFrameCount = multiplier * mFrameCount;
@@ -2513,7 +2543,8 @@ The derived values that are cached:
- mSinkBufferSize from frame count * frame size
- mActiveSleepTimeUs from activeSleepTimeUs()
- mIdleSleepTimeUs from idleSleepTimeUs()
- - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only)
+ - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only) or forced to at least
+ kDefaultStandbyTimeInNsecs when connected to an A2DP device.
- maxPeriod from frame count and sample rate (MIXER only)
The parameters that affect these derived values are:
@@ -2532,13 +2563,21 @@ void AudioFlinger::PlaybackThread::cacheParameters_l()
mSinkBufferSize = mNormalFrameCount * mFrameSize;
mActiveSleepTimeUs = activeSleepTimeUs();
mIdleSleepTimeUs = idleSleepTimeUs();
+
+ // make sure standby delay is not too short when connected to an A2DP sink to avoid
+ // truncating audio when going to standby.
+ mStandbyDelayNs = AudioFlinger::mStandbyTimeInNsecs;
+ if ((mOutDevice & AUDIO_DEVICE_OUT_ALL_A2DP) != 0) {
+ if (mStandbyDelayNs < kDefaultStandbyTimeInNsecs) {
+ mStandbyDelayNs = kDefaultStandbyTimeInNsecs;
+ }
+ }
}
-void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+void AudioFlinger::PlaybackThread::invalidateTracks_l(audio_stream_type_t streamType)
{
ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
this, streamType, mTracks.size());
- Mutex::Autolock _l(mLock);
size_t size = mTracks.size();
for (size_t i = 0; i < size; i++) {
@@ -2549,6 +2588,12 @@ void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamTy
}
}
+void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+{
+ Mutex::Autolock _l(mLock);
+ invalidateTracks_l(streamType);
+}
+
status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
{
int session = chain->sessionId();
@@ -2720,6 +2765,19 @@ bool AudioFlinger::PlaybackThread::threadLoop()
const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
acquireWakeLock();
+#ifdef SRS_PROCESSING
+ String8 bt_param = String8("bluetooth_enabled=0");
+ POSTPRO_PATCH_PARAMS_SET(bt_param);
+ if (mType == MIXER) {
+ POSTPRO_PATCH_OUTPROC_PLAY_INIT(this, myName);
+ } else if (mType == OFFLOAD) {
+ POSTPRO_PATCH_OUTPROC_DIRECT_INIT(this, myName);
+ POSTPRO_PATCH_OUTPROC_PLAY_ROUTE_BY_VALUE(this, mOutDevice);
+ } else if (mType == DIRECT) {
+ POSTPRO_PATCH_OUTPROC_DIRECT_INIT(this, myName);
+ POSTPRO_PATCH_OUTPROC_PLAY_ROUTE_BY_VALUE(this, mOutDevice);
+ }
+#endif
// mNBLogWriter->log can only be called while thread mutex mLock is held.
// So if you need to log when mutex is unlocked, set logString to a non-NULL string,
@@ -2895,7 +2953,8 @@ bool AudioFlinger::PlaybackThread::threadLoop()
}
// only process effects if we're going to write
- if (mSleepTimeUs == 0 && mType != OFFLOAD) {
+ if (mSleepTimeUs == 0 && mType != OFFLOAD &&
+ !(mType == DIRECT && mIsDirectPcm)) {
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
}
@@ -2905,12 +2964,18 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// was read from audio track: process only updates effect state
// and thus does have to be synchronized with audio writes but may have
// to be called while waiting for async write callback
- if (mType == OFFLOAD) {
+ if ((mType == OFFLOAD) || (mType == DIRECT && mIsDirectPcm)) {
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
}
}
-
+#ifdef SRS_PROCESSING
+ // Offload thread
+ if (mType == OFFLOAD) {
+ char buffer[2];
+ POSTPRO_PATCH_OUTPROC_DIRECT_SAMPLES(this, AUDIO_FORMAT_PCM_16_BIT, (int16_t *) buffer, 2, 48000, 2);
+ }
+#endif
// Only if the Effects buffer is enabled and there is data in the
// Effects buffer (buffer valid), we need to
// copy into the sink buffer.
@@ -2928,6 +2993,11 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// mSleepTimeUs == 0 means we must write to audio hardware
if (mSleepTimeUs == 0) {
ssize_t ret = 0;
+#ifdef SRS_PROCESSING
+ if (mType == MIXER && mMixerStatus == MIXER_TRACKS_READY) {
+ POSTPRO_PATCH_OUTPROC_PLAY_SAMPLES(this, mFormat, mSinkBuffer, mSinkBufferSize, mSampleRate, mChannelCount);
+ }
+#endif
if (mBytesRemaining) {
ret = threadLoop_write();
if (ret < 0) {
@@ -2971,8 +3041,9 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// the app won't fill fast enough to handle the sudden draw).
const int32_t deltaMs = delta / 1000000;
- const int32_t throttleMs = mHalfBufferMs - deltaMs;
- if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
+ const int32_t halfBufferMs = mHalfBufferMs / (mEffectBufferValid ? 4 : 1);
+ const int32_t throttleMs = halfBufferMs - deltaMs;
+ if ((signed)halfBufferMs >= throttleMs && throttleMs > 0) {
usleep(throttleMs * 1000);
// notify of throttle start on verbose log
ALOGV_IF(mThreadThrottleEndMs == mThreadThrottleTimeMs,
@@ -3023,7 +3094,15 @@ bool AudioFlinger::PlaybackThread::threadLoop()
threadLoop_standby();
mStandby = true;
}
-
+#ifdef SRS_PROCESSING
+ if (mType == MIXER) {
+ POSTPRO_PATCH_OUTPROC_PLAY_EXIT(this, myName);
+ } else if (mType == OFFLOAD) {
+ POSTPRO_PATCH_OUTPROC_DIRECT_EXIT(this, myName);
+ } else if (mType == DIRECT) {
+ POSTPRO_PATCH_OUTPROC_DIRECT_EXIT(this, myName);
+ }
+#endif
releaseWakeLock();
mWakeLockUids.clear();
mActiveTracksGeneration++;
@@ -3117,6 +3196,10 @@ status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_pat
type |= patch->sinks[i].ext.device.type;
}
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_OUTPROC_PLAY_ROUTE_BY_VALUE(this, type);
+#endif
+
#ifdef ADD_BATTERY_DATA
// when changing the audio output device, call addBatteryData to notify
// the change
@@ -3300,11 +3383,15 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud
}
if (initFastMixer) {
audio_format_t fastMixerFormat;
+#ifdef LEGACY_ALSA_AUDIO
+ fastMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
+#else
if (mMixerBufferEnabled && mEffectBufferEnabled) {
fastMixerFormat = AUDIO_FORMAT_PCM_FLOAT;
} else {
fastMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
}
+#endif
if (mFormat != fastMixerFormat) {
// change our Sink format to accept our intermediate precision
mFormat = fastMixerFormat;
@@ -4248,6 +4335,7 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa
status_t& status)
{
bool reconfig = false;
+ bool a2dpDeviceChanged = false;
status = NO_ERROR;
@@ -4268,6 +4356,9 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa
AudioParameter param = AudioParameter(keyValuePair);
int value;
+#ifdef SRS_PROCESSING
+ POSTPRO_PATCH_OUTPROC_PLAY_ROUTE(this, param, value);
+#endif
if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
reconfig = true;
}
@@ -4324,6 +4415,8 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa
// forward device change to effects that have requested to be
// aware of attached audio device.
if (value != AUDIO_DEVICE_NONE) {
+ a2dpDeviceChanged =
+ (mOutDevice & AUDIO_DEVICE_OUT_ALL_A2DP) != (value & AUDIO_DEVICE_OUT_ALL_A2DP);
mOutDevice = value;
for (size_t i = 0; i < mEffectChains.size(); i++) {
mEffectChains[i]->setDevice_l(mOutDevice);
@@ -4367,7 +4460,7 @@ bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePa
sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
}
- return reconfig;
+ return reconfig || a2dpDeviceChanged;
}
@@ -4382,8 +4475,12 @@ void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& ar
dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
// Make a non-atomic copy of fast mixer dump state so it won't change underneath us
- const FastMixerDumpState copy(mFastMixerDumpState);
- copy.dump(fd);
+ // while we are dumping it. It may be inconsistent, but it won't mutate!
+ // This is a large object so we place it on the heap.
+ // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+ const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+ copy->dump(fd);
+ delete copy;
#ifdef STATE_QUEUE_DUMP
// Similar for state queue
@@ -4775,6 +4872,10 @@ bool AudioFlinger::DirectOutputThread::shouldStandby_l()
bool trackPaused = false;
bool trackStopped = false;
+ if ((mType == DIRECT) && audio_is_linear_pcm(mFormat) && !usesHwAvSync()) {
+ return !mStandby;
+ }
+
// do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
// after a timeout and we will enter standby then.
if (mTracks.size() > 0) {
@@ -4803,15 +4904,19 @@ bool AudioFlinger::DirectOutputThread::checkForNewParameter_l(const String8& key
status_t& status)
{
bool reconfig = false;
+ bool a2dpDeviceChanged = false;
status = NO_ERROR;
AudioParameter param = AudioParameter(keyValuePair);
int value;
+
if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
// forward device change to effects that have requested to be
// aware of attached audio device.
if (value != AUDIO_DEVICE_NONE) {
+ a2dpDeviceChanged =
+ (mOutDevice & AUDIO_DEVICE_OUT_ALL_A2DP) != (value & AUDIO_DEVICE_OUT_ALL_A2DP);
mOutDevice = value;
for (size_t i = 0; i < mEffectChains.size(); i++) {
mEffectChains[i]->setDevice_l(mOutDevice);
@@ -4844,7 +4949,7 @@ bool AudioFlinger::DirectOutputThread::checkForNewParameter_l(const String8& key
}
}
- return reconfig;
+ return reconfig || a2dpDeviceChanged;
}
uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const
@@ -4891,6 +4996,8 @@ void AudioFlinger::DirectOutputThread::cacheParameters_l()
mStandbyDelayNs = 0;
} else if ((mType == OFFLOAD) && !audio_is_linear_pcm(mFormat)) {
mStandbyDelayNs = kOffloadStandbyDelayNs;
+ } else if (mType == DIRECT && mIsDirectPcm) {
+ mStandbyDelayNs = kOffloadStandbyDelayNs;
} else {
mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
}
@@ -5062,15 +5169,9 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
if (track->isInvalid()) {
ALOGW("An invalidated track shouldn't be in active list");
tracksToRemove->add(track);
- continue;
- }
-
- if (track->mState == TrackBase::IDLE) {
+ } else if (track->mState == TrackBase::IDLE) {
ALOGW("An idle track shouldn't be in active list");
- continue;
- }
-
- if (track->isPausing()) {
+ } else if (track->isPausing()) {
track->setPaused();
if (last) {
if (mHwSupportsPause && !mHwPaused) {
@@ -5093,7 +5194,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr
if (last) {
mFlushPending = true;
}
- } else if (track->isResumePending()){
+ } else if (track->isResumePending()) {
track->resumeAck();
if (last) {
if (mPausedBytesRemaining) {
@@ -5269,6 +5370,13 @@ void AudioFlinger::OffloadThread::flushHw_l()
}
}
+void AudioFlinger::OffloadThread::invalidateTracks(audio_stream_type_t streamType)
+{
+ Mutex::Autolock _l(mLock);
+ mFlushPending = true;
+ PlaybackThread::invalidateTracks_l(streamType);
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
@@ -5295,6 +5403,8 @@ void AudioFlinger::DuplicatingThread::threadLoop_mix()
} else {
if (mMixerBufferValid) {
memset(mMixerBuffer, 0, mMixerBufferSize);
+ } else if (mEffectBufferValid) {
+ memset(mEffectBuffer, 0, mEffectBufferSize);
} else {
memset(mSinkBuffer, 0, mSinkBufferSize);
}
@@ -5316,7 +5426,11 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
} else if (mBytesWritten != 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
writeFrames = mNormalFrameCount;
- memset(mSinkBuffer, 0, mSinkBufferSize);
+ if (mMixerBufferValid) {
+ memset(mMixerBuffer, 0, mMixerBufferSize);
+ } else {
+ memset(mSinkBuffer, 0, mSinkBufferSize);
+ }
} else {
// flush remaining overflow buffers in output tracks
writeFrames = 0;
@@ -6356,9 +6470,13 @@ void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& a
dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
- // Make a non-atomic copy of fast capture dump state so it won't change underneath us
- const FastCaptureDumpState copy(mFastCaptureDumpState);
- copy.dump(fd);
+ // Make a non-atomic copy of fast capture dump state so it won't change underneath us
+ // while we are dumping it. It may be inconsistent, but it won't mutate!
+ // This is a large object so we place it on the heap.
+ // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+ const FastCaptureDumpState *copy = new FastCaptureDumpState(mFastCaptureDumpState);
+ copy->dump(fd);
+ delete copy;
}
void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -6556,7 +6674,11 @@ size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
break;
}
// format convert to destination buffer
+#ifdef LEGACY_ALSA_AUDIO
+ convert(dst, buffer.raw, buffer.frameCount);
+#else
convertNoResampler(dst, buffer.raw, buffer.frameCount);
+#endif
dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
i -= buffer.frameCount;
@@ -6576,7 +6698,11 @@ size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
memset(mBuf, 0, frames * mBufFrameSize);
frames = mResampler->resample((int32_t*)mBuf, frames, provider);
// format convert to destination buffer
+#ifdef LEGACY_ALSA_AUDIO
+ convert(dst, mBuf, frames);
+#else
convertResampler(dst, mBuf, frames);
+#endif
}
return frames;
}
@@ -6677,6 +6803,56 @@ status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters(
return NO_ERROR;
}
+#ifdef LEGACY_ALSA_AUDIO
+void AudioFlinger::RecordThread::RecordBufferConverter::convert(
+ void *dst, /*const*/ void *src, size_t frames)
+{
+ // check if a memcpy will do
+ if (mResampler == NULL
+ && mSrcChannelCount == mDstChannelCount
+ && mSrcFormat == mDstFormat) {
+ memcpy(dst, src,
+ frames * mDstChannelCount * audio_bytes_per_sample(mDstFormat));
+ return;
+ }
+ // reallocate buffer if needed
+ if (mBufFrameSize != 0 && mBufFrames < frames) {
+ free(mBuf);
+ mBufFrames = frames;
+ (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+ }
+ // do processing
+ if (mResampler != NULL) {
+ // src channel count is always >= 2.
+ void *dstBuf = mBuf != NULL ? mBuf : dst;
+ // ditherAndClamp() works as long as all buffers returned by
+ // activeTrack->getNextBuffer() are 32 bit aligned which should be always true.
+ if (mDstChannelCount == 1) {
+ // the resampler always outputs stereo samples.
+ // FIXME: this rewrites back into src
+ ditherAndClamp((int32_t *)src, (const int32_t *)src, frames);
+ downmix_to_mono_i16_from_stereo_i16((int16_t *)dstBuf,
+ (const int16_t *)src, frames);
+ } else {
+ ditherAndClamp((int32_t *)dstBuf, (const int32_t *)src, frames);
+ }
+ } else if (mSrcChannelCount != mDstChannelCount) {
+ void *dstBuf = mBuf != NULL ? mBuf : dst;
+ if (mSrcChannelCount == 1) {
+ upmix_to_stereo_i16_from_mono_i16((int16_t *)dstBuf, (const int16_t *)src,
+ frames);
+ } else {
+ downmix_to_mono_i16_from_stereo_i16((int16_t *)dstBuf,
+ (const int16_t *)src, frames);
+ }
+ }
+ if (mSrcFormat != mDstFormat) {
+ void *srcBuf = mBuf != NULL ? mBuf : src;
+ memcpy_by_audio_format(dst, mDstFormat, srcBuf, mSrcFormat,
+ frames * mDstChannelCount);
+ }
+}
+#else
void AudioFlinger::RecordThread::RecordBufferConverter::convertNoResampler(
void *dst, const void *src, size_t frames)
{
@@ -6750,6 +6926,7 @@ void AudioFlinger::RecordThread::RecordBufferConverter::convertResampler(
memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
frames * mDstChannelCount);
}
+#endif
bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
status_t& status)
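Among the Threads.cpp changes, cacheParameters_l() now forces a minimum standby delay whenever an A2DP sink is among the output devices, so the Bluetooth stream is not truncated by an early standby. A standalone restatement of that clamp; the device mask and default value below are illustrative placeholders rather than the real system/audio.h constants:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kDeviceOutAllA2dp          = 0x00000380u;     // placeholder A2DP mask
    constexpr int64_t  kDefaultStandbyTimeInNsecs = 3000000000LL;    // placeholder default (3 s)

    // Mirrors the logic added to PlaybackThread::cacheParameters_l().
    int64_t standbyDelayNs(int64_t configuredStandbyNs, uint32_t outDevice) {
        int64_t delay = configuredStandbyNs;
        if ((outDevice & kDeviceOutAllA2dp) != 0 && delay < kDefaultStandbyTimeInNsecs) {
            delay = kDefaultStandbyTimeInNsecs;   // never shorter than the default on A2DP
        }
        return delay;
    }

    int main() {
        assert(standbyDelayNs(1000000000LL, kDeviceOutAllA2dp) == kDefaultStandbyTimeInNsecs);
        assert(standbyDelayNs(1000000000LL, 0x2u /* e.g. speaker */) == 1000000000LL);
        return 0;
    }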
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 46ac300..8fab1e4 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -13,6 +13,24 @@
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
+**
+** This file was modified by DTS, Inc. The portions of the
+** code that are surrounded by "DTS..." are copyrighted and
+** licensed separately, as follows:
+**
+** (C) 2015 DTS, Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
*/
#ifndef INCLUDING_FROM_AUDIOFLINGER_H
@@ -457,6 +475,7 @@ protected:
static const size_t kLogSize = 4 * 1024;
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
+ bool mIsDirectPcm; // true when this DIRECT thread carries direct PCM output
};
// --- PlaybackThread ---
@@ -536,7 +555,7 @@ public:
void setMasterVolume(float value);
void setMasterMute(bool muted);
-
+ void setPostPro();
void setStreamVolume(audio_stream_type_t stream, float value);
void setStreamMute(audio_stream_type_t stream, bool muted);
@@ -598,7 +617,8 @@ public:
virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
// called with AudioFlinger lock held
- void invalidateTracks(audio_stream_type_t streamType);
+ void invalidateTracks_l(audio_stream_type_t streamType);
+ virtual void invalidateTracks(audio_stream_type_t streamType);
virtual size_t frameCount() const { return mNormalFrameCount; }
@@ -981,6 +1001,7 @@ protected:
virtual bool waitingAsyncCallback();
virtual bool waitingAsyncCallback_l();
+ virtual void invalidateTracks(audio_stream_type_t streamType);
private:
size_t mPausedWriteLength; // length in bytes of write interrupted by pause
@@ -1159,11 +1180,16 @@ public:
}
private:
+#ifdef LEGACY_ALSA_AUDIO
+ // internal convert function for format and channel mask.
+ void convert(void *dst, /*const*/ void *src, size_t frames);
+#else
// format conversion when not using resampler
void convertNoResampler(void *dst, const void *src, size_t frames);
// format conversion when using resampler; modifies src in-place
void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
+#endif
// user provided information
audio_channel_mask_t mSrcChannelMask;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 0e24b52..f3b5375 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -24,6 +24,7 @@
#include <math.h>
#include <sys/syscall.h>
#include <utils/Log.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <private/media/AudioTrackShared.h>
@@ -706,10 +707,11 @@ status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t ev
mState = state;
}
}
- // track was already in the active list, not a problem
- if (status == ALREADY_EXISTS) {
- status = NO_ERROR;
- } else {
+ // If the track was already in the active list, that is not a problem unless it is
+ // a fast track using a shared buffer whose framesReady count has already dropped to 0.
+ // In that case we must call obtainBuffer() to refresh the framesReady value.
+ if ((status != ALREADY_EXISTS) ||
+ (isFastTrack() && (mSharedBuffer != 0) && (framesReady() == 0))) {
// Acknowledge any pending flush(), so that subsequent new data isn't discarded.
// It is usually unsafe to access the server proxy from a binder thread.
// But in this case we know the mixer thread (whether normal mixer or fast mixer)
@@ -720,6 +722,9 @@ status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t ev
buffer.mFrameCount = 1;
(void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
}
+
+ if (status == ALREADY_EXISTS)
+ status = NO_ERROR;
} else {
status = BAD_VALUE;
}
@@ -1775,6 +1780,7 @@ bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frame
if (mBufferQueue.size() < kMaxOverFlowBuffers) {
pInBuffer = new Buffer;
pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
+ CHECK(pInBuffer->mBuffer != NULL);
pInBuffer->frameCount = inBuffer.frameCount;
pInBuffer->raw = pInBuffer->mBuffer;
memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
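The Track::start() change above widens the case where obtainBuffer() is called with ackFlush: not only for a newly activated track, but also when a fast track backed by a shared (static) buffer is restarted after its framesReady count has drained to zero. The predicate, restated as a small standalone function:

    #include <cassert>
    #include <cstddef>

    // Mirrors the condition added in AudioFlinger::PlaybackThread::Track::start():
    // refresh the server proxy (obtainBuffer with ackFlush) unless the track was
    // already active and does not need its framesReady value rebuilt.
    bool needsProxyRefresh(bool alreadyExists, bool isFastTrack,
                           bool hasSharedBuffer, size_t framesReady) {
        return !alreadyExists ||
               (isFastTrack && hasSharedBuffer && framesReady == 0);
    }

    int main() {
        assert(needsProxyRefresh(false, false, false, 128));   // newly started track
        assert(needsProxyRefresh(true,  true,  true,  0));     // drained fast static track
        assert(!needsProxyRefresh(true, false, false, 0));     // ordinary restart: nothing to do
        return 0;
    }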
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 5b38e1c..69fc0e8 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -40,6 +40,10 @@ LOCAL_SHARED_LIBRARIES += \
libaudiopolicymanager
endif
+ifeq ($(BOARD_HAVE_PRE_KITKAT_AUDIO_POLICY_BLOB),true)
+ LOCAL_CFLAGS += -DHAVE_PRE_KITKAT_AUDIO_POLICY_BLOB
+endif
+
LOCAL_STATIC_LIBRARIES := \
libmedia_helper \
libaudiopolicycomponents
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index c1e7bc0..93e6266 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -331,6 +331,9 @@ public:
virtual audio_unique_id_t newAudioUniqueId() = 0;
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
+
+ virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& streamInfo, bool added) = 0;
+
};
extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 8728ff3..5ef9b38 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -31,6 +31,25 @@ LOCAL_C_INCLUDES += \
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH)/include
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_FLAC_OFFLOAD)),true)
+LOCAL_CFLAGS += -DFLAC_OFFLOAD_ENABLED
+endif
+ifneq ($(strip $(AUDIO_FEATURE_ENABLED_PROXY_DEVICE)),false)
+LOCAL_CFLAGS += -DAUDIO_EXTN_AFE_PROXY_ENABLED
+endif
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_WMA_OFFLOAD)),true)
+LOCAL_CFLAGS += -DWMA_OFFLOAD_ENABLED
+endif
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_ALAC_OFFLOAD)),true)
+LOCAL_CFLAGS += -DALAC_OFFLOAD_ENABLED
+endif
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_APE_OFFLOAD)),true)
+LOCAL_CFLAGS += -DAPE_OFFLOAD_ENABLED
+endif
+ifeq ($(strip $(AUDIO_FEATURE_ENABLED_AAC_ADTS_OFFLOAD)),true)
+LOCAL_CFLAGS += -DAAC_ADTS_OFFLOAD_ENABLED
+endif
+
LOCAL_MODULE := libaudiopolicycomponents
include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 50f622d..e1c2999 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -72,6 +72,7 @@ public:
sp<AudioPort> mPort;
audio_devices_t mDevice; // current device this output is routed to
audio_patch_handle_t mPatchHandle;
+ audio_io_handle_t mIoHandle; // output handle
uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
nsecs_t mStopTime[AUDIO_STREAM_CNT];
float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume in dB
@@ -116,7 +117,6 @@ public:
virtual void toAudioPort(struct audio_port *port) const;
const sp<IOProfile> mProfile; // I/O profile this output derives from
- audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
audio_output_flags_t mFlags; //
AudioMix *mPolicyMix; // non NULL when used by a dynamic policy
diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
index 78d2cdf..6f80435 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
@@ -74,6 +74,9 @@ const StringToEnum sDeviceTypeToEnumTable[] = {
STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
STRING_TO_ENUM(AUDIO_DEVICE_OUT_IP),
+#ifdef AUDIO_EXTN_AFE_PROXY_ENABLED
+ STRING_TO_ENUM(AUDIO_DEVICE_OUT_PROXY),
+#endif
STRING_TO_ENUM(AUDIO_DEVICE_IN_AMBIENT),
STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
@@ -96,6 +99,9 @@ const StringToEnum sDeviceTypeToEnumTable[] = {
STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
STRING_TO_ENUM(AUDIO_DEVICE_IN_IP),
+#ifdef LEGACY_ALSA_AUDIO
+ STRING_TO_ENUM(AUDIO_DEVICE_IN_COMMUNICATION),
+#endif
};
const StringToEnum sDeviceNameToEnumTable[] = {
@@ -153,6 +159,7 @@ const StringToEnum sDeviceNameToEnumTable[] = {
const StringToEnum sOutputFlagNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+ STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT_PCM),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
@@ -162,6 +169,7 @@ const StringToEnum sOutputFlagNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_TTS),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_RAW),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
+ STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_VOIP_RX),
};
const StringToEnum sInputFlagNameToEnumTable[] = {
@@ -198,6 +206,33 @@ const StringToEnum sFormatNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
STRING_TO_ENUM(AUDIO_FORMAT_DTS),
STRING_TO_ENUM(AUDIO_FORMAT_DTS_HD),
+#ifdef FLAC_OFFLOAD_ENABLED
+ STRING_TO_ENUM(AUDIO_FORMAT_FLAC),
+#endif
+#ifdef WMA_OFFLOAD_ENABLED
+ STRING_TO_ENUM(AUDIO_FORMAT_WMA),
+ STRING_TO_ENUM(AUDIO_FORMAT_WMA_PRO),
+#endif
+ STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT_OFFLOAD),
+ STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_OFFLOAD),
+#ifdef ALAC_OFFLOAD_ENABLED
+ STRING_TO_ENUM(AUDIO_FORMAT_ALAC),
+#endif
+#ifdef APE_OFFLOAD_ENABLED
+ STRING_TO_ENUM(AUDIO_FORMAT_APE),
+#endif
+#ifdef AAC_ADTS_OFFLOAD_ENABLED
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_MAIN),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_LC),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_SSR),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_LTP),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V1),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_SCALABLE),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_ERLC),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_LD),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V2),
+ STRING_TO_ENUM(AUDIO_FORMAT_AAC_ADTS_ELD),
+#endif
};
const StringToEnum sOutChannelsNameToEnumTable[] = {
@@ -206,12 +241,22 @@ const StringToEnum sOutChannelsNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_CHANNEL_OUT_QUAD),
STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_2POINT1),
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_SURROUND),
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_PENTA),
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_6POINT1),
};
const StringToEnum sInChannelsNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_5POINT1),
+#ifdef LEGACY_ALSA_AUDIO
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
+ STRING_TO_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
+#endif
};
const StringToEnum sIndexChannelsNameToEnumTable[] = {
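For context, the entries added to these tables are consumed by a plain name-to-value scan when the audio policy configuration file is parsed; entries compiled out by the build flags simply never match. Below is a minimal standalone sketch of that lookup. The table name, enum values and helper are illustrative stand-ins, not the actual ConfigParsingUtils code.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative stand-in for the framework's StringToEnum pair and macro.
    struct StringToEnum { const char *name; uint32_t value; };
    #define STRING_TO_ENUM(s) { #s, s }

    // Hypothetical values standing in for audio_devices_t constants.
    enum { AUDIO_DEVICE_OUT_SPEAKER = 0x2u, AUDIO_DEVICE_OUT_PROXY = 0x2000000u };

    static const StringToEnum sExampleDeviceTable[] = {
        STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
    #ifdef AUDIO_EXTN_AFE_PROXY_ENABLED
        STRING_TO_ENUM(AUDIO_DEVICE_OUT_PROXY),   // only present on AFE-proxy builds
    #endif
    };

    // Linear scan from a config-file token to its enum value; returns defValue when
    // the token is unknown (or compiled out on this build).
    static uint32_t stringToEnum(const StringToEnum *table, size_t size,
                                 const char *name, uint32_t defValue) {
        for (size_t i = 0; i < size; i++) {
            if (strcmp(table[i].name, name) == 0) return table[i].value;
        }
        return defValue;
    }

    int main() {
        printf("%#x\n", stringToEnum(sExampleDeviceTable,
                sizeof(sExampleDeviceTable) / sizeof(sExampleDeviceTable[0]),
                "AUDIO_DEVICE_OUT_SPEAKER", 0));
        return 0;
    }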
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index c9783a1..396541b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -21,6 +21,7 @@
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
#include <utils/Errors.h>
+#include <utils/Thread.h>
namespace android {
@@ -66,6 +67,8 @@ private:
* Maximum memory allocated to audio effects in KB
*/
static const uint32_t MAX_EFFECTS_MEMORY = 512;
+
+ Mutex mLock;
};
}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index a278375..cefbe79 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -33,7 +33,7 @@ namespace android {
AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
AudioPolicyClientInterface *clientInterface)
- : mPort(port), mDevice(AUDIO_DEVICE_NONE),
+ : mPort(port), mDevice(AUDIO_DEVICE_NONE), mIoHandle(0),
mPatchHandle(0), mClientInterface(clientInterface), mId(0)
{
// clear usage count for all stream types
@@ -223,7 +223,7 @@ void AudioOutputDescriptor::log(const char* indent)
SwAudioOutputDescriptor::SwAudioOutputDescriptor(
const sp<IOProfile>& profile, AudioPolicyClientInterface *clientInterface)
: AudioOutputDescriptor(profile, clientInterface),
- mProfile(profile), mIoHandle(0), mLatency(0),
+ mProfile(profile), mLatency(0),
mFlags((audio_output_flags_t)0), mPolicyMix(NULL),
mOutput1(0), mOutput2(0), mDirectOpenCount(0), mGlobalRefCount(0)
{
@@ -428,7 +428,11 @@ audio_io_handle_t SwAudioOutputCollection::getA2dpOutput() const
return this->keyAt(i);
}
}
+#ifdef LEGACY_ALSA_AUDIO
+ return 1;
+#else
return 0;
+#endif
}
sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getPrimaryOutput() const
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 33d838d..6a0d079 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -56,6 +56,7 @@ status_t EffectDescriptorCollection::registerEffect(const effect_descriptor_t *d
int session,
int id)
{
+ Mutex::Autolock _l(mLock);
if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
desc->name, desc->memoryUsage);
@@ -80,6 +81,7 @@ status_t EffectDescriptorCollection::registerEffect(const effect_descriptor_t *d
status_t EffectDescriptorCollection::unregisterEffect(int id)
{
+ Mutex::Autolock _l(mLock);
ssize_t index = indexOfKey(id);
if (index < 0) {
ALOGW("unregisterEffect() unknown effect ID %d", id);
@@ -106,6 +108,7 @@ status_t EffectDescriptorCollection::unregisterEffect(int id)
status_t EffectDescriptorCollection::setEffectEnabled(int id, bool enabled)
{
+ Mutex::Autolock _l(mLock);
ssize_t index = indexOfKey(id);
if (index < 0) {
ALOGW("unregisterEffect() unknown effect ID %d", id);
@@ -148,6 +151,7 @@ status_t EffectDescriptorCollection::setEffectEnabled(const sp<EffectDescriptor>
bool EffectDescriptorCollection::isNonOffloadableEffectEnabled()
{
+ Mutex::Autolock _l(mLock);
for (size_t i = 0; i < size(); i++) {
sp<EffectDescriptor> effectDesc = valueAt(i);
if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
@@ -172,6 +176,7 @@ uint32_t EffectDescriptorCollection::getMaxEffectsMemory() const
status_t EffectDescriptorCollection::dump(int fd)
{
+ Mutex::Autolock _l(mLock);
const size_t SIZE = 256;
char buffer[SIZE];
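The new mLock plus Mutex::Autolock pairing above serializes every accessor of the effect collection, so concurrent register/unregister/dump calls cannot race on the shared memory accounting. A compact standalone sketch of the same pattern, using std::mutex and std::lock_guard in place of the android Mutex classes (class and member names here are illustrative):

    #include <cstdio>
    #include <map>
    #include <mutex>

    // Stand-in for EffectDescriptorCollection: each public method holds the lock for
    // its whole duration, so the memory accounting can never be observed half-updated.
    class EffectCollection {
    public:
        bool registerEffect(int id, unsigned memoryKB) {
            std::lock_guard<std::mutex> _l(mLock);
            if (mTotalMemoryKB + memoryKB > kMaxMemoryKB) return false;  // limit exceeded
            mTotalMemoryKB += memoryKB;
            mEffects[id] = memoryKB;
            return true;
        }
        void unregisterEffect(int id) {
            std::lock_guard<std::mutex> _l(mLock);
            auto it = mEffects.find(id);
            if (it == mEffects.end()) return;  // unknown effect ID
            mTotalMemoryKB -= it->second;
            mEffects.erase(it);
        }
        void dump() {
            std::lock_guard<std::mutex> _l(mLock);
            printf("effects=%zu totalMemory=%u KB\n", mEffects.size(), mTotalMemoryKB);
        }
    private:
        static constexpr unsigned kMaxMemoryKB = 512;  // same cap as MAX_EFFECTS_MEMORY
        std::mutex mLock;
        std::map<int, unsigned> mEffects;   // effect id -> memory usage in KB
        unsigned mTotalMemoryKB = 0;
    };

    int main() {
        EffectCollection fx;
        fx.registerEffect(1, 128);
        fx.dump();
        fx.unregisterEffect(1);
        return 0;
    }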
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
index b682e2c..4ca27c2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
@@ -35,7 +35,10 @@ namespace android {
StreamDescriptor::StreamDescriptor()
: mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
{
- mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
+ // Initialize the current stream's index to mIndexMax so volume isn't 0 in
+ // cases where the Java layer doesn't call into the audio policy service to
+ // set the default volume.
+ mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, mIndexMax);
}
int StreamDescriptor::getVolumeIndex(audio_devices_t device) const
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk
index c402fd5..be86231 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk
@@ -4,4 +4,4 @@ LOCAL_PATH := $(call my-dir)
# Recursive call sub-folder Android.mk
#######################################################################
-include $(call all-makefiles-under,$(LOCAL_PATH))
+include $(LOCAL_PATH)/plugin/Android.mk
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
index bea2c19..a929435 100755
--- a/services/audiopolicy/engineconfigurable/src/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Stream.cpp
@@ -98,13 +98,13 @@ float Element<audio_stream_type_t>::volIndexToDb(Volume::device_category deviceC
if (it == mVolumeProfiles.end()) {
ALOGE("%s: device category %d not found for stream %s", __FUNCTION__, deviceCategory,
getName().c_str());
- return 1.0f;
+ return 0.0f;
}
const VolumeCurvePoints curve = mVolumeProfiles[deviceCategory];
if (curve.size() != Volume::VOLCNT) {
ALOGE("%s: invalid profile for category %d and for stream %s", __FUNCTION__, deviceCategory,
getName().c_str());
- return 1.0f;
+ return 0.0f;
}
// the volume index in the UI is relative to the min and max volume indices for this stream type
@@ -113,7 +113,7 @@ float Element<audio_stream_type_t>::volIndexToDb(Volume::device_category deviceC
if (mIndexMax - mIndexMin == 0) {
ALOGE("%s: Invalid volume indexes Min=Max=%d", __FUNCTION__, mIndexMin);
- return 1.0f;
+ return 0.0f;
}
int volIdx = (nbSteps * (indexInUi - mIndexMin)) /
(mIndexMax - mIndexMin);
@@ -121,7 +121,7 @@ float Element<audio_stream_type_t>::volIndexToDb(Volume::device_category deviceC
// find what part of the curve this index volume belongs to, or if it's out of bounds
int segment = 0;
if (volIdx < curve[Volume::VOLMIN].mIndex) { // out of bounds
- return 0.0f;
+ return VOLUME_MIN_DB;
} else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
segment = 0;
} else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
@@ -129,7 +129,7 @@ float Element<audio_stream_type_t>::volIndexToDb(Volume::device_category deviceC
} else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
segment = 2;
} else { // out of bounds
- return 1.0f;
+ return 0.0f;
}
// linear interpolation in the attenuation table in dB
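The constant swaps above read naturally once volIndexToDb() is taken to return decibels rather than a linear gain: 0.0f is unity gain (the sane fallback on error paths), and VOLUME_MIN_DB is effectively silence for indexes below the first curve point. A small standalone illustration of the dB-to-linear conversion that callers eventually apply; the constant below is illustrative, not copied from Volume.h:

    #include <cmath>
    #include <cstdio>

    // Illustrative value; the framework's VOLUME_MIN_DB is likewise a large negative dB figure.
    static const float VOLUME_MIN_DB = -758.0f;

    // Convert a volume expressed in dB into the linear amplitude factor applied to samples.
    static float dbToLinear(float db) {
        return powf(10.0f, db / 20.0f);
    }

    int main() {
        printf("0 dB          -> gain %f (unity)\n", dbToLinear(0.0f));
        printf("-20 dB        -> gain %f\n", dbToLinear(-20.0f));
        printf("VOLUME_MIN_DB -> gain %g (inaudible)\n", dbToLinear(VOLUME_MIN_DB));
        return 0;
    }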
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index 8d43b89..f6ffa2a 100755
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -31,6 +31,11 @@ LOCAL_C_INCLUDES := \
$(call include-path-for, bionic) \
$(TOPDIR)frameworks/av/services/audiopolicy/common/include
+ifeq ($(call is-vendor-board-platform,QCOM),true)
+ifneq ($(strip $(AUDIO_FEATURE_ENABLED_PROXY_DEVICE)),false)
+LOCAL_CFLAGS += -DAUDIO_EXTN_AFE_PROXY_ENABLED
+endif
+endif
LOCAL_MODULE := libaudiopolicyenginedefault
LOCAL_MODULE_TAGS := optional
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 0686414..627e1d3 100755
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -355,7 +355,11 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
// - cannot route from voice call RX OR
// - audio HAL version is < 3.0 and TX device is on the primary HW module
if (getPhoneState() == AUDIO_MODE_IN_CALL) {
+#ifdef LEGACY_ALSA_AUDIO
+ audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_CALL);
+#else
audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+#endif
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
audio_devices_t availPrimaryInputDevices =
availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
@@ -408,9 +412,10 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
- if (device) break;
}
+ // Allow voice call on USB ANLG DOCK headset
+ device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+ if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
if (device) break;
device = mApmObserver->getDefaultOutputDevice()->type();
@@ -450,6 +455,13 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
}
break;
}
+
+ if (isInCall() && (device == AUDIO_DEVICE_NONE)) {
+ // when in call, get the device for Phone strategy
+ device = getDeviceForStrategy(STRATEGY_PHONE);
+ break;
+ }
+
break;
case STRATEGY_SONIFICATION:
@@ -498,6 +510,13 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
case STRATEGY_REROUTING:
case STRATEGY_MEDIA: {
uint32_t device2 = AUDIO_DEVICE_NONE;
+
+ if (isInCall() && (device == AUDIO_DEVICE_NONE)) {
+ // when in call, get the device for Phone strategy
+ device = getDeviceForStrategy(STRATEGY_PHONE);
+ break;
+ }
+
if (strategy != STRATEGY_SONIFICATION) {
// no sonification on remote submix (e.g. WFD)
if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
@@ -541,14 +560,23 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
}
- if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
+ if ((strategy != STRATEGY_SONIFICATION) && (device == AUDIO_DEVICE_NONE)
+ && (device2 == AUDIO_DEVICE_NONE)) {
// no sonification on aux digital (e.g. HDMI)
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
}
if ((device2 == AUDIO_DEVICE_NONE) &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
+ (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)
+ && (strategy != STRATEGY_SONIFICATION)) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
}
+#ifdef AUDIO_EXTN_AFE_PROXY_ENABLED
+ if ((strategy != STRATEGY_SONIFICATION) && (device == AUDIO_DEVICE_NONE)
+ && (device2 == AUDIO_DEVICE_NONE)) {
+ // no sonification on WFD sink
+ device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_PROXY;
+ }
+#endif
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
}
@@ -591,9 +619,11 @@ audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
{
- const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+#ifndef LEGACY_ALSA_AUDIO
+ const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+#endif
audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
uint32_t device = AUDIO_DEVICE_NONE;
@@ -623,6 +653,9 @@ audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) cons
break;
case AUDIO_SOURCE_VOICE_COMMUNICATION:
+#ifdef LEGACY_ALSA_AUDIO
+ device = AUDIO_DEVICE_IN_COMMUNICATION;
+#else
// Allow only use of devices on primary input if in call and HAL does not support routing
// to voice call path.
if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
@@ -660,6 +693,7 @@ audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) cons
}
break;
}
+#endif
break;
case AUDIO_SOURCE_VOICE_RECOGNITION:
@@ -671,6 +705,8 @@ audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) cons
device = AUDIO_DEVICE_IN_WIRED_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
device = AUDIO_DEVICE_IN_USB_DEVICE;
+ } else if (availableDeviceTypes & AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET) {
+ device = AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
device = AUDIO_DEVICE_IN_BUILTIN_MIC;
}
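The two new in-call blocks above make STRATEGY_DTMF and STRATEGY_MEDIA fall back to whatever STRATEGY_PHONE would pick while a call is active and no device has been chosen yet. A minimal standalone sketch of that fallback; the enum values and helper are simplified stand-ins for audio_devices_t and getDeviceForStrategy():

    #include <cstdint>
    #include <cstdio>

    enum Strategy { STRATEGY_PHONE, STRATEGY_MEDIA, STRATEGY_DTMF };
    typedef uint32_t devices_t;
    static const devices_t DEVICE_NONE     = 0;
    static const devices_t DEVICE_EARPIECE = 1u << 0;
    static const devices_t DEVICE_SPEAKER  = 1u << 1;

    static bool gInCall = true;  // pretend a voice call is active

    static devices_t deviceForStrategy(Strategy strategy) {
        devices_t device = DEVICE_NONE;
        switch (strategy) {
        case STRATEGY_PHONE:
            device = DEVICE_EARPIECE;
            break;
        case STRATEGY_MEDIA:
        case STRATEGY_DTMF:
            // Mirror of the added blocks: while in a call, and before any device has
            // been chosen for this strategy, route it wherever the call is routed.
            if (gInCall && device == DEVICE_NONE) {
                device = deviceForStrategy(STRATEGY_PHONE);
                break;
            }
            device = DEVICE_SPEAKER;
            break;
        }
        return device;
    }

    int main() {
        printf("media routes to %#x while in call\n", deviceForStrategy(STRATEGY_MEDIA));
        return 0;
    }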
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 5ff1c0b..13499ae 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -351,6 +351,14 @@ void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs
AUDIO_OUTPUT_FLAG_NONE,
AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
+ // close active input (if any) before opening new input
+ audio_io_handle_t activeInput = mInputs.getActiveInput();
+ if (activeInput != 0) {
+ ALOGV("updateCallRouting() close active input before opening new input");
+ sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
+ stopInput(activeInput, activeDesc->mSessions.itemAt(0));
+ releaseInput(activeInput, activeDesc->mSessions.itemAt(0));
+ }
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
"updateCallRouting() RX device output is duplicated");
@@ -608,7 +616,8 @@ sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
// only retain flags that will drive the direct output profile selection
// if explicitly requested
static const uint32_t kRelevantFlags =
- (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
+ AUDIO_OUTPUT_FLAG_VOIP_RX);
flags =
(audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
@@ -1356,6 +1365,12 @@ status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
ALOGW("getInputForAttr() could not find device for source %d", inputSource);
return BAD_VALUE;
}
+ // block request to open input on USB during voice call
+ if ((AUDIO_MODE_IN_CALL == mEngine->getPhoneState()) &&
+ (device == AUDIO_DEVICE_IN_USB_DEVICE)) {
+ ALOGV("getInputForAttr(): blocking the request to open input on USB device");
+ return BAD_VALUE;
+ }
if (policyMix != NULL) {
address = policyMix->mRegistrationId;
if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
@@ -1376,20 +1391,6 @@ status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
} else {
*inputType = API_INPUT_LEGACY;
}
- // adapt channel selection to input source
- switch (inputSource) {
- case AUDIO_SOURCE_VOICE_UPLINK:
- channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
- break;
- case AUDIO_SOURCE_VOICE_DOWNLINK:
- channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK;
- break;
- case AUDIO_SOURCE_VOICE_CALL:
- channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
- break;
- default:
- break;
- }
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
@@ -1802,6 +1803,7 @@ audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
audio_io_handle_t outputOffloaded = 0;
audio_io_handle_t outputDeepBuffer = 0;
+ audio_io_handle_t outputDirectPcm = 0;
for (size_t i = 0; i < outputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
@@ -1809,6 +1811,9 @@ audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
outputOffloaded = outputs[i];
}
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT_PCM) != 0) {
+ outputDirectPcm = outputs[i];
+ }
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
outputDeepBuffer = outputs[i];
}
@@ -1819,6 +1824,9 @@ audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
if (outputOffloaded != 0) {
return outputOffloaded;
}
+ if (outputDirectPcm != 0) {
+ return outputDirectPcm;
+ }
if (outputDeepBuffer != 0) {
return outputDeepBuffer;
}
@@ -3810,7 +3818,7 @@ void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
{
audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/);
audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/);
- SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mPreviousOutputs);
+ SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mOutputs);
SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(newDevice, mOutputs);
// also take into account external policy-related changes: add all outputs which are
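Among the changes above, selectOutputForEffects() now ranks an AUDIO_OUTPUT_FLAG_DIRECT_PCM output ahead of deep buffer but behind compress offload. A standalone sketch of that priority ordering; the flag values and Output struct are simplified stand-ins for the descriptor fields:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simplified stand-ins for the output flags involved in effect placement.
    enum : uint32_t {
        FLAG_COMPRESS_OFFLOAD = 1u << 0,
        FLAG_DIRECT_PCM       = 1u << 1,
        FLAG_DEEP_BUFFER      = 1u << 2,
    };

    struct Output { int handle; uint32_t flags; };

    // Mirrors the ordering after the patch: offloaded output first, then direct PCM,
    // then deep buffer, otherwise the first candidate.
    static int selectOutputForEffects(const std::vector<Output>& outputs) {
        int offloaded = 0, directPcm = 0, deepBuffer = 0;
        for (const Output& o : outputs) {
            if (o.flags & FLAG_COMPRESS_OFFLOAD) offloaded  = o.handle;
            if (o.flags & FLAG_DIRECT_PCM)       directPcm  = o.handle;
            if (o.flags & FLAG_DEEP_BUFFER)      deepBuffer = o.handle;
        }
        if (offloaded)  return offloaded;
        if (directPcm)  return directPcm;
        if (deepBuffer) return deepBuffer;
        return outputs.empty() ? 0 : outputs.front().handle;
    }

    int main() {
        std::vector<Output> outs = {{10, FLAG_DEEP_BUFFER}, {11, FLAG_DIRECT_PCM}};
        printf("effects go to output %d\n", selectOutputForEffects(outs));
        return 0;
    }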
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index bbdf396..c40a435 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -350,7 +350,7 @@ protected:
// handle special cases for sonification strategy while in call: mute streams or replace by
// a special tone in the device used for communication
- void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
+ virtual void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
audio_mode_t getPhoneState();
@@ -397,7 +397,7 @@ protected:
// must be called every time a condition that affects the device choice for a given output is
// changed: connected device, phone state, force use, output start, output stop..
// see getDeviceForStrategy() for the use of fromCache parameter
- audio_devices_t getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+ virtual audio_devices_t getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
bool fromCache);
// updates cache of device used by all strategies (mDeviceForStrategy[])
@@ -484,11 +484,11 @@ protected:
// if argument "device" is different from AUDIO_DEVICE_NONE, startSource() will force
// the re-evaluation of the output device.
- status_t startSource(sp<AudioOutputDescriptor> outputDesc,
+ virtual status_t startSource(sp<AudioOutputDescriptor> outputDesc,
audio_stream_type_t stream,
audio_devices_t device,
uint32_t *delayMs);
- status_t stopSource(sp<AudioOutputDescriptor> outputDesc,
+ virtual status_t stopSource(sp<AudioOutputDescriptor> outputDesc,
audio_stream_type_t stream,
bool forceDeviceUpdate);
@@ -571,7 +571,7 @@ protected:
// Audio Policy Engine Interface.
AudioPolicyManagerInterface *mEngine;
-private:
+protected:
// updates device caching and output for streams that can influence the
// routing of notifications
void handleNotificationRoutingForStream(audio_stream_type_t stream);
@@ -586,7 +586,7 @@ private:
SortedVector<audio_io_handle_t>& outputs /*out*/);
uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
// internal method to return the output handle for the given device and format
- audio_io_handle_t getOutputForDevice(
+ virtual audio_io_handle_t getOutputForDevice(
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
@@ -610,7 +610,7 @@ private:
AudioMix **policyMix = NULL);
// Called by setDeviceConnectionState().
- status_t setDeviceConnectionStateInt(audio_devices_t device,
+ virtual status_t setDeviceConnectionStateInt(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
const char *device_name);
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 489a9be..82720f4 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -219,6 +219,12 @@ void AudioPolicyService::AudioPolicyClient::onDynamicPolicyMixStateUpdate(
mAudioPolicyService->onDynamicPolicyMixStateUpdate(regId, state);
}
+void AudioPolicyService::AudioPolicyClient::onOutputSessionEffectsUpdate(
+ sp<AudioSessionInfo>& info, bool added)
+{
+ mAudioPolicyService->onOutputSessionEffectsUpdate(info, added);
+}
+
audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId()
{
return AudioSystem::newAudioUniqueId();
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
index a79f8ae..36c85f1 100644
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
@@ -125,8 +125,13 @@ audio_io_handle_t aps_open_output_on_module(void *service __unused,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo)
{
+#ifdef HAVE_PRE_KITKAT_AUDIO_POLICY_BLOB
+ return open_output(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+ pLatencyMs, flags, NULL);
+#else
return open_output(module, pDevices, pSamplingRate, pFormat, pChannelMask,
pLatencyMs, flags, offloadInfo);
+#endif
}
audio_io_handle_t aps_open_dup_output(void *service __unused,
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 282ddeb..d6fabfe 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -28,6 +28,7 @@
#include <utils/Vector.h>
#include <utils/SortedVector.h>
#include <cutils/config_utils.h>
+#include "AudioPolicyService.h"
#include "AudioPolicyEffects.h"
#include "ServiceUtilities.h"
@@ -37,10 +38,13 @@ namespace android {
// AudioPolicyEffects Implementation
// ----------------------------------------------------------------------------
-AudioPolicyEffects::AudioPolicyEffects()
+AudioPolicyEffects::AudioPolicyEffects(AudioPolicyService *audioPolicyService) :
+ mAudioPolicyService(audioPolicyService)
{
// load automatic audio effect modules
- if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
+ if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE2, R_OK) == 0) {
+ loadAudioEffectConfig(AUDIO_EFFECT_VENDOR_CONFIG_FILE2);
+ } else if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
loadAudioEffectConfig(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
} else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
loadAudioEffectConfig(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
@@ -224,6 +228,8 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output,
{
status_t status = NO_ERROR;
+ ALOGV("addOutputSessionEffects %d", audioSession);
+
Mutex::Autolock _l(mLock);
// create audio processors according to stream
// FIXME: should we have specific post processing settings for internal streams?
@@ -231,6 +237,22 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output,
if (stream >= AUDIO_STREAM_PUBLIC_CNT) {
stream = AUDIO_STREAM_MUSIC;
}
+
+ // send the stream info notification only once
+ ssize_t sidx = mOutputAudioSessionInfo.indexOfKey(audioSession);
+ if (sidx >= 0) {
+ // the AudioSessionInfo already exists; just bump its ref count
+ sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(sidx);
+ info->mRefCount++;
+
+ if (info->mRefCount == 1) {
+ mAudioPolicyService->onOutputSessionEffectsUpdate(info, true);
+ }
+ ALOGV("addOutputSessionEffects(): session info %d refCount=%d", audioSession, info->mRefCount);
+ } else {
+ ALOGV("addOutputSessionEffects(): no output stream info found for stream");
+ }
+
ssize_t index = mOutputStreams.indexOfKey(stream);
if (index < 0) {
ALOGV("addOutputSessionEffects(): no output processing needed for this stream");
@@ -273,6 +295,86 @@ status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output,
return status;
}
+status_t AudioPolicyEffects::releaseOutputAudioSessionInfo(audio_io_handle_t /* output */,
+ audio_stream_type_t stream,
+ int session)
+{
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ ssize_t idx = mOutputAudioSessionInfo.indexOfKey(session);
+ if (idx >= 0) {
+ sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(idx);
+ if (info->mRefCount == 0) {
+ mOutputAudioSessionInfo.removeItemsAt(idx);
+ }
+ ALOGV("releaseOutputAudioSessionInfo() sessionId=%d refcount=%d",
+ session, info->mRefCount);
+ } else {
+ ALOGV("releaseOutputAudioSessionInfo() no session info found");
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::updateOutputAudioSessionInfo(audio_io_handle_t /* output */,
+ audio_stream_type_t stream,
+ int session,
+ audio_output_flags_t flags,
+ audio_channel_mask_t channelMask, uid_t uid)
+{
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ // TODO: Handle other stream types based on client registration
+ if (stream != AUDIO_STREAM_MUSIC) {
+ return NO_ERROR;
+ }
+
+ // update AudioSessionInfo. This is used in the stream open/close path
+ // to notify userspace applications about session creation and
+ // teardown, allowing the app to make decisions about effects for
+ // a particular stream. This is independent of the current
+ // output_session_processing feature which forcibly attaches a
+ // static list of effects to a stream.
+ ssize_t idx = mOutputAudioSessionInfo.indexOfKey(session);
+ sp<AudioSessionInfo> info;
+ if (idx < 0) {
+ info = new AudioSessionInfo(session, stream, flags, channelMask, uid);
+ mOutputAudioSessionInfo.add(session, info);
+ } else {
+ // the stream info (flags, channel mask) may change for an existing session
+ info = mOutputAudioSessionInfo.valueAt(idx);
+ info->mFlags = flags;
+ info->mChannelMask = channelMask;
+ }
+
+ ALOGV("updateOutputAudioSessionInfo() sessionId=%d, flags=0x%x, channelMask=0x%x uid=%d refCount=%d",
+ info->mSessionId, info->mFlags, info->mChannelMask, info->mUid, info->mRefCount);
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::listAudioSessions(audio_stream_type_t streams,
+ Vector< sp<AudioSessionInfo>> &sessions)
+{
+ ALOGV("listAudioSessions() streams %d", streams);
+
+ for (unsigned int i = 0; i < mOutputAudioSessionInfo.size(); i++) {
+ sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(i);
+ if (streams == -1 || info->mStream == streams) {
+ sessions.push_back(info);
+ }
+ }
+
+ return NO_ERROR;
+}
+
status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t output,
audio_stream_type_t stream,
int audioSession)
@@ -282,7 +384,19 @@ status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t outpu
(void) stream; // argument not used for now
Mutex::Autolock _l(mLock);
- ssize_t index = mOutputSessions.indexOfKey(audioSession);
+ ssize_t index = mOutputAudioSessionInfo.indexOfKey(audioSession);
+ if (index >= 0) {
+ sp<AudioSessionInfo> info = mOutputAudioSessionInfo.valueAt(index);
+ info->mRefCount--;
+ if (info->mRefCount == 0) {
+ mAudioPolicyService->onOutputSessionEffectsUpdate(info, false);
+ }
+ ALOGV("releaseOutputSessionEffects(): session=%d refCount=%d", info->mSessionId, info->mRefCount);
+ } else {
+ ALOGV("releaseOutputSessionEffects: no stream info was attached to this stream");
+ }
+
+ index = mOutputSessions.indexOfKey(audioSession);
if (index < 0) {
ALOGV("releaseOutputSessionEffects: no output processing was attached to this stream");
return NO_ERROR;
@@ -442,6 +556,7 @@ effect_param_t *AudioPolicyEffects::loadEffectParameter(cnode *root)
size_t curSize = sizeof(effect_param_t);
size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int);
effect_param_t *fx_param = (effect_param_t *)malloc(totSize);
+ CHECK(fx_param != NULL);
param = config_find(root, PARAM_TAG);
value = config_find(root, VALUE_TAG);
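The additions above keep a per-session refcount in mOutputAudioSessionInfo and notify the service only when the first user of a session appears (refCount reaching 1) and when the last one releases it (refCount hitting 0). A simplified standalone mirror of that bookkeeping, with std types replacing KeyedVector/sp and a plain function pointer standing in for the service callback:

    #include <cstdio>
    #include <map>
    #include <mutex>

    // Minimal stand-in for the refcounted session-info bookkeeping: the listener
    // hears about a session exactly once when its first user appears and once
    // when its last user goes away.
    struct SessionInfo { int sessionId = 0; int refCount = 0; };

    class SessionTracker {
    public:
        using Callback = void (*)(const SessionInfo&, bool added);
        explicit SessionTracker(Callback cb) : mCallback(cb) {}

        void addSession(int sessionId) {
            std::lock_guard<std::mutex> _l(mLock);
            SessionInfo& info = mSessions[sessionId];
            info.sessionId = sessionId;
            if (++info.refCount == 1) mCallback(info, true);
        }
        void releaseSession(int sessionId) {
            std::lock_guard<std::mutex> _l(mLock);
            auto it = mSessions.find(sessionId);
            if (it == mSessions.end()) return;
            if (--it->second.refCount == 0) {
                mCallback(it->second, false);
                mSessions.erase(it);
            }
        }
    private:
        std::mutex mLock;
        std::map<int, SessionInfo> mSessions;
        Callback mCallback;
    };

    static void onUpdate(const SessionInfo& info, bool added) {
        printf("session %d %s\n", info.sessionId, added ? "added" : "removed");
    }

    int main() {
        SessionTracker tracker(onUpdate);
        tracker.addSession(42);      // notifies "added"
        tracker.addSession(42);      // refcount only
        tracker.releaseSession(42);  // refcount only
        tracker.releaseSession(42);  // notifies "removed"
        return 0;
    }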
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 3dec437..a95d49f 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -27,8 +27,12 @@
#include <utils/Vector.h>
#include <utils/SortedVector.h>
+#include <media/stagefright/foundation/ADebug.h>
+
namespace android {
+class AudioPolicyService;
+
// ----------------------------------------------------------------------------
// AudioPolicyEffects class
@@ -42,7 +46,7 @@ public:
// The constructor will parse audio_effects.conf
// First it will look whether vendor specific file exists,
// otherwise it will parse the system default file.
- AudioPolicyEffects();
+ AudioPolicyEffects(AudioPolicyService *audioPolicyService);
virtual ~AudioPolicyEffects();
// NOTE: methods on AudioPolicyEffects should never be called with the AudioPolicyService
@@ -82,6 +86,19 @@ public:
audio_stream_type_t stream,
int audioSession);
+ status_t updateOutputAudioSessionInfo(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ int audioSession,
+ audio_output_flags_t flags,
+ audio_channel_mask_t channelMask, uid_t uid);
+
+ status_t releaseOutputAudioSessionInfo(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ int audioSession);
+
+ status_t listAudioSessions(audio_stream_type_t streams,
+ Vector< sp<AudioSessionInfo>> &sessions);
+
private:
// class to store the description of an effects and its parameters
@@ -102,6 +119,7 @@ private:
((origParam->psize + 3) & ~3) +
((origParam->vsize + 3) & ~3);
effect_param_t *dupParam = (effect_param_t *) malloc(origSize);
+ CHECK(dupParam != NULL);
memcpy(dupParam, origParam, origSize);
// This works because the param buffer allocation is also done by
// multiples of 4 bytes originally. In theory we should memcpy only
@@ -189,6 +207,10 @@ private:
KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
// Automatic output effects are unique for audiosession ID
KeyedVector< int32_t, EffectVector* > mOutputSessions;
+ // Stream info for session events
+ KeyedVector< int32_t, sp<AudioSessionInfo> > mOutputAudioSessionInfo;
+
+ AudioPolicyService *mAudioPolicyService;
};
}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index ca365a5..b23c35e 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -161,19 +161,32 @@ status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
- ALOGV("getOutput()");
- Mutex::Autolock _l(mLock);
+ ALOGV("getOutputForAttr()");
+ status_t status = NO_ERROR;
+ sp<AudioPolicyEffects> audioPolicyEffects;
+ {
+ Mutex::Autolock _l(mLock);
- // if the caller is us, trust the specified uid
- if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
- uid_t newclientUid = IPCThreadState::self()->getCallingUid();
- if (uid != (uid_t)-1 && uid != newclientUid) {
- ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
+ // if the caller is us, trust the specified uid
+ if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
+ uid_t newclientUid = IPCThreadState::self()->getCallingUid();
+ if (uid != (uid_t)-1 && uid != newclientUid) {
+ ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
+ }
+ uid = newclientUid;
}
- uid = newclientUid;
+ status = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
+ format, channelMask, flags, selectedDeviceId, offloadInfo);
+
+ audioPolicyEffects = mAudioPolicyEffects;
}
- return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
- format, channelMask, flags, selectedDeviceId, offloadInfo);
+
+ if (status == NO_ERROR && audioPolicyEffects != 0) {
+ audioPolicyEffects->updateOutputAudioSessionInfo(*output,
+ *stream, session, flags, channelMask, uid);
+ }
+
+ return status;
}
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -187,6 +200,20 @@ status_t AudioPolicyService::startOutput(audio_io_handle_t output,
return NO_INIT;
}
ALOGV("startOutput()");
+ return mOutputCommandThread->startOutputCommand(output, stream, session);
+}
+
+status_t AudioPolicyService::doStartOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session)
+{
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+ return BAD_VALUE;
+ }
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ ALOGV("doStartOutput()");
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
@@ -255,8 +282,16 @@ void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
audio_session_t session)
{
ALOGV("doReleaseOutput from tid %d", gettid());
- Mutex::Autolock _l(mLock);
- mAudioPolicyManager->releaseOutput(output, stream, session);
+ sp<AudioPolicyEffects>audioPolicyEffects;
+ {
+ Mutex::Autolock _l(mLock);
+ audioPolicyEffects = mAudioPolicyEffects;
+ mAudioPolicyManager->releaseOutput(output, stream, session);
+ }
+ if (audioPolicyEffects != 0) {
+ audioPolicyEffects->releaseOutputAudioSessionInfo(output,
+ stream, session);
+ }
}
status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
@@ -281,6 +316,11 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
return BAD_VALUE;
}
+
+ if ((attr->source == AUDIO_SOURCE_FM_TUNER) && !accessFmRadioAllowed()) {
+ return BAD_VALUE;
+ }
+
sp<AudioPolicyEffects>audioPolicyEffects;
status_t status;
AudioPolicyInterface::input_type_t inputType;
@@ -463,6 +503,7 @@ audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stre
if (mAudioPolicyManager == NULL) {
return AUDIO_DEVICE_NONE;
}
+ Mutex::Autolock _l(mLock);
return mAudioPolicyManager->getDevicesForStream(stream);
}
@@ -702,4 +743,25 @@ status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
return mAudioPolicyManager->stopAudioSource(handle);
}
+status_t AudioPolicyService::listAudioSessions(audio_stream_type_t streams,
+ Vector< sp<AudioSessionInfo>> &sessions)
+{
+ sp<AudioPolicyEffects> audioPolicyEffects;
+ {
+ Mutex::Autolock _l(mLock);
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ audioPolicyEffects = mAudioPolicyEffects;
+ }
+
+ if (audioPolicyEffects != 0) {
+ return audioPolicyEffects->listAudioSessions(streams, sessions);
+ }
+
+ // not an error if effects are not available
+ return NO_ERROR;
+}
+
+
}; // namespace android
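A pattern worth noting in the reworked getOutputForAttr() and doReleaseOutput() above: a local strong reference to mAudioPolicyEffects is taken while mLock is held, and the effects object is only called after the lock is released, consistent with the note in AudioPolicyEffects.h that its methods should not be called with the service lock held. A minimal sketch of that lock-scoping idiom, with std::shared_ptr and std::mutex standing in for sp<> and the android Mutex:

    #include <cstdio>
    #include <memory>
    #include <mutex>

    struct Effects {
        // Must not be called with the service lock held, e.g. because it may
        // call back into the service (mirrors the AudioPolicyEffects.h note).
        void updateSessionInfo(int session) { printf("update session %d\n", session); }
    };

    class Service {
    public:
        void getOutput(int session) {
            std::shared_ptr<Effects> effects;
            {
                std::lock_guard<std::mutex> _l(mLock);
                // ... policy-manager work that genuinely needs mLock ...
                effects = mEffects;          // take a reference while locked
            }
            if (effects) {
                effects->updateSessionInfo(session);  // call with mLock released
            }
        }
    private:
        std::mutex mLock;
        std::shared_ptr<Effects> mEffects = std::make_shared<Effects>();
    };

    int main() {
        Service s;
        s.getOutput(7);
        return 0;
    }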
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index 13af3ef..da7f45d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -158,13 +158,27 @@ status_t AudioPolicyService::startOutput(audio_io_handle_t output,
return NO_INIT;
}
ALOGV("startOutput()");
- // create audio processors according to stream
+ return mOutputCommandThread->startOutputCommand(output, stream, session);
+}
+
+status_t AudioPolicyService::doStartOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session)
+{
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+ return BAD_VALUE;
+ }
+ if (mpAudioPolicy == NULL) {
+ return NO_INIT;
+ }
+ ALOGV("doStartOutput()");
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
audioPolicyEffects = mAudioPolicyEffects;
}
if (audioPolicyEffects != 0) {
+ // create audio processors according to stream
status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
if (status != NO_ERROR && status != ALREADY_EXISTS) {
ALOGW("Failed to add effects on session %d", session);
@@ -261,6 +275,15 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
return BAD_VALUE;
}
+ if ((inputSource == AUDIO_SOURCE_FM_TUNER) && !accessFmRadioAllowed()) {
+ return BAD_VALUE;
+ }
+
+#ifdef HAVE_PRE_KITKAT_AUDIO_POLICY_BLOB
+ if (inputSource == AUDIO_SOURCE_HOTWORD)
+ inputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
+#endif
+
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
@@ -510,6 +533,9 @@ status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
{
+#ifdef HAVE_PRE_KITKAT_AUDIO_POLICY_BLOB
+ return false;
+#else
if (mpAudioPolicy == NULL) {
ALOGV("mpAudioPolicy == NULL");
return false;
@@ -521,6 +547,7 @@ bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
}
return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
+#endif
}
status_t AudioPolicyService::listAudioPorts(audio_port_role_t role __unused,
@@ -619,4 +646,9 @@ status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
return INVALID_OPERATION;
}
+status_t AudioPolicyService::listAudioSessions(audio_stream_type_t streams __unused,
+ Vector< sp<AudioSessionInfo>> &sessions __unused)
+{
+ return INVALID_OPERATION;
+}
}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index c77cc45..79370f4 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -116,7 +116,7 @@ void AudioPolicyService::onFirstRef()
#endif
}
// load audio processing modules
- sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects();
+ sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects(this);
{
Mutex::Autolock _l(mLock);
mAudioPolicyEffects = audioPolicyEffects;
@@ -254,6 +254,21 @@ status_t AudioPolicyService::clientSetAudioPortConfig(const struct audio_port_co
return mAudioCommandThread->setAudioPortConfigCommand(config, delayMs);
}
+void AudioPolicyService::onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added)
+{
+ ALOGV("AudioPolicyService::onOutputSessionEffectsUpdate(%d, %d, %d)",
+ info->mStream, info->mSessionId, added);
+ mOutputCommandThread->effectSessionUpdateCommand(info, added);
+}
+
+void AudioPolicyService::doOnOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added)
+{
+ Mutex::Autolock _l(mNotificationClientsLock);
+ for (size_t i = 0; i < mNotificationClients.size(); i++) {
+ mNotificationClients.valueAt(i)->onOutputSessionEffectsUpdate(info, added);
+ }
+}
+
AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
uid_t uid)
@@ -289,6 +304,14 @@ void AudioPolicyService::NotificationClient::onAudioPatchListUpdate()
}
}
+void AudioPolicyService::NotificationClient::onOutputSessionEffectsUpdate(
+ sp<AudioSessionInfo>& info, bool added)
+{
+ if (mAudioPolicyServiceClient != 0) {
+ mAudioPolicyServiceClient->onOutputSessionEffectsUpdate(info, added);
+ }
+}
+
void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
String8 regId, int32_t state)
{
@@ -478,6 +501,19 @@ bool AudioPolicyService::AudioCommandThread::threadLoop()
data->mVolume);
command->mStatus = AudioSystem::setVoiceVolume(data->mVolume);
}break;
+ case START_OUTPUT: {
+ StartOutputData *data = (StartOutputData *)command->mParam.get();
+ ALOGV("AudioCommandThread() processing start output %d",
+ data->mIO);
+ svc = mService.promote();
+ if (svc == 0) {
+ command->mStatus = UNKNOWN_ERROR;
+ break;
+ }
+ mLock.unlock();
+ command->mStatus = svc->doStartOutput(data->mIO, data->mStream, data->mSession);
+ mLock.lock();
+ }break;
case STOP_OUTPUT: {
StopOutputData *data = (StopOutputData *)command->mParam.get();
ALOGV("AudioCommandThread() processing stop output %d",
@@ -566,6 +602,21 @@ bool AudioPolicyService::AudioCommandThread::threadLoop()
svc->doOnDynamicPolicyMixStateUpdate(data->mRegId, data->mState);
mLock.lock();
} break;
+ case EFFECT_SESSION_UPDATE: {
+ EffectSessionUpdateData *data =
+ (EffectSessionUpdateData *)command->mParam.get();
+ ALOGV("AudioCommandThread() processing effect session update %d %d %d",
+ data->mAudioSessionInfo->mStream, data->mAudioSessionInfo->mSessionId,
+ data->mAdded);
+ svc = mService.promote();
+ if (svc == 0) {
+ break;
+ }
+ mLock.unlock();
+ svc->doOnOutputSessionEffectsUpdate(data->mAudioSessionInfo, data->mAdded);
+ mLock.lock();
+ } break;
+
default:
ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
}
@@ -714,6 +765,22 @@ status_t AudioPolicyService::AudioCommandThread::voiceVolumeCommand(float volume
return sendCommand(command, delayMs);
}
+status_t AudioPolicyService::AudioCommandThread::startOutputCommand(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session)
+{
+ sp<AudioCommand> command = new AudioCommand();
+ command->mCommand = START_OUTPUT;
+ sp<StartOutputData> data = new StartOutputData();
+ data->mIO = output;
+ data->mStream = stream;
+ data->mSession = session;
+ command->mParam = data;
+ command->mWaitStatus = true;
+ ALOGV("AudioCommandThread() adding start output %d", output);
+ return sendCommand(command);
+}
+
void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session)
@@ -822,6 +889,20 @@ void AudioPolicyService::AudioCommandThread::dynamicPolicyMixStateUpdateCommand(
sendCommand(command);
}
+void AudioPolicyService::AudioCommandThread::effectSessionUpdateCommand(
+ sp<AudioSessionInfo>& streamInfo, bool added)
+{
+ sp<AudioCommand> command = new AudioCommand();
+ command->mCommand = EFFECT_SESSION_UPDATE;
+ EffectSessionUpdateData *data = new EffectSessionUpdateData();
+ data->mAudioSessionInfo = streamInfo;
+ data->mAdded = added;
+ command->mParam = data;
+ ALOGV("AudioCommandThread() sending effect session update (id=%d) for stream %d (added=%d)",
+ streamInfo->mStream, streamInfo->mSessionId, added);
+ sendCommand(command);
+}
+
status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
{
{
@@ -899,10 +980,12 @@ void AudioPolicyService::AudioCommandThread::insertCommand_l(sp<AudioCommand>& c
} else {
data2->mKeyValuePairs = param2.toString();
}
- command->mTime = command2->mTime;
- // force delayMs to non 0 so that code below does not request to wait for
- // command status as the command is now delayed
- delayMs = 1;
+ if (!data2->mKeyValuePairs.compare(data->mKeyValuePairs)) {
+ command->mTime = command2->mTime;
+ // force delayMs to non 0 so that code below does not request to wait for
+ // command status as the command is now delayed
+ delayMs = 1;
+ }
} break;
case SET_VOLUME: {
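With the changes above, startOutput() no longer does its work inline: it posts a START_OUTPUT command, and AudioCommandThread later promotes the weak service pointer, drops its own lock and calls doStartOutput(); EFFECT_SESSION_UPDATE follows the same route. Below is a minimal standalone mirror of that post-and-dispatch pattern, using std::thread and std::function in place of the android Thread and AudioCommand classes (the real AudioCommandThread additionally supports per-command wait status and command coalescing, which this sketch omits):

    #include <condition_variable>
    #include <cstdio>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>

    // The binder-facing method just enqueues a command; a dedicated thread
    // performs the real work outside the caller's context, unlocking around
    // the callback just as the command loop unlocks around doStartOutput().
    class CommandThread {
    public:
        CommandThread() : mWorker([this] { run(); }) {}
        ~CommandThread() {
            { std::lock_guard<std::mutex> _l(mLock); mDone = true; }
            mCond.notify_one();
            mWorker.join();
        }
        void post(std::function<void()> cmd) {
            { std::lock_guard<std::mutex> _l(mLock); mQueue.push(std::move(cmd)); }
            mCond.notify_one();
        }
    private:
        void run() {
            std::unique_lock<std::mutex> lock(mLock);
            while (!mDone || !mQueue.empty()) {
                if (mQueue.empty()) { mCond.wait(lock); continue; }
                std::function<void()> cmd = std::move(mQueue.front());
                mQueue.pop();
                lock.unlock();      // like mLock.unlock() around doStartOutput()
                cmd();
                lock.lock();
            }
        }
        std::mutex mLock;
        std::condition_variable mCond;
        std::queue<std::function<void()>> mQueue;
        bool mDone = false;
        std::thread mWorker;
    };

    int main() {
        CommandThread thread;
        thread.post([] { printf("doStartOutput(output=13)\n"); });
        // destructor drains the queue and joins the worker
        return 0;
    }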
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a0d5aa2..b7f55ae 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -202,6 +202,12 @@ public:
audio_io_handle_t *handle);
virtual status_t stopAudioSource(audio_io_handle_t handle);
+ virtual status_t listAudioSessions(audio_stream_type_t stream,
+ Vector< sp<AudioSessionInfo>>& sessions);
+
+ status_t doStartOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session);
status_t doStopOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session);
@@ -226,6 +232,9 @@ public:
void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
void doOnDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+ void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added);
+ void doOnOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added);
+
private:
AudioPolicyService() ANDROID_API;
virtual ~AudioPolicyService();
@@ -249,6 +258,7 @@ private:
SET_VOLUME,
SET_PARAMETERS,
SET_VOICE_VOLUME,
+ START_OUTPUT,
STOP_OUTPUT,
RELEASE_OUTPUT,
CREATE_AUDIO_PATCH,
@@ -256,7 +266,8 @@ private:
UPDATE_AUDIOPORT_LIST,
UPDATE_AUDIOPATCH_LIST,
SET_AUDIOPORT_CONFIG,
- DYN_POLICY_MIX_STATE_UPDATE
+ DYN_POLICY_MIX_STATE_UPDATE,
+ EFFECT_SESSION_UPDATE,
};
AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -277,6 +288,9 @@ private:
status_t parametersCommand(audio_io_handle_t ioHandle,
const char *keyValuePairs, int delayMs = 0);
status_t voiceVolumeCommand(float volume, int delayMs = 0);
+ status_t startOutputCommand(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session);
void stopOutputCommand(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session);
@@ -296,6 +310,7 @@ private:
int delayMs);
void dynamicPolicyMixStateUpdateCommand(String8 regId, int32_t state);
void insertCommand_l(AudioCommand *command, int delayMs = 0);
+ void effectSessionUpdateCommand(sp<AudioSessionInfo>& info, bool added);
private:
class AudioCommandData;
@@ -349,6 +364,13 @@ private:
float mVolume;
};
+ class StartOutputData : public AudioCommandData {
+ public:
+ audio_io_handle_t mIO;
+ audio_stream_type_t mStream;
+ audio_session_t mSession;
+ };
+
class StopOutputData : public AudioCommandData {
public:
audio_io_handle_t mIO;
@@ -385,6 +407,12 @@ private:
int32_t mState;
};
+ class EffectSessionUpdateData : public AudioCommandData {
+ public:
+ sp<AudioSessionInfo> mAudioSessionInfo;
+ bool mAdded;
+ };
+
Mutex mLock;
Condition mWaitWorkCV;
Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
@@ -494,6 +522,9 @@ private:
virtual audio_unique_id_t newAudioUniqueId();
+ virtual void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info, bool added);
+
+
private:
AudioPolicyService *mAudioPolicyService;
};
@@ -510,7 +541,8 @@ private:
void onAudioPatchListUpdate();
void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
void setAudioPortCallbacksEnabled(bool enabled);
-
+ void onOutputSessionEffectsUpdate(sp<AudioSessionInfo>& info,
+ bool added);
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 45900c4..ab09cb3 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -79,6 +79,14 @@ LOCAL_C_INCLUDES += \
LOCAL_CFLAGS += -Wall -Wextra
+ifeq ($(BOARD_NEEDS_MEMORYHEAPION),true)
+ LOCAL_CFLAGS += -DUSE_MEMORY_HEAP_ION
+endif
+
+ifneq ($(BOARD_NUMBER_OF_CAMERAS),)
+ LOCAL_CFLAGS += -DMAX_CAMERAS=$(BOARD_NUMBER_OF_CAMERAS)
+endif
+
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 406c1c4..62ce610 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -828,6 +828,18 @@ status_t CameraHardwareInterfaceFlashControl::initializePreviewWindow(
return device->setPreviewWindow(mSurface);
}
+static void notifyCallback(int32_t, int32_t, int32_t, void*) {
+ /* Empty */
+}
+
+static void dataCallback(int32_t, const sp<IMemory>&, camera_frame_metadata_t*, void*) {
+ /* Empty */
+}
+
+static void dataCallbackTimestamp(nsecs_t, int32_t, const sp<IMemory>&, void*) {
+ /* Empty */
+}
+
status_t CameraHardwareInterfaceFlashControl::connectCameraDevice(
const String8& cameraId) {
sp<CameraHardwareInterface> device =
@@ -841,7 +853,7 @@ status_t CameraHardwareInterfaceFlashControl::connectCameraDevice(
}
// need to set __get_memory in set_callbacks().
- device->setCallbacks(NULL, NULL, NULL, NULL);
+ device->setCallbacks(notifyCallback, dataCallback, dataCallbackTimestamp, this);
mParameters = device->getParameters();
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 7c4594f..3c9fd16 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -81,7 +81,7 @@ static void camera_device_status_change(
sp<CameraService> cs = const_cast<CameraService*>(
static_cast<const CameraService*>(callbacks));
- cs->onDeviceStatusChanged(static_cast<camera_device_status_t>(camera_id),
+ cs->onDeviceStatusChanged(camera_id,
static_cast<camera_device_status_t>(new_status));
}
@@ -153,6 +153,7 @@ void CameraService::onFirstRef()
ALOGE("Could not load camera HAL module: %d (%s)", err, strerror(-err));
logServiceError("Could not load camera HAL module", err);
mNumberOfCameras = 0;
+ mNumberOfNormalCameras = 0;
return;
}
@@ -276,7 +277,7 @@ CameraService::~CameraService() {
gCameraService = nullptr;
}
-void CameraService::onDeviceStatusChanged(camera_device_status_t cameraId,
+void CameraService::onDeviceStatusChanged(int cameraId,
camera_device_status_t newStatus) {
ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
cameraId, newStatus);
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index d2c1bd3..53233bd 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -49,6 +49,10 @@
#include <memory>
#include <utility>
+#ifndef MAX_CAMERAS
+#define MAX_CAMERAS 2
+#endif
+
namespace android {
extern volatile int32_t gLogLevel;
@@ -98,7 +102,7 @@ public:
/////////////////////////////////////////////////////////////////////
// HAL Callbacks
- virtual void onDeviceStatusChanged(camera_device_status_t cameraId,
+ virtual void onDeviceStatusChanged(int cameraId,
camera_device_status_t newStatus);
virtual void onTorchStatusChanged(const String8& cameraId,
ICameraServiceListener::TorchStatus
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index fbd4034..96266ed 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1745,8 +1745,6 @@ void Camera2Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCod
err = CAMERA_ERROR_RELEASED;
break;
case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
- err = CAMERA_ERROR_UNKNOWN;
- break;
case ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE:
err = CAMERA_ERROR_SERVER_DIED;
break;
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 6020e35..af46d63 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -56,6 +56,9 @@ CameraClient::CameraClient(const sp<CameraService>& cameraService,
mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
mLegacyMode = legacyMode;
mPlayShutterSound = true;
+
+ mLongshotEnabled = false;
+ mBurstCnt = 0;
LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
@@ -240,11 +243,6 @@ void CameraClient::disconnect() {
return;
}
- if (mClientPid <= 0) {
- LOG1("camera is unlocked (mClientPid = %d), don't tear down hardware", mClientPid);
- return;
- }
-
// Make sure disconnect() is done once and once only, whether it is called
// from the user directly, or called by the destructor.
if (mHardware == 0) return;
@@ -364,12 +362,14 @@ status_t CameraClient::setPreviewCallbackTarget(
// start preview mode
status_t CameraClient::startPreview() {
+ Mutex::Autolock lock(mLock);
LOG1("startPreview (pid %d)", getCallingPid());
return startCameraMode(CAMERA_PREVIEW_MODE);
}
// start recording mode
status_t CameraClient::startRecording() {
+ Mutex::Autolock lock(mLock);
LOG1("startRecording (pid %d)", getCallingPid());
return startCameraMode(CAMERA_RECORDING_MODE);
}
@@ -377,7 +377,6 @@ status_t CameraClient::startRecording() {
// start preview or recording
status_t CameraClient::startCameraMode(camera_mode mode) {
LOG1("startCameraMode(%d)", mode);
- Mutex::Autolock lock(mLock);
status_t result = checkPidAndHardware();
if (result != NO_ERROR) return result;
@@ -557,6 +556,10 @@ status_t CameraClient::takePicture(int msgType) {
CAMERA_MSG_COMPRESSED_IMAGE);
enableMsgType(picMsgType);
+ mBurstCnt = mHardware->getParameters().getInt("num-snaps-per-shutter");
+ if(mBurstCnt <= 0)
+ mBurstCnt = 1;
+ LOG1("mBurstCnt = %d", mBurstCnt);
return mHardware->takePicture();
}
@@ -659,6 +662,20 @@ status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
} else if (cmd == CAMERA_CMD_PING) {
// If mHardware is 0, checkPidAndHardware will return error.
return OK;
+ } else if (cmd == CAMERA_CMD_HISTOGRAM_ON) {
+ enableMsgType(CAMERA_MSG_STATS_DATA);
+ } else if (cmd == CAMERA_CMD_HISTOGRAM_OFF) {
+ disableMsgType(CAMERA_MSG_STATS_DATA);
+ } else if (cmd == CAMERA_CMD_METADATA_ON) {
+ enableMsgType(CAMERA_MSG_META_DATA);
+ } else if (cmd == CAMERA_CMD_METADATA_OFF) {
+ disableMsgType(CAMERA_MSG_META_DATA);
+ } else if ( cmd == CAMERA_CMD_LONGSHOT_ON ) {
+ mLongshotEnabled = true;
+ } else if ( cmd == CAMERA_CMD_LONGSHOT_OFF ) {
+ mLongshotEnabled = false;
+ disableMsgType(CAMERA_MSG_SHUTTER);
+ disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
}
return mHardware->sendCommand(cmd, arg1, arg2);
@@ -678,6 +695,9 @@ void CameraClient::disableMsgType(int32_t msgType) {
#define CHECK_MESSAGE_INTERVAL 10 // 10ms
bool CameraClient::lockIfMessageWanted(int32_t msgType) {
+#ifdef MTK_HARDWARE
+ return true;
+#endif
int sleepCount = 0;
while (mMsgEnabled & msgType) {
if (mLock.tryLock() == NO_ERROR) {
@@ -801,7 +821,9 @@ void CameraClient::handleShutter(void) {
c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0);
if (!lockIfMessageWanted(CAMERA_MSG_SHUTTER)) return;
}
- disableMsgType(CAMERA_MSG_SHUTTER);
+ if ( !mLongshotEnabled ) {
+ disableMsgType(CAMERA_MSG_SHUTTER);
+ }
// Shutters only happen in response to takePicture, so mark device as
// idle now, until preview is restarted
@@ -886,7 +908,13 @@ void CameraClient::handleRawPicture(const sp<IMemory>& mem) {
// picture callback - compressed picture ready
void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
- disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
+ if (mBurstCnt)
+ mBurstCnt--;
+
+ if (!mBurstCnt && !mLongshotEnabled) {
+ LOG1("handleCompressedPicture mBurstCnt = %d", mBurstCnt);
+ disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
+ }
sp<ICameraClient> c = mRemoteCallback;
mLock.unlock();
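The compressed-picture path above now keeps CAMERA_MSG_COMPRESSED_IMAGE enabled until the whole burst has been delivered, and never auto-disables it while longshot mode is active. A standalone sketch of that gating, with a plain bool standing in for the message-enable bitmask and the names chosen only for illustration:

    #include <cstdio>

    // Stand-in for the burst/longshot gating in handleCompressedPicture(): the
    // compressed-image message stays enabled until the last frame of the burst,
    // and is never auto-disabled while longshot is active.
    struct BurstGate {
        int  burstCount           = 1;      // from "num-snaps-per-shutter", clamped to >= 1
        bool longshotEnabled      = false;
        bool compressedMsgEnabled = true;

        void onCompressedPicture() {
            if (burstCount > 0)
                burstCount--;
            if (burstCount == 0 && !longshotEnabled) {
                compressedMsgEnabled = false;   // burst finished: stop delivery
            }
        }
    };

    int main() {
        BurstGate gate;
        gate.burstCount = 3;                      // e.g. a 3-shot burst
        for (int i = 0; i < 3; i++) {
            gate.onCompressedPicture();
            printf("frame %d delivered, msg enabled=%d\n", i + 1, gate.compressedMsgEnabled);
        }
        return 0;
    }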
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 17999a5..d2cb64a 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -164,6 +164,9 @@ private:
// This function keeps trying to grab mLock, or give up if the message
// is found to be disabled. It returns true if mLock is grabbed.
bool lockIfMessageWanted(int32_t msgType);
+
+ bool mLongshotEnabled;
+ int mBurstCnt;
};
}
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 7f14cd4..35947a9 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -25,7 +25,10 @@
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
#include <system/window.h>
-#include <hardware/camera.h>
+#include "hardware/camera.h"
+#ifdef USE_MEMORY_HEAP_ION
+#include <binder/MemoryHeapIon.h>
+#endif
namespace android {
@@ -322,6 +325,10 @@ public:
void releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (mem == NULL) {
+ ALOGE("%s: NULL memory reference", __FUNCTION__);
+ return;
+ }
if (mDevice->ops->release_recording_frame) {
ssize_t offset;
size_t size;
@@ -501,7 +508,11 @@ private:
mBufSize(buf_size),
mNumBufs(num_buffers)
{
+#ifdef USE_MEMORY_HEAP_ION
+ mHeap = new MemoryHeapIon(fd, buf_size * num_buffers);
+#else
mHeap = new MemoryHeapBase(fd, buf_size * num_buffers);
+#endif
commonInitialization();
}
@@ -509,7 +520,11 @@ private:
mBufSize(buf_size),
mNumBufs(num_buffers)
{
+#ifdef USE_MEMORY_HEAP_ION
+ mHeap = new MemoryHeapIon(buf_size * num_buffers);
+#else
mHeap = new MemoryHeapBase(buf_size * num_buffers);
+#endif
commonInitialization();
}
@@ -541,14 +556,24 @@ private:
camera_memory_t handle;
};
+#ifdef USE_MEMORY_HEAP_ION
+ static camera_memory_t* __get_memory(int fd, size_t buf_size, uint_t num_bufs,
+ void *ion_fd)
+ {
+#else
static camera_memory_t* __get_memory(int fd, size_t buf_size, uint_t num_bufs,
void *user __attribute__((unused)))
{
+#endif
CameraHeapMemory *mem;
if (fd < 0)
mem = new CameraHeapMemory(buf_size, num_bufs);
else
mem = new CameraHeapMemory(fd, buf_size, num_bufs);
+#ifdef USE_MEMORY_HEAP_ION
+ if (ion_fd)
+ *((int *) ion_fd) = mem->mHeap->getHeapID();
+#endif
mem->incStrong(mem);
return &mem->handle;
}
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index e428388..a1cc6ff 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -270,6 +270,37 @@ void SoundTriggerHwService::sendRecognitionEvent(struct sound_trigger_recognitio
if (module == NULL) {
return;
}
+ if (event-> type == SOUND_MODEL_TYPE_KEYPHRASE && event->data_size != 0
+ && event->data_offset != sizeof(struct sound_trigger_phrase_recognition_event)) {
+ // fill in sane defaults for the phrase when the recognition event would not parse as a phrase event
+ // TODO: read defaults from the config
+
+ struct sound_trigger_phrase_recognition_event newEvent;
+ memset(&newEvent, 0, sizeof(struct sound_trigger_phrase_recognition_event));
+
+ sp<Model> model = module->getModel(event->model);
+
+ newEvent.num_phrases = 1;
+ newEvent.phrase_extras[0].id = 100;
+ newEvent.phrase_extras[0].recognition_modes = RECOGNITION_MODE_VOICE_TRIGGER;
+ newEvent.phrase_extras[0].confidence_level = 100;
+ newEvent.phrase_extras[0].num_levels = 1;
+ newEvent.phrase_extras[0].levels[0].level = 100;
+ newEvent.phrase_extras[0].levels[0].user_id = 100;
+ newEvent.common.status = event->status;
+ newEvent.common.type = event->type;
+ newEvent.common.model = event->model;
+ newEvent.common.capture_available = event->capture_available;
+ newEvent.common.capture_session = event->capture_session;
+ newEvent.common.capture_delay_ms = event->capture_delay_ms;
+ newEvent.common.capture_preamble_ms = event->capture_preamble_ms;
+ newEvent.common.trigger_in_data = event->trigger_in_data;
+ newEvent.common.audio_config = event->audio_config;
+ newEvent.common.data_size = event->data_size;
+ newEvent.common.data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
+
+ event = &newEvent.common;
+ }
sp<IMemory> eventMemory = prepareRecognitionEvent_l(event);
if (eventMemory == 0) {
return;
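The block added above rebuilds a well-formed phrase recognition event around the HAL's common fields whenever the reported data_offset does not line up with the phrase-event layout. A simplified standalone illustration of that normalization; the struct layouts and field set here are illustrative stand-ins for the sound_trigger event structures:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Simplified stand-ins: a keyphrase event embeds the common event as its first
    // member, and data_offset tells the client where the vendor payload starts
    // relative to the start of the event.
    struct common_event {
        int      model;
        uint32_t data_size;
        uint32_t data_offset;
    };
    struct phrase_event {
        common_event common;
        int          num_phrases;
        int          confidence_level;
    };

    // Mirrors the intent of the added block: when the HAL's offset does not point
    // past a phrase_event, rebuild a well-formed one around the common fields.
    static phrase_event normalize(const common_event& in) {
        phrase_event out;
        memset(&out, 0, sizeof(out));
        out.common = in;
        out.common.data_offset = sizeof(phrase_event);
        out.num_phrases = 1;
        out.confidence_level = 100;
        return out;
    }

    int main() {
        common_event raw = { /*model=*/3, /*data_size=*/16, /*data_offset=*/0 };
        phrase_event fixed = normalize(raw);
        printf("model=%d data_offset=%u num_phrases=%d\n",
               fixed.common.model, fixed.common.data_offset, fixed.num_phrases);
        return 0;
    }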